]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-3.3.3-201204231833.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-3.3.3-201204231833.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index 0c083c5..bf13011 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,9 +51,11 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 @@ -69,6 +74,7 @@ Image
38 Module.markers
39 Module.symvers
40 PENDING
41 +PERF*
42 SCCS
43 System.map*
44 TAGS
45 @@ -92,19 +98,24 @@ bounds.h
46 bsetup
47 btfixupprep
48 build
49 +builtin-policy.h
50 bvmlinux
51 bzImage*
52 capability_names.h
53 capflags.c
54 classlist.h*
55 +clut_vga16.c
56 +common-cmds.h
57 comp*.log
58 compile.h*
59 conf
60 config
61 config-*
62 config_data.h*
63 +config.c
64 config.mak
65 config.mak.autogen
66 +config.tmp
67 conmakehash
68 consolemap_deftbl.c*
69 cpustr.h
70 @@ -115,9 +126,11 @@ devlist.h*
71 dnotify_test
72 docproc
73 dslm
74 +dtc-lexer.lex.c
75 elf2ecoff
76 elfconfig.h*
77 evergreen_reg_safe.h
78 +exception_policy.conf
79 fixdep
80 flask.h
81 fore200e_mkfirm
82 @@ -125,12 +138,15 @@ fore200e_pca_fw.c*
83 gconf
84 gconf.glade.h
85 gen-devlist
86 +gen-kdb_cmds.c
87 gen_crc32table
88 gen_init_cpio
89 generated
90 genheaders
91 genksyms
92 *_gray256.c
93 +hash
94 +hid-example
95 hpet_example
96 hugepage-mmap
97 hugepage-shm
98 @@ -145,7 +161,7 @@ int32.c
99 int4.c
100 int8.c
101 kallsyms
102 -kconfig
103 +kern_constants.h
104 keywords.c
105 ksym.c*
106 ksym.h*
107 @@ -153,7 +169,7 @@ kxgettext
108 lkc_defs.h
109 lex.c
110 lex.*.c
111 -linux
112 +lib1funcs.S
113 logo_*.c
114 logo_*_clut224.c
115 logo_*_mono.c
116 @@ -165,14 +181,15 @@ machtypes.h
117 map
118 map_hugetlb
119 maui_boot.h
120 -media
121 mconf
122 +mdp
123 miboot*
124 mk_elfconfig
125 mkboot
126 mkbugboot
127 mkcpustr
128 mkdep
129 +mkpiggy
130 mkprep
131 mkregtable
132 mktables
133 @@ -208,6 +225,7 @@ r300_reg_safe.h
134 r420_reg_safe.h
135 r600_reg_safe.h
136 recordmcount
137 +regdb.c
138 relocs
139 rlim_names.h
140 rn50_reg_safe.h
141 @@ -218,6 +236,7 @@ setup
142 setup.bin
143 setup.elf
144 sImage
145 +slabinfo
146 sm_tbl*
147 split-include
148 syscalltab.h
149 @@ -228,6 +247,7 @@ tftpboot.img
150 timeconst.h
151 times.h*
152 trix_boot.h
153 +user_constants.h
154 utsrelease.h*
155 vdso-syms.lds
156 vdso.lds
157 @@ -245,7 +265,9 @@ vmlinux
158 vmlinux-*
159 vmlinux.aout
160 vmlinux.bin.all
161 +vmlinux.bin.bz2
162 vmlinux.lds
163 +vmlinux.relocs
164 vmlinuz
165 voffset.h
166 vsyscall.lds
167 @@ -253,9 +275,11 @@ vsyscall_32.lds
168 wanxlfw.inc
169 uImage
170 unifdef
171 +utsrelease.h
172 wakeup.bin
173 wakeup.elf
174 wakeup.lds
175 zImage*
176 zconf.hash.c
177 +zconf.lex.c
178 zoffset.h
179 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
180 index d99fd9c..8689fef 100644
181 --- a/Documentation/kernel-parameters.txt
182 +++ b/Documentation/kernel-parameters.txt
183 @@ -1977,6 +1977,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
184 the specified number of seconds. This is to be used if
185 your oopses keep scrolling off the screen.
186
187 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
188 + virtualization environments that don't cope well with the
189 + expand down segment used by UDEREF on X86-32 or the frequent
190 + page table updates on X86-64.
191 +
192 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
193 +
194 pcbit= [HW,ISDN]
195
196 pcd. [PARIDE]
197 diff --git a/Makefile b/Makefile
198 index 0acd141..865e73d 100644
199 --- a/Makefile
200 +++ b/Makefile
201 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
202
203 HOSTCC = gcc
204 HOSTCXX = g++
205 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
206 -HOSTCXXFLAGS = -O2
207 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
208 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
209 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
210
211 # Decide whether to build built-in, modular, or both.
212 # Normally, just do built-in.
213 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
214 # Rules shared between *config targets and build targets
215
216 # Basic helpers built in scripts/
217 -PHONY += scripts_basic
218 -scripts_basic:
219 +PHONY += scripts_basic gcc-plugins
220 +scripts_basic: gcc-plugins
221 $(Q)$(MAKE) $(build)=scripts/basic
222 $(Q)rm -f .tmp_quiet_recordmcount
223
224 @@ -564,6 +565,55 @@ else
225 KBUILD_CFLAGS += -O2
226 endif
227
228 +ifndef DISABLE_PAX_PLUGINS
229 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
230 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
231 +ifndef CONFIG_UML
232 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
233 +endif
234 +endif
235 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
236 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
237 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
238 +endif
239 +ifdef CONFIG_KALLOCSTAT_PLUGIN
240 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
241 +endif
242 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
243 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
244 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
245 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
246 +endif
247 +ifdef CONFIG_CHECKER_PLUGIN
248 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
249 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
250 +endif
251 +endif
252 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
253 +ifdef CONFIG_PAX_SIZE_OVERFLOW
254 +SIZE_OVERFLOW_PLUGIN := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
255 +endif
256 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
257 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN)
258 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
259 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
260 +ifeq ($(KBUILD_EXTMOD),)
261 +gcc-plugins:
262 + $(Q)$(MAKE) $(build)=tools/gcc
263 +else
264 +gcc-plugins: ;
265 +endif
266 +else
267 +gcc-plugins:
268 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
269 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
270 +else
271 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
272 +endif
273 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
274 +endif
275 +endif
276 +
277 include $(srctree)/arch/$(SRCARCH)/Makefile
278
279 ifneq ($(CONFIG_FRAME_WARN),0)
280 @@ -708,7 +758,7 @@ export mod_strip_cmd
281
282
283 ifeq ($(KBUILD_EXTMOD),)
284 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
285 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
286
287 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
288 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
289 @@ -932,6 +982,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
290
291 # The actual objects are generated when descending,
292 # make sure no implicit rule kicks in
293 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
294 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
295 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
296
297 # Handle descending into subdirectories listed in $(vmlinux-dirs)
298 @@ -941,7 +993,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
299 # Error messages still appears in the original language
300
301 PHONY += $(vmlinux-dirs)
302 -$(vmlinux-dirs): prepare scripts
303 +$(vmlinux-dirs): gcc-plugins prepare scripts
304 $(Q)$(MAKE) $(build)=$@
305
306 # Store (new) KERNELRELASE string in include/config/kernel.release
307 @@ -985,6 +1037,7 @@ prepare0: archprepare FORCE
308 $(Q)$(MAKE) $(build)=.
309
310 # All the preparing..
311 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
312 prepare: prepare0
313
314 # Generate some files
315 @@ -1089,6 +1142,8 @@ all: modules
316 # using awk while concatenating to the final file.
317
318 PHONY += modules
319 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
320 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
321 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
322 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
323 @$(kecho) ' Building modules, stage 2.';
324 @@ -1104,7 +1159,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
325
326 # Target to prepare building external modules
327 PHONY += modules_prepare
328 -modules_prepare: prepare scripts
329 +modules_prepare: gcc-plugins prepare scripts
330
331 # Target to install modules
332 PHONY += modules_install
333 @@ -1201,6 +1256,7 @@ distclean: mrproper
334 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
335 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
336 -o -name '.*.rej' \
337 + -o -name '.*.rej' -o -name '*.so' \
338 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
339 -type f -print | xargs rm -f
340
341 @@ -1361,6 +1417,8 @@ PHONY += $(module-dirs) modules
342 $(module-dirs): crmodverdir $(objtree)/Module.symvers
343 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
344
345 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
346 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
347 modules: $(module-dirs)
348 @$(kecho) ' Building modules, stage 2.';
349 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
350 @@ -1487,17 +1545,21 @@ else
351 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
352 endif
353
354 -%.s: %.c prepare scripts FORCE
355 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
356 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
357 +%.s: %.c gcc-plugins prepare scripts FORCE
358 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
359 %.i: %.c prepare scripts FORCE
360 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
361 -%.o: %.c prepare scripts FORCE
362 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
363 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
364 +%.o: %.c gcc-plugins prepare scripts FORCE
365 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
366 %.lst: %.c prepare scripts FORCE
367 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
368 -%.s: %.S prepare scripts FORCE
369 +%.s: %.S gcc-plugins prepare scripts FORCE
370 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
371 -%.o: %.S prepare scripts FORCE
372 +%.o: %.S gcc-plugins prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374 %.symtypes: %.c prepare scripts FORCE
375 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
376 @@ -1507,11 +1569,15 @@ endif
377 $(cmd_crmodverdir)
378 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
379 $(build)=$(build-dir)
380 -%/: prepare scripts FORCE
381 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
382 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
383 +%/: gcc-plugins prepare scripts FORCE
384 $(cmd_crmodverdir)
385 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
386 $(build)=$(build-dir)
387 -%.ko: prepare scripts FORCE
388 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
389 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
390 +%.ko: gcc-plugins prepare scripts FORCE
391 $(cmd_crmodverdir)
392 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
393 $(build)=$(build-dir) $(@:.ko=.o)
394 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
395 index 640f909..48b6597 100644
396 --- a/arch/alpha/include/asm/atomic.h
397 +++ b/arch/alpha/include/asm/atomic.h
398 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
399 #define atomic_dec(v) atomic_sub(1,(v))
400 #define atomic64_dec(v) atomic64_sub(1,(v))
401
402 +#define atomic64_read_unchecked(v) atomic64_read(v)
403 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
404 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
405 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
406 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
407 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
408 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
409 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
410 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
411 +
412 #define smp_mb__before_atomic_dec() smp_mb()
413 #define smp_mb__after_atomic_dec() smp_mb()
414 #define smp_mb__before_atomic_inc() smp_mb()
415 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
416 index ad368a9..fbe0f25 100644
417 --- a/arch/alpha/include/asm/cache.h
418 +++ b/arch/alpha/include/asm/cache.h
419 @@ -4,19 +4,19 @@
420 #ifndef __ARCH_ALPHA_CACHE_H
421 #define __ARCH_ALPHA_CACHE_H
422
423 +#include <linux/const.h>
424
425 /* Bytes per L1 (data) cache line. */
426 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
427 -# define L1_CACHE_BYTES 64
428 # define L1_CACHE_SHIFT 6
429 #else
430 /* Both EV4 and EV5 are write-through, read-allocate,
431 direct-mapped, physical.
432 */
433 -# define L1_CACHE_BYTES 32
434 # define L1_CACHE_SHIFT 5
435 #endif
436
437 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
438 #define SMP_CACHE_BYTES L1_CACHE_BYTES
439
440 #endif
441 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
442 index da5449e..7418343 100644
443 --- a/arch/alpha/include/asm/elf.h
444 +++ b/arch/alpha/include/asm/elf.h
445 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
446
447 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
448
449 +#ifdef CONFIG_PAX_ASLR
450 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
451 +
452 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
453 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
454 +#endif
455 +
456 /* $0 is set by ld.so to a pointer to a function which might be
457 registered using atexit. This provides a mean for the dynamic
458 linker to call DT_FINI functions for shared libraries that have
459 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
460 index de98a73..bd4f1f8 100644
461 --- a/arch/alpha/include/asm/pgtable.h
462 +++ b/arch/alpha/include/asm/pgtable.h
463 @@ -101,6 +101,17 @@ struct vm_area_struct;
464 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
465 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
466 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
467 +
468 +#ifdef CONFIG_PAX_PAGEEXEC
469 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
470 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
471 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
472 +#else
473 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
474 +# define PAGE_COPY_NOEXEC PAGE_COPY
475 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
476 +#endif
477 +
478 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
479
480 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
481 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
482 index 2fd00b7..cfd5069 100644
483 --- a/arch/alpha/kernel/module.c
484 +++ b/arch/alpha/kernel/module.c
485 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
486
487 /* The small sections were sorted to the end of the segment.
488 The following should definitely cover them. */
489 - gp = (u64)me->module_core + me->core_size - 0x8000;
490 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
491 got = sechdrs[me->arch.gotsecindex].sh_addr;
492
493 for (i = 0; i < n; i++) {
494 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
495 index 01e8715..be0e80f 100644
496 --- a/arch/alpha/kernel/osf_sys.c
497 +++ b/arch/alpha/kernel/osf_sys.c
498 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
499 /* At this point: (!vma || addr < vma->vm_end). */
500 if (limit - len < addr)
501 return -ENOMEM;
502 - if (!vma || addr + len <= vma->vm_start)
503 + if (check_heap_stack_gap(vma, addr, len))
504 return addr;
505 addr = vma->vm_end;
506 vma = vma->vm_next;
507 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
508 merely specific addresses, but regions of memory -- perhaps
509 this feature should be incorporated into all ports? */
510
511 +#ifdef CONFIG_PAX_RANDMMAP
512 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
513 +#endif
514 +
515 if (addr) {
516 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
517 if (addr != (unsigned long) -ENOMEM)
518 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
519 }
520
521 /* Next, try allocating at TASK_UNMAPPED_BASE. */
522 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
523 - len, limit);
524 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
525 +
526 if (addr != (unsigned long) -ENOMEM)
527 return addr;
528
529 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
530 index fadd5f8..904e73a 100644
531 --- a/arch/alpha/mm/fault.c
532 +++ b/arch/alpha/mm/fault.c
533 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
534 __reload_thread(pcb);
535 }
536
537 +#ifdef CONFIG_PAX_PAGEEXEC
538 +/*
539 + * PaX: decide what to do with offenders (regs->pc = fault address)
540 + *
541 + * returns 1 when task should be killed
542 + * 2 when patched PLT trampoline was detected
543 + * 3 when unpatched PLT trampoline was detected
544 + */
545 +static int pax_handle_fetch_fault(struct pt_regs *regs)
546 +{
547 +
548 +#ifdef CONFIG_PAX_EMUPLT
549 + int err;
550 +
551 + do { /* PaX: patched PLT emulation #1 */
552 + unsigned int ldah, ldq, jmp;
553 +
554 + err = get_user(ldah, (unsigned int *)regs->pc);
555 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
556 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
557 +
558 + if (err)
559 + break;
560 +
561 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
562 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
563 + jmp == 0x6BFB0000U)
564 + {
565 + unsigned long r27, addr;
566 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
567 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
568 +
569 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
570 + err = get_user(r27, (unsigned long *)addr);
571 + if (err)
572 + break;
573 +
574 + regs->r27 = r27;
575 + regs->pc = r27;
576 + return 2;
577 + }
578 + } while (0);
579 +
580 + do { /* PaX: patched PLT emulation #2 */
581 + unsigned int ldah, lda, br;
582 +
583 + err = get_user(ldah, (unsigned int *)regs->pc);
584 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
585 + err |= get_user(br, (unsigned int *)(regs->pc+8));
586 +
587 + if (err)
588 + break;
589 +
590 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
591 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
592 + (br & 0xFFE00000U) == 0xC3E00000U)
593 + {
594 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
595 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
596 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
597 +
598 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
599 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
600 + return 2;
601 + }
602 + } while (0);
603 +
604 + do { /* PaX: unpatched PLT emulation */
605 + unsigned int br;
606 +
607 + err = get_user(br, (unsigned int *)regs->pc);
608 +
609 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
610 + unsigned int br2, ldq, nop, jmp;
611 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
612 +
613 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
614 + err = get_user(br2, (unsigned int *)addr);
615 + err |= get_user(ldq, (unsigned int *)(addr+4));
616 + err |= get_user(nop, (unsigned int *)(addr+8));
617 + err |= get_user(jmp, (unsigned int *)(addr+12));
618 + err |= get_user(resolver, (unsigned long *)(addr+16));
619 +
620 + if (err)
621 + break;
622 +
623 + if (br2 == 0xC3600000U &&
624 + ldq == 0xA77B000CU &&
625 + nop == 0x47FF041FU &&
626 + jmp == 0x6B7B0000U)
627 + {
628 + regs->r28 = regs->pc+4;
629 + regs->r27 = addr+16;
630 + regs->pc = resolver;
631 + return 3;
632 + }
633 + }
634 + } while (0);
635 +#endif
636 +
637 + return 1;
638 +}
639 +
640 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
641 +{
642 + unsigned long i;
643 +
644 + printk(KERN_ERR "PAX: bytes at PC: ");
645 + for (i = 0; i < 5; i++) {
646 + unsigned int c;
647 + if (get_user(c, (unsigned int *)pc+i))
648 + printk(KERN_CONT "???????? ");
649 + else
650 + printk(KERN_CONT "%08x ", c);
651 + }
652 + printk("\n");
653 +}
654 +#endif
655
656 /*
657 * This routine handles page faults. It determines the address,
658 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
659 good_area:
660 si_code = SEGV_ACCERR;
661 if (cause < 0) {
662 - if (!(vma->vm_flags & VM_EXEC))
663 + if (!(vma->vm_flags & VM_EXEC)) {
664 +
665 +#ifdef CONFIG_PAX_PAGEEXEC
666 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
667 + goto bad_area;
668 +
669 + up_read(&mm->mmap_sem);
670 + switch (pax_handle_fetch_fault(regs)) {
671 +
672 +#ifdef CONFIG_PAX_EMUPLT
673 + case 2:
674 + case 3:
675 + return;
676 +#endif
677 +
678 + }
679 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
680 + do_group_exit(SIGKILL);
681 +#else
682 goto bad_area;
683 +#endif
684 +
685 + }
686 } else if (!cause) {
687 /* Allow reads even for write-only mappings */
688 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
689 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
690 index 86976d0..8e07f84 100644
691 --- a/arch/arm/include/asm/atomic.h
692 +++ b/arch/arm/include/asm/atomic.h
693 @@ -15,6 +15,10 @@
694 #include <linux/types.h>
695 #include <asm/system.h>
696
697 +#ifdef CONFIG_GENERIC_ATOMIC64
698 +#include <asm-generic/atomic64.h>
699 +#endif
700 +
701 #define ATOMIC_INIT(i) { (i) }
702
703 #ifdef __KERNEL__
704 @@ -25,7 +29,15 @@
705 * atomic_set() is the clrex or dummy strex done on every exception return.
706 */
707 #define atomic_read(v) (*(volatile int *)&(v)->counter)
708 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
709 +{
710 + return v->counter;
711 +}
712 #define atomic_set(v,i) (((v)->counter) = (i))
713 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
714 +{
715 + v->counter = i;
716 +}
717
718 #if __LINUX_ARM_ARCH__ >= 6
719
720 @@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
721 int result;
722
723 __asm__ __volatile__("@ atomic_add\n"
724 +"1: ldrex %1, [%3]\n"
725 +" adds %0, %1, %4\n"
726 +
727 +#ifdef CONFIG_PAX_REFCOUNT
728 +" bvc 3f\n"
729 +"2: bkpt 0xf103\n"
730 +"3:\n"
731 +#endif
732 +
733 +" strex %1, %0, [%3]\n"
734 +" teq %1, #0\n"
735 +" bne 1b"
736 +
737 +#ifdef CONFIG_PAX_REFCOUNT
738 +"\n4:\n"
739 + _ASM_EXTABLE(2b, 4b)
740 +#endif
741 +
742 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
743 + : "r" (&v->counter), "Ir" (i)
744 + : "cc");
745 +}
746 +
747 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
748 +{
749 + unsigned long tmp;
750 + int result;
751 +
752 + __asm__ __volatile__("@ atomic_add_unchecked\n"
753 "1: ldrex %0, [%3]\n"
754 " add %0, %0, %4\n"
755 " strex %1, %0, [%3]\n"
756 @@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
757 smp_mb();
758
759 __asm__ __volatile__("@ atomic_add_return\n"
760 +"1: ldrex %1, [%3]\n"
761 +" adds %0, %1, %4\n"
762 +
763 +#ifdef CONFIG_PAX_REFCOUNT
764 +" bvc 3f\n"
765 +" mov %0, %1\n"
766 +"2: bkpt 0xf103\n"
767 +"3:\n"
768 +#endif
769 +
770 +" strex %1, %0, [%3]\n"
771 +" teq %1, #0\n"
772 +" bne 1b"
773 +
774 +#ifdef CONFIG_PAX_REFCOUNT
775 +"\n4:\n"
776 + _ASM_EXTABLE(2b, 4b)
777 +#endif
778 +
779 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
780 + : "r" (&v->counter), "Ir" (i)
781 + : "cc");
782 +
783 + smp_mb();
784 +
785 + return result;
786 +}
787 +
788 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
789 +{
790 + unsigned long tmp;
791 + int result;
792 +
793 + smp_mb();
794 +
795 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
796 "1: ldrex %0, [%3]\n"
797 " add %0, %0, %4\n"
798 " strex %1, %0, [%3]\n"
799 @@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
800 int result;
801
802 __asm__ __volatile__("@ atomic_sub\n"
803 +"1: ldrex %1, [%3]\n"
804 +" subs %0, %1, %4\n"
805 +
806 +#ifdef CONFIG_PAX_REFCOUNT
807 +" bvc 3f\n"
808 +"2: bkpt 0xf103\n"
809 +"3:\n"
810 +#endif
811 +
812 +" strex %1, %0, [%3]\n"
813 +" teq %1, #0\n"
814 +" bne 1b"
815 +
816 +#ifdef CONFIG_PAX_REFCOUNT
817 +"\n4:\n"
818 + _ASM_EXTABLE(2b, 4b)
819 +#endif
820 +
821 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
822 + : "r" (&v->counter), "Ir" (i)
823 + : "cc");
824 +}
825 +
826 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
827 +{
828 + unsigned long tmp;
829 + int result;
830 +
831 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
832 "1: ldrex %0, [%3]\n"
833 " sub %0, %0, %4\n"
834 " strex %1, %0, [%3]\n"
835 @@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
836 smp_mb();
837
838 __asm__ __volatile__("@ atomic_sub_return\n"
839 -"1: ldrex %0, [%3]\n"
840 -" sub %0, %0, %4\n"
841 +"1: ldrex %1, [%3]\n"
842 +" sub %0, %1, %4\n"
843 +
844 +#ifdef CONFIG_PAX_REFCOUNT
845 +" bvc 3f\n"
846 +" mov %0, %1\n"
847 +"2: bkpt 0xf103\n"
848 +"3:\n"
849 +#endif
850 +
851 " strex %1, %0, [%3]\n"
852 " teq %1, #0\n"
853 " bne 1b"
854 +
855 +#ifdef CONFIG_PAX_REFCOUNT
856 +"\n4:\n"
857 + _ASM_EXTABLE(2b, 4b)
858 +#endif
859 +
860 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
861 : "r" (&v->counter), "Ir" (i)
862 : "cc");
863 @@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
864 return oldval;
865 }
866
867 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
868 +{
869 + unsigned long oldval, res;
870 +
871 + smp_mb();
872 +
873 + do {
874 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
875 + "ldrex %1, [%3]\n"
876 + "mov %0, #0\n"
877 + "teq %1, %4\n"
878 + "strexeq %0, %5, [%3]\n"
879 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
880 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
881 + : "cc");
882 + } while (res);
883 +
884 + smp_mb();
885 +
886 + return oldval;
887 +}
888 +
889 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
890 {
891 unsigned long tmp, tmp2;
892 @@ -165,7 +307,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
893
894 return val;
895 }
896 +#define atomic_add_return_unchecked(i, v) atomic_add_return(i, v)
897 #define atomic_add(i, v) (void) atomic_add_return(i, v)
898 +#define atomic_add_unchecked(i, v) (void) atomic_add_return_unchecked(i, v)
899
900 static inline int atomic_sub_return(int i, atomic_t *v)
901 {
902 @@ -179,7 +323,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
903
904 return val;
905 }
906 +#define atomic_sub_return_unchecked(i, v) atomic_sub_return(i, v)
907 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
908 +#define atomic_sub_unchecked(i, v) (void) atomic_sub_return_unchecked(i, v)
909
910 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
911 {
912 @@ -194,6 +340,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
913
914 return ret;
915 }
916 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg(v, o, n)
917
918 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
919 {
920 @@ -207,6 +354,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
921 #endif /* __LINUX_ARM_ARCH__ */
922
923 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
924 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
925 +{
926 + return xchg(&v->counter, new);
927 +}
928
929 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
930 {
931 @@ -219,11 +370,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
932 }
933
934 #define atomic_inc(v) atomic_add(1, v)
935 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
936 +{
937 + atomic_add_unchecked(1, v);
938 +}
939 #define atomic_dec(v) atomic_sub(1, v)
940 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
941 +{
942 + atomic_sub_unchecked(1, v);
943 +}
944
945 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
946 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
947 +{
948 + return atomic_add_return_unchecked(1, v) == 0;
949 +}
950 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
951 #define atomic_inc_return(v) (atomic_add_return(1, v))
952 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
953 +{
954 + return atomic_add_return_unchecked(1, v);
955 +}
956 #define atomic_dec_return(v) (atomic_sub_return(1, v))
957 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
958
959 @@ -239,6 +406,14 @@ typedef struct {
960 u64 __aligned(8) counter;
961 } atomic64_t;
962
963 +#ifdef CONFIG_PAX_REFCOUNT
964 +typedef struct {
965 + u64 __aligned(8) counter;
966 +} atomic64_unchecked_t;
967 +#else
968 +typedef atomic64_t atomic64_unchecked_t;
969 +#endif
970 +
971 #define ATOMIC64_INIT(i) { (i) }
972
973 static inline u64 atomic64_read(atomic64_t *v)
974 @@ -254,6 +429,19 @@ static inline u64 atomic64_read(atomic64_t *v)
975 return result;
976 }
977
978 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
979 +{
980 + u64 result;
981 +
982 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
983 +" ldrexd %0, %H0, [%1]"
984 + : "=&r" (result)
985 + : "r" (&v->counter), "Qo" (v->counter)
986 + );
987 +
988 + return result;
989 +}
990 +
991 static inline void atomic64_set(atomic64_t *v, u64 i)
992 {
993 u64 tmp;
994 @@ -268,6 +456,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
995 : "cc");
996 }
997
998 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
999 +{
1000 + u64 tmp;
1001 +
1002 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1003 +"1: ldrexd %0, %H0, [%2]\n"
1004 +" strexd %0, %3, %H3, [%2]\n"
1005 +" teq %0, #0\n"
1006 +" bne 1b"
1007 + : "=&r" (tmp), "=Qo" (v->counter)
1008 + : "r" (&v->counter), "r" (i)
1009 + : "cc");
1010 +}
1011 +
1012 static inline void atomic64_add(u64 i, atomic64_t *v)
1013 {
1014 u64 result;
1015 @@ -276,6 +478,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1016 __asm__ __volatile__("@ atomic64_add\n"
1017 "1: ldrexd %0, %H0, [%3]\n"
1018 " adds %0, %0, %4\n"
1019 +" adcs %H0, %H0, %H4\n"
1020 +
1021 +#ifdef CONFIG_PAX_REFCOUNT
1022 +" bvc 3f\n"
1023 +"2: bkpt 0xf103\n"
1024 +"3:\n"
1025 +#endif
1026 +
1027 +" strexd %1, %0, %H0, [%3]\n"
1028 +" teq %1, #0\n"
1029 +" bne 1b"
1030 +
1031 +#ifdef CONFIG_PAX_REFCOUNT
1032 +"\n4:\n"
1033 + _ASM_EXTABLE(2b, 4b)
1034 +#endif
1035 +
1036 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1037 + : "r" (&v->counter), "r" (i)
1038 + : "cc");
1039 +}
1040 +
1041 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1042 +{
1043 + u64 result;
1044 + unsigned long tmp;
1045 +
1046 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1047 +"1: ldrexd %0, %H0, [%3]\n"
1048 +" adds %0, %0, %4\n"
1049 " adc %H0, %H0, %H4\n"
1050 " strexd %1, %0, %H0, [%3]\n"
1051 " teq %1, #0\n"
1052 @@ -287,12 +519,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1053
1054 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1055 {
1056 - u64 result;
1057 - unsigned long tmp;
1058 + u64 result, tmp;
1059
1060 smp_mb();
1061
1062 __asm__ __volatile__("@ atomic64_add_return\n"
1063 +"1: ldrexd %1, %H1, [%3]\n"
1064 +" adds %0, %1, %4\n"
1065 +" adcs %H0, %H1, %H4\n"
1066 +
1067 +#ifdef CONFIG_PAX_REFCOUNT
1068 +" bvc 3f\n"
1069 +" mov %0, %1\n"
1070 +" mov %H0, %H1\n"
1071 +"2: bkpt 0xf103\n"
1072 +"3:\n"
1073 +#endif
1074 +
1075 +" strexd %1, %0, %H0, [%3]\n"
1076 +" teq %1, #0\n"
1077 +" bne 1b"
1078 +
1079 +#ifdef CONFIG_PAX_REFCOUNT
1080 +"\n4:\n"
1081 + _ASM_EXTABLE(2b, 4b)
1082 +#endif
1083 +
1084 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1085 + : "r" (&v->counter), "r" (i)
1086 + : "cc");
1087 +
1088 + smp_mb();
1089 +
1090 + return result;
1091 +}
1092 +
1093 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1094 +{
1095 + u64 result;
1096 + unsigned long tmp;
1097 +
1098 + smp_mb();
1099 +
1100 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1101 "1: ldrexd %0, %H0, [%3]\n"
1102 " adds %0, %0, %4\n"
1103 " adc %H0, %H0, %H4\n"
1104 @@ -316,6 +585,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1105 __asm__ __volatile__("@ atomic64_sub\n"
1106 "1: ldrexd %0, %H0, [%3]\n"
1107 " subs %0, %0, %4\n"
1108 +" sbcs %H0, %H0, %H4\n"
1109 +
1110 +#ifdef CONFIG_PAX_REFCOUNT
1111 +" bvc 3f\n"
1112 +"2: bkpt 0xf103\n"
1113 +"3:\n"
1114 +#endif
1115 +
1116 +" strexd %1, %0, %H0, [%3]\n"
1117 +" teq %1, #0\n"
1118 +" bne 1b"
1119 +
1120 +#ifdef CONFIG_PAX_REFCOUNT
1121 +"\n4:\n"
1122 + _ASM_EXTABLE(2b, 4b)
1123 +#endif
1124 +
1125 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1126 + : "r" (&v->counter), "r" (i)
1127 + : "cc");
1128 +}
1129 +
1130 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1131 +{
1132 + u64 result;
1133 + unsigned long tmp;
1134 +
1135 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1136 +"1: ldrexd %0, %H0, [%3]\n"
1137 +" subs %0, %0, %4\n"
1138 " sbc %H0, %H0, %H4\n"
1139 " strexd %1, %0, %H0, [%3]\n"
1140 " teq %1, #0\n"
1141 @@ -327,18 +626,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1142
1143 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1144 {
1145 - u64 result;
1146 - unsigned long tmp;
1147 + u64 result, tmp;
1148
1149 smp_mb();
1150
1151 __asm__ __volatile__("@ atomic64_sub_return\n"
1152 -"1: ldrexd %0, %H0, [%3]\n"
1153 -" subs %0, %0, %4\n"
1154 -" sbc %H0, %H0, %H4\n"
1155 +"1: ldrexd %1, %H1, [%3]\n"
1156 +" subs %0, %1, %4\n"
1157 +" sbc %H0, %H1, %H4\n"
1158 +
1159 +#ifdef CONFIG_PAX_REFCOUNT
1160 +" bvc 3f\n"
1161 +" mov %0, %1\n"
1162 +" mov %H0, %H1\n"
1163 +"2: bkpt 0xf103\n"
1164 +"3:\n"
1165 +#endif
1166 +
1167 " strexd %1, %0, %H0, [%3]\n"
1168 " teq %1, #0\n"
1169 " bne 1b"
1170 +
1171 +#ifdef CONFIG_PAX_REFCOUNT
1172 +"\n4:\n"
1173 + _ASM_EXTABLE(2b, 4b)
1174 +#endif
1175 +
1176 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1177 : "r" (&v->counter), "r" (i)
1178 : "cc");
1179 @@ -372,6 +685,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1180 return oldval;
1181 }
1182
1183 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1184 +{
1185 + u64 oldval;
1186 + unsigned long res;
1187 +
1188 + smp_mb();
1189 +
1190 + do {
1191 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1192 + "ldrexd %1, %H1, [%3]\n"
1193 + "mov %0, #0\n"
1194 + "teq %1, %4\n"
1195 + "teqeq %H1, %H4\n"
1196 + "strexdeq %0, %5, %H5, [%3]"
1197 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1198 + : "r" (&ptr->counter), "r" (old), "r" (new)
1199 + : "cc");
1200 + } while (res);
1201 +
1202 + smp_mb();
1203 +
1204 + return oldval;
1205 +}
1206 +
1207 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1208 {
1209 u64 result;
1210 @@ -395,21 +732,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1211
1212 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1213 {
1214 - u64 result;
1215 - unsigned long tmp;
1216 + u64 result, tmp;
1217
1218 smp_mb();
1219
1220 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1221 -"1: ldrexd %0, %H0, [%3]\n"
1222 -" subs %0, %0, #1\n"
1223 -" sbc %H0, %H0, #0\n"
1224 +"1: ldrexd %1, %H1, [%3]\n"
1225 +" subs %0, %1, #1\n"
1226 +" sbc %H0, %H1, #0\n"
1227 +
1228 +#ifdef CONFIG_PAX_REFCOUNT
1229 +" bvc 3f\n"
1230 +" mov %0, %1\n"
1231 +" mov %H0, %H1\n"
1232 +"2: bkpt 0xf103\n"
1233 +"3:\n"
1234 +#endif
1235 +
1236 " teq %H0, #0\n"
1237 -" bmi 2f\n"
1238 +" bmi 4f\n"
1239 " strexd %1, %0, %H0, [%3]\n"
1240 " teq %1, #0\n"
1241 " bne 1b\n"
1242 -"2:"
1243 +"4:\n"
1244 +
1245 +#ifdef CONFIG_PAX_REFCOUNT
1246 + _ASM_EXTABLE(2b, 4b)
1247 +#endif
1248 +
1249 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1250 : "r" (&v->counter)
1251 : "cc");
1252 @@ -432,13 +782,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1253 " teq %0, %5\n"
1254 " teqeq %H0, %H5\n"
1255 " moveq %1, #0\n"
1256 -" beq 2f\n"
1257 +" beq 4f\n"
1258 " adds %0, %0, %6\n"
1259 " adc %H0, %H0, %H6\n"
1260 +
1261 +#ifdef CONFIG_PAX_REFCOUNT
1262 +" bvc 3f\n"
1263 +"2: bkpt 0xf103\n"
1264 +"3:\n"
1265 +#endif
1266 +
1267 " strexd %2, %0, %H0, [%4]\n"
1268 " teq %2, #0\n"
1269 " bne 1b\n"
1270 -"2:"
1271 +"4:\n"
1272 +
1273 +#ifdef CONFIG_PAX_REFCOUNT
1274 + _ASM_EXTABLE(2b, 4b)
1275 +#endif
1276 +
1277 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1278 : "r" (&v->counter), "r" (u), "r" (a)
1279 : "cc");
1280 @@ -451,10 +813,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1281
1282 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1283 #define atomic64_inc(v) atomic64_add(1LL, (v))
1284 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1285 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1286 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1287 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1288 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1289 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1290 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1291 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1292 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1293 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1294 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1295 index 75fe66b..2255c86 100644
1296 --- a/arch/arm/include/asm/cache.h
1297 +++ b/arch/arm/include/asm/cache.h
1298 @@ -4,8 +4,10 @@
1299 #ifndef __ASMARM_CACHE_H
1300 #define __ASMARM_CACHE_H
1301
1302 +#include <linux/const.h>
1303 +
1304 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1305 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1306 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1307
1308 /*
1309 * Memory returned by kmalloc() may be used for DMA, so we must make
1310 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1311 index d5d8d5c..ad92c96 100644
1312 --- a/arch/arm/include/asm/cacheflush.h
1313 +++ b/arch/arm/include/asm/cacheflush.h
1314 @@ -108,7 +108,7 @@ struct cpu_cache_fns {
1315 void (*dma_unmap_area)(const void *, size_t, int);
1316
1317 void (*dma_flush_range)(const void *, const void *);
1318 -};
1319 +} __no_const;
1320
1321 /*
1322 * Select the calling method
1323 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1324 index 0e9ce8d..6ef1e03 100644
1325 --- a/arch/arm/include/asm/elf.h
1326 +++ b/arch/arm/include/asm/elf.h
1327 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1328 the loader. We need to make sure that it is out of the way of the program
1329 that it will "exec", and that there is sufficient room for the brk. */
1330
1331 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1332 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1333 +
1334 +#ifdef CONFIG_PAX_ASLR
1335 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1336 +
1337 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1338 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1339 +#endif
1340
1341 /* When the program starts, a1 contains a pointer to a function to be
1342 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1343 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1344 extern void elf_set_personality(const struct elf32_hdr *);
1345 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1346
1347 -struct mm_struct;
1348 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1349 -#define arch_randomize_brk arch_randomize_brk
1350 -
1351 extern int vectors_user_mapping(void);
1352 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
1353 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
1354 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1355 index e51b1e8..32a3113 100644
1356 --- a/arch/arm/include/asm/kmap_types.h
1357 +++ b/arch/arm/include/asm/kmap_types.h
1358 @@ -21,6 +21,7 @@ enum km_type {
1359 KM_L1_CACHE,
1360 KM_L2_CACHE,
1361 KM_KDB,
1362 + KM_CLEARPAGE,
1363 KM_TYPE_NR
1364 };
1365
1366 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1367 index 53426c6..c7baff3 100644
1368 --- a/arch/arm/include/asm/outercache.h
1369 +++ b/arch/arm/include/asm/outercache.h
1370 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1371 #endif
1372 void (*set_debug)(unsigned long);
1373 void (*resume)(void);
1374 -};
1375 +} __no_const;
1376
1377 #ifdef CONFIG_OUTER_CACHE
1378
1379 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1380 index 97b440c..b7ff179 100644
1381 --- a/arch/arm/include/asm/page.h
1382 +++ b/arch/arm/include/asm/page.h
1383 @@ -123,7 +123,7 @@ struct cpu_user_fns {
1384 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1385 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1386 unsigned long vaddr, struct vm_area_struct *vma);
1387 -};
1388 +} __no_const;
1389
1390 #ifdef MULTI_USER
1391 extern struct cpu_user_fns cpu_user;
1392 diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1393 index e4c96cc..1145653 100644
1394 --- a/arch/arm/include/asm/system.h
1395 +++ b/arch/arm/include/asm/system.h
1396 @@ -98,6 +98,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
1397
1398 #define xchg(ptr,x) \
1399 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1400 +#define xchg_unchecked(ptr,x) \
1401 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1402
1403 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1404
1405 @@ -534,6 +536,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1406
1407 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1408
1409 +#define _ASM_EXTABLE(from, to) \
1410 +" .pushsection __ex_table,\"a\"\n"\
1411 +" .align 3\n" \
1412 +" .long " #from ", " #to"\n" \
1413 +" .popsection"
1414 +
1415 +
1416 #endif /* __ASSEMBLY__ */
1417
1418 #define arch_align_stack(x) (x)
1419 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1420 index 2958976..12ccac4 100644
1421 --- a/arch/arm/include/asm/uaccess.h
1422 +++ b/arch/arm/include/asm/uaccess.h
1423 @@ -22,6 +22,8 @@
1424 #define VERIFY_READ 0
1425 #define VERIFY_WRITE 1
1426
1427 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1428 +
1429 /*
1430 * The exception table consists of pairs of addresses: the first is the
1431 * address of an instruction that is allowed to fault, and the second is
1432 @@ -387,8 +389,23 @@ do { \
1433
1434
1435 #ifdef CONFIG_MMU
1436 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1437 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1438 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1439 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1440 +
1441 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1442 +{
1443 + if (!__builtin_constant_p(n))
1444 + check_object_size(to, n, false);
1445 + return ___copy_from_user(to, from, n);
1446 +}
1447 +
1448 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1449 +{
1450 + if (!__builtin_constant_p(n))
1451 + check_object_size(from, n, true);
1452 + return ___copy_to_user(to, from, n);
1453 +}
1454 +
1455 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1456 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1457 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1458 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1459
1460 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1461 {
1462 + if ((long)n < 0)
1463 + return n;
1464 +
1465 if (access_ok(VERIFY_READ, from, n))
1466 n = __copy_from_user(to, from, n);
1467 else /* security hole - plug it */
1468 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1469
1470 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1471 {
1472 + if ((long)n < 0)
1473 + return n;
1474 +
1475 if (access_ok(VERIFY_WRITE, to, n))
1476 n = __copy_to_user(to, from, n);
1477 return n;
1478 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1479 index 5b0bce6..becd81c 100644
1480 --- a/arch/arm/kernel/armksyms.c
1481 +++ b/arch/arm/kernel/armksyms.c
1482 @@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1483 #ifdef CONFIG_MMU
1484 EXPORT_SYMBOL(copy_page);
1485
1486 -EXPORT_SYMBOL(__copy_from_user);
1487 -EXPORT_SYMBOL(__copy_to_user);
1488 +EXPORT_SYMBOL(___copy_from_user);
1489 +EXPORT_SYMBOL(___copy_to_user);
1490 EXPORT_SYMBOL(__clear_user);
1491
1492 EXPORT_SYMBOL(__get_user_1);
1493 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1494 index 971d65c..cc936fb 100644
1495 --- a/arch/arm/kernel/process.c
1496 +++ b/arch/arm/kernel/process.c
1497 @@ -28,7 +28,6 @@
1498 #include <linux/tick.h>
1499 #include <linux/utsname.h>
1500 #include <linux/uaccess.h>
1501 -#include <linux/random.h>
1502 #include <linux/hw_breakpoint.h>
1503 #include <linux/cpuidle.h>
1504
1505 @@ -273,9 +272,10 @@ void machine_power_off(void)
1506 machine_shutdown();
1507 if (pm_power_off)
1508 pm_power_off();
1509 + BUG();
1510 }
1511
1512 -void machine_restart(char *cmd)
1513 +__noreturn void machine_restart(char *cmd)
1514 {
1515 machine_shutdown();
1516
1517 @@ -517,12 +517,6 @@ unsigned long get_wchan(struct task_struct *p)
1518 return 0;
1519 }
1520
1521 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1522 -{
1523 - unsigned long range_end = mm->brk + 0x02000000;
1524 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1525 -}
1526 -
1527 #ifdef CONFIG_MMU
1528 /*
1529 * The vectors page is always readable from user space for the
1530 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1531 index a255c39..4a19b25 100644
1532 --- a/arch/arm/kernel/setup.c
1533 +++ b/arch/arm/kernel/setup.c
1534 @@ -109,13 +109,13 @@ struct processor processor __read_mostly;
1535 struct cpu_tlb_fns cpu_tlb __read_mostly;
1536 #endif
1537 #ifdef MULTI_USER
1538 -struct cpu_user_fns cpu_user __read_mostly;
1539 +struct cpu_user_fns cpu_user __read_only;
1540 #endif
1541 #ifdef MULTI_CACHE
1542 -struct cpu_cache_fns cpu_cache __read_mostly;
1543 +struct cpu_cache_fns cpu_cache __read_only;
1544 #endif
1545 #ifdef CONFIG_OUTER_CACHE
1546 -struct outer_cache_fns outer_cache __read_mostly;
1547 +struct outer_cache_fns outer_cache __read_only;
1548 EXPORT_SYMBOL(outer_cache);
1549 #endif
1550
1551 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1552 index f84dfe6..13e94f7 100644
1553 --- a/arch/arm/kernel/traps.c
1554 +++ b/arch/arm/kernel/traps.c
1555 @@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1556
1557 static DEFINE_RAW_SPINLOCK(die_lock);
1558
1559 +extern void gr_handle_kernel_exploit(void);
1560 +
1561 /*
1562 * This function is protected against re-entrancy.
1563 */
1564 @@ -291,6 +293,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1565 panic("Fatal exception in interrupt");
1566 if (panic_on_oops)
1567 panic("Fatal exception");
1568 +
1569 + gr_handle_kernel_exploit();
1570 +
1571 if (ret != NOTIFY_STOP)
1572 do_exit(SIGSEGV);
1573 }
1574 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1575 index 66a477a..bee61d3 100644
1576 --- a/arch/arm/lib/copy_from_user.S
1577 +++ b/arch/arm/lib/copy_from_user.S
1578 @@ -16,7 +16,7 @@
1579 /*
1580 * Prototype:
1581 *
1582 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1583 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1584 *
1585 * Purpose:
1586 *
1587 @@ -84,11 +84,11 @@
1588
1589 .text
1590
1591 -ENTRY(__copy_from_user)
1592 +ENTRY(___copy_from_user)
1593
1594 #include "copy_template.S"
1595
1596 -ENDPROC(__copy_from_user)
1597 +ENDPROC(___copy_from_user)
1598
1599 .pushsection .fixup,"ax"
1600 .align 0
1601 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1602 index 6ee2f67..d1cce76 100644
1603 --- a/arch/arm/lib/copy_page.S
1604 +++ b/arch/arm/lib/copy_page.S
1605 @@ -10,6 +10,7 @@
1606 * ASM optimised string functions
1607 */
1608 #include <linux/linkage.h>
1609 +#include <linux/const.h>
1610 #include <asm/assembler.h>
1611 #include <asm/asm-offsets.h>
1612 #include <asm/cache.h>
1613 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1614 index d066df6..df28194 100644
1615 --- a/arch/arm/lib/copy_to_user.S
1616 +++ b/arch/arm/lib/copy_to_user.S
1617 @@ -16,7 +16,7 @@
1618 /*
1619 * Prototype:
1620 *
1621 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1622 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1623 *
1624 * Purpose:
1625 *
1626 @@ -88,11 +88,11 @@
1627 .text
1628
1629 ENTRY(__copy_to_user_std)
1630 -WEAK(__copy_to_user)
1631 +WEAK(___copy_to_user)
1632
1633 #include "copy_template.S"
1634
1635 -ENDPROC(__copy_to_user)
1636 +ENDPROC(___copy_to_user)
1637 ENDPROC(__copy_to_user_std)
1638
1639 .pushsection .fixup,"ax"
1640 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1641 index 5c908b1..e712687 100644
1642 --- a/arch/arm/lib/uaccess.S
1643 +++ b/arch/arm/lib/uaccess.S
1644 @@ -20,7 +20,7 @@
1645
1646 #define PAGE_SHIFT 12
1647
1648 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1649 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1650 * Purpose : copy a block to user memory from kernel memory
1651 * Params : to - user memory
1652 * : from - kernel memory
1653 @@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1654 sub r2, r2, ip
1655 b .Lc2u_dest_aligned
1656
1657 -ENTRY(__copy_to_user)
1658 +ENTRY(___copy_to_user)
1659 stmfd sp!, {r2, r4 - r7, lr}
1660 cmp r2, #4
1661 blt .Lc2u_not_enough
1662 @@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1663 ldrgtb r3, [r1], #0
1664 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1665 b .Lc2u_finished
1666 -ENDPROC(__copy_to_user)
1667 +ENDPROC(___copy_to_user)
1668
1669 .pushsection .fixup,"ax"
1670 .align 0
1671 9001: ldmfd sp!, {r0, r4 - r7, pc}
1672 .popsection
1673
1674 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1675 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1676 * Purpose : copy a block from user memory to kernel memory
1677 * Params : to - kernel memory
1678 * : from - user memory
1679 @@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1680 sub r2, r2, ip
1681 b .Lcfu_dest_aligned
1682
1683 -ENTRY(__copy_from_user)
1684 +ENTRY(___copy_from_user)
1685 stmfd sp!, {r0, r2, r4 - r7, lr}
1686 cmp r2, #4
1687 blt .Lcfu_not_enough
1688 @@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1689 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1690 strgtb r3, [r0], #1
1691 b .Lcfu_finished
1692 -ENDPROC(__copy_from_user)
1693 +ENDPROC(___copy_from_user)
1694
1695 .pushsection .fixup,"ax"
1696 .align 0
1697 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1698 index 025f742..8432b08 100644
1699 --- a/arch/arm/lib/uaccess_with_memcpy.c
1700 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1701 @@ -104,7 +104,7 @@ out:
1702 }
1703
1704 unsigned long
1705 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1706 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1707 {
1708 /*
1709 * This test is stubbed out of the main function above to keep
1710 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1711 index 6722627..8f97548c 100644
1712 --- a/arch/arm/mach-omap2/board-n8x0.c
1713 +++ b/arch/arm/mach-omap2/board-n8x0.c
1714 @@ -597,7 +597,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1715 }
1716 #endif
1717
1718 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1719 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1720 .late_init = n8x0_menelaus_late_init,
1721 };
1722
1723 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
1724 index 2b2d51c..0127490 100644
1725 --- a/arch/arm/mach-ux500/mbox-db5500.c
1726 +++ b/arch/arm/mach-ux500/mbox-db5500.c
1727 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
1728 return sprintf(buf, "0x%X\n", mbox_value);
1729 }
1730
1731 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1732 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1733
1734 static int mbox_show(struct seq_file *s, void *data)
1735 {
1736 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1737 index bb7eac3..3bade16 100644
1738 --- a/arch/arm/mm/fault.c
1739 +++ b/arch/arm/mm/fault.c
1740 @@ -172,6 +172,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1741 }
1742 #endif
1743
1744 +#ifdef CONFIG_PAX_PAGEEXEC
1745 + if (fsr & FSR_LNX_PF) {
1746 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1747 + do_group_exit(SIGKILL);
1748 + }
1749 +#endif
1750 +
1751 tsk->thread.address = addr;
1752 tsk->thread.error_code = fsr;
1753 tsk->thread.trap_no = 14;
1754 @@ -393,6 +400,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1755 }
1756 #endif /* CONFIG_MMU */
1757
1758 +#ifdef CONFIG_PAX_PAGEEXEC
1759 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1760 +{
1761 + long i;
1762 +
1763 + printk(KERN_ERR "PAX: bytes at PC: ");
1764 + for (i = 0; i < 20; i++) {
1765 + unsigned char c;
1766 + if (get_user(c, (__force unsigned char __user *)pc+i))
1767 + printk(KERN_CONT "?? ");
1768 + else
1769 + printk(KERN_CONT "%02x ", c);
1770 + }
1771 + printk("\n");
1772 +
1773 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1774 + for (i = -1; i < 20; i++) {
1775 + unsigned long c;
1776 + if (get_user(c, (__force unsigned long __user *)sp+i))
1777 + printk(KERN_CONT "???????? ");
1778 + else
1779 + printk(KERN_CONT "%08lx ", c);
1780 + }
1781 + printk("\n");
1782 +}
1783 +#endif
1784 +
1785 /*
1786 * First Level Translation Fault Handler
1787 *
1788 @@ -573,6 +607,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1789 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1790 struct siginfo info;
1791
1792 +#ifdef CONFIG_PAX_REFCOUNT
1793 + if (fsr_fs(ifsr) == 2) {
1794 + unsigned int bkpt;
1795 +
1796 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1797 + current->thread.error_code = ifsr;
1798 + current->thread.trap_no = 0;
1799 + pax_report_refcount_overflow(regs);
1800 + fixup_exception(regs);
1801 + return;
1802 + }
1803 + }
1804 +#endif
1805 +
1806 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1807 return;
1808
1809 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1810 index ce8cb19..3ec539d 100644
1811 --- a/arch/arm/mm/mmap.c
1812 +++ b/arch/arm/mm/mmap.c
1813 @@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1814 if (len > TASK_SIZE)
1815 return -ENOMEM;
1816
1817 +#ifdef CONFIG_PAX_RANDMMAP
1818 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1819 +#endif
1820 +
1821 if (addr) {
1822 if (do_align)
1823 addr = COLOUR_ALIGN(addr, pgoff);
1824 @@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1825 addr = PAGE_ALIGN(addr);
1826
1827 vma = find_vma(mm, addr);
1828 - if (TASK_SIZE - len >= addr &&
1829 - (!vma || addr + len <= vma->vm_start))
1830 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1831 return addr;
1832 }
1833 if (len > mm->cached_hole_size) {
1834 - start_addr = addr = mm->free_area_cache;
1835 + start_addr = addr = mm->free_area_cache;
1836 } else {
1837 - start_addr = addr = mm->mmap_base;
1838 - mm->cached_hole_size = 0;
1839 + start_addr = addr = mm->mmap_base;
1840 + mm->cached_hole_size = 0;
1841 }
1842
1843 full_search:
1844 @@ -124,14 +127,14 @@ full_search:
1845 * Start a new search - just in case we missed
1846 * some holes.
1847 */
1848 - if (start_addr != TASK_UNMAPPED_BASE) {
1849 - start_addr = addr = TASK_UNMAPPED_BASE;
1850 + if (start_addr != mm->mmap_base) {
1851 + start_addr = addr = mm->mmap_base;
1852 mm->cached_hole_size = 0;
1853 goto full_search;
1854 }
1855 return -ENOMEM;
1856 }
1857 - if (!vma || addr + len <= vma->vm_start) {
1858 + if (check_heap_stack_gap(vma, addr, len)) {
1859 /*
1860 * Remember the place where we stopped the search:
1861 */
1862 @@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1863
1864 if (mmap_is_legacy()) {
1865 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
1866 +
1867 +#ifdef CONFIG_PAX_RANDMMAP
1868 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1869 + mm->mmap_base += mm->delta_mmap;
1870 +#endif
1871 +
1872 mm->get_unmapped_area = arch_get_unmapped_area;
1873 mm->unmap_area = arch_unmap_area;
1874 } else {
1875 mm->mmap_base = mmap_base(random_factor);
1876 +
1877 +#ifdef CONFIG_PAX_RANDMMAP
1878 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1879 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
1880 +#endif
1881 +
1882 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
1883 mm->unmap_area = arch_unmap_area_topdown;
1884 }
1885 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
1886 index 71a6827..e7fbc23 100644
1887 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
1888 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
1889 @@ -43,7 +43,7 @@ struct samsung_dma_ops {
1890 int (*started)(unsigned ch);
1891 int (*flush)(unsigned ch);
1892 int (*stop)(unsigned ch);
1893 -};
1894 +} __no_const;
1895
1896 extern void *samsung_dmadev_get_ops(void);
1897 extern void *s3c_dma_get_ops(void);
1898 diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
1899 index 5f28cae..3d23723 100644
1900 --- a/arch/arm/plat-samsung/include/plat/ehci.h
1901 +++ b/arch/arm/plat-samsung/include/plat/ehci.h
1902 @@ -14,7 +14,7 @@
1903 struct s5p_ehci_platdata {
1904 int (*phy_init)(struct platform_device *pdev, int type);
1905 int (*phy_exit)(struct platform_device *pdev, int type);
1906 -};
1907 +} __no_const;
1908
1909 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
1910
1911 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
1912 index c3a58a1..78fbf54 100644
1913 --- a/arch/avr32/include/asm/cache.h
1914 +++ b/arch/avr32/include/asm/cache.h
1915 @@ -1,8 +1,10 @@
1916 #ifndef __ASM_AVR32_CACHE_H
1917 #define __ASM_AVR32_CACHE_H
1918
1919 +#include <linux/const.h>
1920 +
1921 #define L1_CACHE_SHIFT 5
1922 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1923 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1924
1925 /*
1926 * Memory returned by kmalloc() may be used for DMA, so we must make
1927 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1928 index 3b3159b..425ea94 100644
1929 --- a/arch/avr32/include/asm/elf.h
1930 +++ b/arch/avr32/include/asm/elf.h
1931 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1932 the loader. We need to make sure that it is out of the way of the program
1933 that it will "exec", and that there is sufficient room for the brk. */
1934
1935 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1936 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1937
1938 +#ifdef CONFIG_PAX_ASLR
1939 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1940 +
1941 +#define PAX_DELTA_MMAP_LEN 15
1942 +#define PAX_DELTA_STACK_LEN 15
1943 +#endif
1944
1945 /* This yields a mask that user programs can use to figure out what
1946 instruction set this CPU supports. This could be done in user space,
1947 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1948 index b7f5c68..556135c 100644
1949 --- a/arch/avr32/include/asm/kmap_types.h
1950 +++ b/arch/avr32/include/asm/kmap_types.h
1951 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1952 D(11) KM_IRQ1,
1953 D(12) KM_SOFTIRQ0,
1954 D(13) KM_SOFTIRQ1,
1955 -D(14) KM_TYPE_NR
1956 +D(14) KM_CLEARPAGE,
1957 +D(15) KM_TYPE_NR
1958 };
1959
1960 #undef D
1961 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1962 index f7040a1..db9f300 100644
1963 --- a/arch/avr32/mm/fault.c
1964 +++ b/arch/avr32/mm/fault.c
1965 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1966
1967 int exception_trace = 1;
1968
1969 +#ifdef CONFIG_PAX_PAGEEXEC
1970 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1971 +{
1972 + unsigned long i;
1973 +
1974 + printk(KERN_ERR "PAX: bytes at PC: ");
1975 + for (i = 0; i < 20; i++) {
1976 + unsigned char c;
1977 + if (get_user(c, (unsigned char *)pc+i))
1978 + printk(KERN_CONT "???????? ");
1979 + else
1980 + printk(KERN_CONT "%02x ", c);
1981 + }
1982 + printk("\n");
1983 +}
1984 +#endif
1985 +
1986 /*
1987 * This routine handles page faults. It determines the address and the
1988 * problem, and then passes it off to one of the appropriate routines.
1989 @@ -156,6 +173,16 @@ bad_area:
1990 up_read(&mm->mmap_sem);
1991
1992 if (user_mode(regs)) {
1993 +
1994 +#ifdef CONFIG_PAX_PAGEEXEC
1995 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1996 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1997 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1998 + do_group_exit(SIGKILL);
1999 + }
2000 + }
2001 +#endif
2002 +
2003 if (exception_trace && printk_ratelimit())
2004 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2005 "sp %08lx ecr %lu\n",
2006 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2007 index 568885a..f8008df 100644
2008 --- a/arch/blackfin/include/asm/cache.h
2009 +++ b/arch/blackfin/include/asm/cache.h
2010 @@ -7,6 +7,7 @@
2011 #ifndef __ARCH_BLACKFIN_CACHE_H
2012 #define __ARCH_BLACKFIN_CACHE_H
2013
2014 +#include <linux/const.h>
2015 #include <linux/linkage.h> /* for asmlinkage */
2016
2017 /*
2018 @@ -14,7 +15,7 @@
2019 * Blackfin loads 32 bytes for cache
2020 */
2021 #define L1_CACHE_SHIFT 5
2022 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2023 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2024 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2025
2026 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2027 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2028 index aea2718..3639a60 100644
2029 --- a/arch/cris/include/arch-v10/arch/cache.h
2030 +++ b/arch/cris/include/arch-v10/arch/cache.h
2031 @@ -1,8 +1,9 @@
2032 #ifndef _ASM_ARCH_CACHE_H
2033 #define _ASM_ARCH_CACHE_H
2034
2035 +#include <linux/const.h>
2036 /* Etrax 100LX have 32-byte cache-lines. */
2037 -#define L1_CACHE_BYTES 32
2038 #define L1_CACHE_SHIFT 5
2039 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2040
2041 #endif /* _ASM_ARCH_CACHE_H */
2042 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2043 index 1de779f..336fad3 100644
2044 --- a/arch/cris/include/arch-v32/arch/cache.h
2045 +++ b/arch/cris/include/arch-v32/arch/cache.h
2046 @@ -1,11 +1,12 @@
2047 #ifndef _ASM_CRIS_ARCH_CACHE_H
2048 #define _ASM_CRIS_ARCH_CACHE_H
2049
2050 +#include <linux/const.h>
2051 #include <arch/hwregs/dma.h>
2052
2053 /* A cache-line is 32 bytes. */
2054 -#define L1_CACHE_BYTES 32
2055 #define L1_CACHE_SHIFT 5
2056 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2057
2058 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2059
2060 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2061 index 0d8a7d6..d0c9ff5 100644
2062 --- a/arch/frv/include/asm/atomic.h
2063 +++ b/arch/frv/include/asm/atomic.h
2064 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
2065 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2066 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2067
2068 +#define atomic64_read_unchecked(v) atomic64_read(v)
2069 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2070 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2071 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2072 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2073 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2074 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2075 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2076 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2077 +
2078 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2079 {
2080 int c, old;
2081 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2082 index 2797163..c2a401d 100644
2083 --- a/arch/frv/include/asm/cache.h
2084 +++ b/arch/frv/include/asm/cache.h
2085 @@ -12,10 +12,11 @@
2086 #ifndef __ASM_CACHE_H
2087 #define __ASM_CACHE_H
2088
2089 +#include <linux/const.h>
2090
2091 /* bytes per L1 cache line */
2092 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2093 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2094 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2095
2096 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2097 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2098 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2099 index f8e16b2..c73ff79 100644
2100 --- a/arch/frv/include/asm/kmap_types.h
2101 +++ b/arch/frv/include/asm/kmap_types.h
2102 @@ -23,6 +23,7 @@ enum km_type {
2103 KM_IRQ1,
2104 KM_SOFTIRQ0,
2105 KM_SOFTIRQ1,
2106 + KM_CLEARPAGE,
2107 KM_TYPE_NR
2108 };
2109
2110 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2111 index 385fd30..6c3d97e 100644
2112 --- a/arch/frv/mm/elf-fdpic.c
2113 +++ b/arch/frv/mm/elf-fdpic.c
2114 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2115 if (addr) {
2116 addr = PAGE_ALIGN(addr);
2117 vma = find_vma(current->mm, addr);
2118 - if (TASK_SIZE - len >= addr &&
2119 - (!vma || addr + len <= vma->vm_start))
2120 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2121 goto success;
2122 }
2123
2124 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2125 for (; vma; vma = vma->vm_next) {
2126 if (addr > limit)
2127 break;
2128 - if (addr + len <= vma->vm_start)
2129 + if (check_heap_stack_gap(vma, addr, len))
2130 goto success;
2131 addr = vma->vm_end;
2132 }
2133 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2134 for (; vma; vma = vma->vm_next) {
2135 if (addr > limit)
2136 break;
2137 - if (addr + len <= vma->vm_start)
2138 + if (check_heap_stack_gap(vma, addr, len))
2139 goto success;
2140 addr = vma->vm_end;
2141 }
2142 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2143 index c635028..6d9445a 100644
2144 --- a/arch/h8300/include/asm/cache.h
2145 +++ b/arch/h8300/include/asm/cache.h
2146 @@ -1,8 +1,10 @@
2147 #ifndef __ARCH_H8300_CACHE_H
2148 #define __ARCH_H8300_CACHE_H
2149
2150 +#include <linux/const.h>
2151 +
2152 /* bytes per L1 cache line */
2153 -#define L1_CACHE_BYTES 4
2154 +#define L1_CACHE_BYTES _AC(4,UL)
2155
2156 /* m68k-elf-gcc 2.95.2 doesn't like these */
2157
2158 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2159 index 0f01de2..d37d309 100644
2160 --- a/arch/hexagon/include/asm/cache.h
2161 +++ b/arch/hexagon/include/asm/cache.h
2162 @@ -21,9 +21,11 @@
2163 #ifndef __ASM_CACHE_H
2164 #define __ASM_CACHE_H
2165
2166 +#include <linux/const.h>
2167 +
2168 /* Bytes per L1 cache line */
2169 -#define L1_CACHE_SHIFT (5)
2170 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2171 +#define L1_CACHE_SHIFT 5
2172 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2173
2174 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2175 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2176 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2177 index 3fad89e..3047da5 100644
2178 --- a/arch/ia64/include/asm/atomic.h
2179 +++ b/arch/ia64/include/asm/atomic.h
2180 @@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2181 #define atomic64_inc(v) atomic64_add(1, (v))
2182 #define atomic64_dec(v) atomic64_sub(1, (v))
2183
2184 +#define atomic64_read_unchecked(v) atomic64_read(v)
2185 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2186 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2187 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2188 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2189 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2190 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2191 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2192 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2193 +
2194 /* Atomic operations are already serializing */
2195 #define smp_mb__before_atomic_dec() barrier()
2196 #define smp_mb__after_atomic_dec() barrier()
2197 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2198 index 988254a..e1ee885 100644
2199 --- a/arch/ia64/include/asm/cache.h
2200 +++ b/arch/ia64/include/asm/cache.h
2201 @@ -1,6 +1,7 @@
2202 #ifndef _ASM_IA64_CACHE_H
2203 #define _ASM_IA64_CACHE_H
2204
2205 +#include <linux/const.h>
2206
2207 /*
2208 * Copyright (C) 1998-2000 Hewlett-Packard Co
2209 @@ -9,7 +10,7 @@
2210
2211 /* Bytes per L1 (data) cache line. */
2212 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2213 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2214 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2215
2216 #ifdef CONFIG_SMP
2217 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2218 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2219 index b5298eb..67c6e62 100644
2220 --- a/arch/ia64/include/asm/elf.h
2221 +++ b/arch/ia64/include/asm/elf.h
2222 @@ -42,6 +42,13 @@
2223 */
2224 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2225
2226 +#ifdef CONFIG_PAX_ASLR
2227 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2228 +
2229 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2230 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2231 +#endif
2232 +
2233 #define PT_IA_64_UNWIND 0x70000001
2234
2235 /* IA-64 relocations: */
2236 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2237 index 1a97af3..7529d31 100644
2238 --- a/arch/ia64/include/asm/pgtable.h
2239 +++ b/arch/ia64/include/asm/pgtable.h
2240 @@ -12,7 +12,7 @@
2241 * David Mosberger-Tang <davidm@hpl.hp.com>
2242 */
2243
2244 -
2245 +#include <linux/const.h>
2246 #include <asm/mman.h>
2247 #include <asm/page.h>
2248 #include <asm/processor.h>
2249 @@ -143,6 +143,17 @@
2250 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2251 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2252 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2253 +
2254 +#ifdef CONFIG_PAX_PAGEEXEC
2255 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2256 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2257 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2258 +#else
2259 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2260 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2261 +# define PAGE_COPY_NOEXEC PAGE_COPY
2262 +#endif
2263 +
2264 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2265 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2266 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2267 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2268 index b77768d..e0795eb 100644
2269 --- a/arch/ia64/include/asm/spinlock.h
2270 +++ b/arch/ia64/include/asm/spinlock.h
2271 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2272 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2273
2274 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2275 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2276 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2277 }
2278
2279 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2280 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2281 index 449c8c0..432a3d2 100644
2282 --- a/arch/ia64/include/asm/uaccess.h
2283 +++ b/arch/ia64/include/asm/uaccess.h
2284 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2285 const void *__cu_from = (from); \
2286 long __cu_len = (n); \
2287 \
2288 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2289 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2290 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2291 __cu_len; \
2292 })
2293 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2294 long __cu_len = (n); \
2295 \
2296 __chk_user_ptr(__cu_from); \
2297 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2298 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2299 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2300 __cu_len; \
2301 })
2302 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2303 index 24603be..948052d 100644
2304 --- a/arch/ia64/kernel/module.c
2305 +++ b/arch/ia64/kernel/module.c
2306 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2307 void
2308 module_free (struct module *mod, void *module_region)
2309 {
2310 - if (mod && mod->arch.init_unw_table &&
2311 - module_region == mod->module_init) {
2312 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2313 unw_remove_unwind_table(mod->arch.init_unw_table);
2314 mod->arch.init_unw_table = NULL;
2315 }
2316 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2317 }
2318
2319 static inline int
2320 +in_init_rx (const struct module *mod, uint64_t addr)
2321 +{
2322 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2323 +}
2324 +
2325 +static inline int
2326 +in_init_rw (const struct module *mod, uint64_t addr)
2327 +{
2328 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2329 +}
2330 +
2331 +static inline int
2332 in_init (const struct module *mod, uint64_t addr)
2333 {
2334 - return addr - (uint64_t) mod->module_init < mod->init_size;
2335 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2336 +}
2337 +
2338 +static inline int
2339 +in_core_rx (const struct module *mod, uint64_t addr)
2340 +{
2341 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2342 +}
2343 +
2344 +static inline int
2345 +in_core_rw (const struct module *mod, uint64_t addr)
2346 +{
2347 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2348 }
2349
2350 static inline int
2351 in_core (const struct module *mod, uint64_t addr)
2352 {
2353 - return addr - (uint64_t) mod->module_core < mod->core_size;
2354 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2355 }
2356
2357 static inline int
2358 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2359 break;
2360
2361 case RV_BDREL:
2362 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2363 + if (in_init_rx(mod, val))
2364 + val -= (uint64_t) mod->module_init_rx;
2365 + else if (in_init_rw(mod, val))
2366 + val -= (uint64_t) mod->module_init_rw;
2367 + else if (in_core_rx(mod, val))
2368 + val -= (uint64_t) mod->module_core_rx;
2369 + else if (in_core_rw(mod, val))
2370 + val -= (uint64_t) mod->module_core_rw;
2371 break;
2372
2373 case RV_LTV:
2374 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2375 * addresses have been selected...
2376 */
2377 uint64_t gp;
2378 - if (mod->core_size > MAX_LTOFF)
2379 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2380 /*
2381 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2382 * at the end of the module.
2383 */
2384 - gp = mod->core_size - MAX_LTOFF / 2;
2385 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2386 else
2387 - gp = mod->core_size / 2;
2388 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2389 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2390 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2391 mod->arch.gp = gp;
2392 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2393 }
2394 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2395 index 609d500..7dde2a8 100644
2396 --- a/arch/ia64/kernel/sys_ia64.c
2397 +++ b/arch/ia64/kernel/sys_ia64.c
2398 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2399 if (REGION_NUMBER(addr) == RGN_HPAGE)
2400 addr = 0;
2401 #endif
2402 +
2403 +#ifdef CONFIG_PAX_RANDMMAP
2404 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2405 + addr = mm->free_area_cache;
2406 + else
2407 +#endif
2408 +
2409 if (!addr)
2410 addr = mm->free_area_cache;
2411
2412 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2413 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2414 /* At this point: (!vma || addr < vma->vm_end). */
2415 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2416 - if (start_addr != TASK_UNMAPPED_BASE) {
2417 + if (start_addr != mm->mmap_base) {
2418 /* Start a new search --- just in case we missed some holes. */
2419 - addr = TASK_UNMAPPED_BASE;
2420 + addr = mm->mmap_base;
2421 goto full_search;
2422 }
2423 return -ENOMEM;
2424 }
2425 - if (!vma || addr + len <= vma->vm_start) {
2426 + if (check_heap_stack_gap(vma, addr, len)) {
2427 /* Remember the address where we stopped this search: */
2428 mm->free_area_cache = addr + len;
2429 return addr;
2430 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2431 index 53c0ba0..2accdde 100644
2432 --- a/arch/ia64/kernel/vmlinux.lds.S
2433 +++ b/arch/ia64/kernel/vmlinux.lds.S
2434 @@ -199,7 +199,7 @@ SECTIONS {
2435 /* Per-cpu data: */
2436 . = ALIGN(PERCPU_PAGE_SIZE);
2437 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2438 - __phys_per_cpu_start = __per_cpu_load;
2439 + __phys_per_cpu_start = per_cpu_load;
2440 /*
2441 * ensure percpu data fits
2442 * into percpu page size
2443 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2444 index 20b3593..1ce77f0 100644
2445 --- a/arch/ia64/mm/fault.c
2446 +++ b/arch/ia64/mm/fault.c
2447 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
2448 return pte_present(pte);
2449 }
2450
2451 +#ifdef CONFIG_PAX_PAGEEXEC
2452 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2453 +{
2454 + unsigned long i;
2455 +
2456 + printk(KERN_ERR "PAX: bytes at PC: ");
2457 + for (i = 0; i < 8; i++) {
2458 + unsigned int c;
2459 + if (get_user(c, (unsigned int *)pc+i))
2460 + printk(KERN_CONT "???????? ");
2461 + else
2462 + printk(KERN_CONT "%08x ", c);
2463 + }
2464 + printk("\n");
2465 +}
2466 +#endif
2467 +
2468 void __kprobes
2469 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2470 {
2471 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2472 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2473 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2474
2475 - if ((vma->vm_flags & mask) != mask)
2476 + if ((vma->vm_flags & mask) != mask) {
2477 +
2478 +#ifdef CONFIG_PAX_PAGEEXEC
2479 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2480 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2481 + goto bad_area;
2482 +
2483 + up_read(&mm->mmap_sem);
2484 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2485 + do_group_exit(SIGKILL);
2486 + }
2487 +#endif
2488 +
2489 goto bad_area;
2490
2491 + }
2492 +
2493 /*
2494 * If for any reason at all we couldn't handle the fault, make
2495 * sure we exit gracefully rather than endlessly redo the
2496 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2497 index 5ca674b..e0e1b70 100644
2498 --- a/arch/ia64/mm/hugetlbpage.c
2499 +++ b/arch/ia64/mm/hugetlbpage.c
2500 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2501 /* At this point: (!vmm || addr < vmm->vm_end). */
2502 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2503 return -ENOMEM;
2504 - if (!vmm || (addr + len) <= vmm->vm_start)
2505 + if (check_heap_stack_gap(vmm, addr, len))
2506 return addr;
2507 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2508 }
2509 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2510 index 13df239d..cb52116 100644
2511 --- a/arch/ia64/mm/init.c
2512 +++ b/arch/ia64/mm/init.c
2513 @@ -121,6 +121,19 @@ ia64_init_addr_space (void)
2514 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2515 vma->vm_end = vma->vm_start + PAGE_SIZE;
2516 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2517 +
2518 +#ifdef CONFIG_PAX_PAGEEXEC
2519 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2520 + vma->vm_flags &= ~VM_EXEC;
2521 +
2522 +#ifdef CONFIG_PAX_MPROTECT
2523 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2524 + vma->vm_flags &= ~VM_MAYEXEC;
2525 +#endif
2526 +
2527 + }
2528 +#endif
2529 +
2530 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2531 down_write(&current->mm->mmap_sem);
2532 if (insert_vm_struct(current->mm, vma)) {
2533 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2534 index 40b3ee9..8c2c112 100644
2535 --- a/arch/m32r/include/asm/cache.h
2536 +++ b/arch/m32r/include/asm/cache.h
2537 @@ -1,8 +1,10 @@
2538 #ifndef _ASM_M32R_CACHE_H
2539 #define _ASM_M32R_CACHE_H
2540
2541 +#include <linux/const.h>
2542 +
2543 /* L1 cache line size */
2544 #define L1_CACHE_SHIFT 4
2545 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2546 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2547
2548 #endif /* _ASM_M32R_CACHE_H */
2549 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2550 index 82abd15..d95ae5d 100644
2551 --- a/arch/m32r/lib/usercopy.c
2552 +++ b/arch/m32r/lib/usercopy.c
2553 @@ -14,6 +14,9 @@
2554 unsigned long
2555 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2556 {
2557 + if ((long)n < 0)
2558 + return n;
2559 +
2560 prefetch(from);
2561 if (access_ok(VERIFY_WRITE, to, n))
2562 __copy_user(to,from,n);
2563 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2564 unsigned long
2565 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2566 {
2567 + if ((long)n < 0)
2568 + return n;
2569 +
2570 prefetchw(to);
2571 if (access_ok(VERIFY_READ, from, n))
2572 __copy_user_zeroing(to,from,n);
2573 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2574 index 0395c51..5f26031 100644
2575 --- a/arch/m68k/include/asm/cache.h
2576 +++ b/arch/m68k/include/asm/cache.h
2577 @@ -4,9 +4,11 @@
2578 #ifndef __ARCH_M68K_CACHE_H
2579 #define __ARCH_M68K_CACHE_H
2580
2581 +#include <linux/const.h>
2582 +
2583 /* bytes per L1 cache line */
2584 #define L1_CACHE_SHIFT 4
2585 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2586 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2587
2588 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2589
2590 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2591 index 4efe96a..60e8699 100644
2592 --- a/arch/microblaze/include/asm/cache.h
2593 +++ b/arch/microblaze/include/asm/cache.h
2594 @@ -13,11 +13,12 @@
2595 #ifndef _ASM_MICROBLAZE_CACHE_H
2596 #define _ASM_MICROBLAZE_CACHE_H
2597
2598 +#include <linux/const.h>
2599 #include <asm/registers.h>
2600
2601 #define L1_CACHE_SHIFT 5
2602 /* word-granular cache in microblaze */
2603 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2604 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2605
2606 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2607
2608 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2609 index 1d93f81..67794d0 100644
2610 --- a/arch/mips/include/asm/atomic.h
2611 +++ b/arch/mips/include/asm/atomic.h
2612 @@ -21,6 +21,10 @@
2613 #include <asm/war.h>
2614 #include <asm/system.h>
2615
2616 +#ifdef CONFIG_GENERIC_ATOMIC64
2617 +#include <asm-generic/atomic64.h>
2618 +#endif
2619 +
2620 #define ATOMIC_INIT(i) { (i) }
2621
2622 /*
2623 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2624 */
2625 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2626
2627 +#define atomic64_read_unchecked(v) atomic64_read(v)
2628 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2629 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2630 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2631 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2632 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2633 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2634 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2635 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2636 +
2637 #endif /* CONFIG_64BIT */
2638
2639 /*
2640 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2641 index b4db69f..8f3b093 100644
2642 --- a/arch/mips/include/asm/cache.h
2643 +++ b/arch/mips/include/asm/cache.h
2644 @@ -9,10 +9,11 @@
2645 #ifndef _ASM_CACHE_H
2646 #define _ASM_CACHE_H
2647
2648 +#include <linux/const.h>
2649 #include <kmalloc.h>
2650
2651 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2652 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2653 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2654
2655 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2656 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2657 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2658 index 455c0ac..ad65fbe 100644
2659 --- a/arch/mips/include/asm/elf.h
2660 +++ b/arch/mips/include/asm/elf.h
2661 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
2662 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2663 #endif
2664
2665 +#ifdef CONFIG_PAX_ASLR
2666 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2667 +
2668 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2669 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2670 +#endif
2671 +
2672 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2673 struct linux_binprm;
2674 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2675 int uses_interp);
2676
2677 -struct mm_struct;
2678 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2679 -#define arch_randomize_brk arch_randomize_brk
2680 -
2681 #endif /* _ASM_ELF_H */
2682 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2683 index da9bd7d..91aa7ab 100644
2684 --- a/arch/mips/include/asm/page.h
2685 +++ b/arch/mips/include/asm/page.h
2686 @@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2687 #ifdef CONFIG_CPU_MIPS32
2688 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2689 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2690 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2691 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2692 #else
2693 typedef struct { unsigned long long pte; } pte_t;
2694 #define pte_val(x) ((x).pte)
2695 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2696 index 6018c80..7c37203 100644
2697 --- a/arch/mips/include/asm/system.h
2698 +++ b/arch/mips/include/asm/system.h
2699 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2700 */
2701 #define __ARCH_WANT_UNLOCKED_CTXSW
2702
2703 -extern unsigned long arch_align_stack(unsigned long sp);
2704 +#define arch_align_stack(x) ((x) & ~0xfUL)
2705
2706 #endif /* _ASM_SYSTEM_H */
2707 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2708 index 9fdd8bc..4bd7f1a 100644
2709 --- a/arch/mips/kernel/binfmt_elfn32.c
2710 +++ b/arch/mips/kernel/binfmt_elfn32.c
2711 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2712 #undef ELF_ET_DYN_BASE
2713 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2714
2715 +#ifdef CONFIG_PAX_ASLR
2716 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2717 +
2718 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2719 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2720 +#endif
2721 +
2722 #include <asm/processor.h>
2723 #include <linux/module.h>
2724 #include <linux/elfcore.h>
2725 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2726 index ff44823..97f8906 100644
2727 --- a/arch/mips/kernel/binfmt_elfo32.c
2728 +++ b/arch/mips/kernel/binfmt_elfo32.c
2729 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2730 #undef ELF_ET_DYN_BASE
2731 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2732
2733 +#ifdef CONFIG_PAX_ASLR
2734 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2735 +
2736 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2737 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2738 +#endif
2739 +
2740 #include <asm/processor.h>
2741
2742 /*
2743 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2744 index 7955409..ceaea7c 100644
2745 --- a/arch/mips/kernel/process.c
2746 +++ b/arch/mips/kernel/process.c
2747 @@ -483,15 +483,3 @@ unsigned long get_wchan(struct task_struct *task)
2748 out:
2749 return pc;
2750 }
2751 -
2752 -/*
2753 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2754 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2755 - */
2756 -unsigned long arch_align_stack(unsigned long sp)
2757 -{
2758 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2759 - sp -= get_random_int() & ~PAGE_MASK;
2760 -
2761 - return sp & ALMASK;
2762 -}
2763 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2764 index 69ebd58..e4bff83 100644
2765 --- a/arch/mips/mm/fault.c
2766 +++ b/arch/mips/mm/fault.c
2767 @@ -28,6 +28,23 @@
2768 #include <asm/highmem.h> /* For VMALLOC_END */
2769 #include <linux/kdebug.h>
2770
2771 +#ifdef CONFIG_PAX_PAGEEXEC
2772 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2773 +{
2774 + unsigned long i;
2775 +
2776 + printk(KERN_ERR "PAX: bytes at PC: ");
2777 + for (i = 0; i < 5; i++) {
2778 + unsigned int c;
2779 + if (get_user(c, (unsigned int *)pc+i))
2780 + printk(KERN_CONT "???????? ");
2781 + else
2782 + printk(KERN_CONT "%08x ", c);
2783 + }
2784 + printk("\n");
2785 +}
2786 +#endif
2787 +
2788 /*
2789 * This routine handles page faults. It determines the address,
2790 * and the problem, and then passes it off to one of the appropriate
2791 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
2792 index 302d779..7d35bf8 100644
2793 --- a/arch/mips/mm/mmap.c
2794 +++ b/arch/mips/mm/mmap.c
2795 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2796 do_color_align = 1;
2797
2798 /* requesting a specific address */
2799 +
2800 +#ifdef CONFIG_PAX_RANDMMAP
2801 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2802 +#endif
2803 +
2804 if (addr) {
2805 if (do_color_align)
2806 addr = COLOUR_ALIGN(addr, pgoff);
2807 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2808 addr = PAGE_ALIGN(addr);
2809
2810 vma = find_vma(mm, addr);
2811 - if (TASK_SIZE - len >= addr &&
2812 - (!vma || addr + len <= vma->vm_start))
2813 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
2814 return addr;
2815 }
2816
2817 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2818 /* At this point: (!vma || addr < vma->vm_end). */
2819 if (TASK_SIZE - len < addr)
2820 return -ENOMEM;
2821 - if (!vma || addr + len <= vma->vm_start)
2822 + if (check_heap_stack_gap(vmm, addr, len))
2823 return addr;
2824 addr = vma->vm_end;
2825 if (do_color_align)
2826 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2827 /* make sure it can fit in the remaining address space */
2828 if (likely(addr > len)) {
2829 vma = find_vma(mm, addr - len);
2830 - if (!vma || addr <= vma->vm_start) {
2831 + if (check_heap_stack_gap(vmm, addr - len, len))
2832 /* cache the address as a hint for next time */
2833 return mm->free_area_cache = addr - len;
2834 }
2835 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2836 * return with success:
2837 */
2838 vma = find_vma(mm, addr);
2839 - if (likely(!vma || addr + len <= vma->vm_start)) {
2840 + if (check_heap_stack_gap(vmm, addr, len)) {
2841 /* cache the address as a hint for next time */
2842 return mm->free_area_cache = addr;
2843 }
2844 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2845 mm->unmap_area = arch_unmap_area_topdown;
2846 }
2847 }
2848 -
2849 -static inline unsigned long brk_rnd(void)
2850 -{
2851 - unsigned long rnd = get_random_int();
2852 -
2853 - rnd = rnd << PAGE_SHIFT;
2854 - /* 8MB for 32bit, 256MB for 64bit */
2855 - if (TASK_IS_32BIT_ADDR)
2856 - rnd = rnd & 0x7ffffful;
2857 - else
2858 - rnd = rnd & 0xffffffful;
2859 -
2860 - return rnd;
2861 -}
2862 -
2863 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2864 -{
2865 - unsigned long base = mm->brk;
2866 - unsigned long ret;
2867 -
2868 - ret = PAGE_ALIGN(base + brk_rnd());
2869 -
2870 - if (ret < mm->brk)
2871 - return mm->brk;
2872 -
2873 - return ret;
2874 -}
2875 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2876 index 967d144..db12197 100644
2877 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
2878 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2879 @@ -11,12 +11,14 @@
2880 #ifndef _ASM_PROC_CACHE_H
2881 #define _ASM_PROC_CACHE_H
2882
2883 +#include <linux/const.h>
2884 +
2885 /* L1 cache */
2886
2887 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2888 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
2889 -#define L1_CACHE_BYTES 16 /* bytes per entry */
2890 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
2891 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2892 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
2893
2894 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2895 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2896 index bcb5df2..84fabd2 100644
2897 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2898 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2899 @@ -16,13 +16,15 @@
2900 #ifndef _ASM_PROC_CACHE_H
2901 #define _ASM_PROC_CACHE_H
2902
2903 +#include <linux/const.h>
2904 +
2905 /*
2906 * L1 cache
2907 */
2908 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2909 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
2910 -#define L1_CACHE_BYTES 32 /* bytes per entry */
2911 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
2912 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2913 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
2914
2915 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2916 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
2917 index 4ce7a01..449202a 100644
2918 --- a/arch/openrisc/include/asm/cache.h
2919 +++ b/arch/openrisc/include/asm/cache.h
2920 @@ -19,11 +19,13 @@
2921 #ifndef __ASM_OPENRISC_CACHE_H
2922 #define __ASM_OPENRISC_CACHE_H
2923
2924 +#include <linux/const.h>
2925 +
2926 /* FIXME: How can we replace these with values from the CPU...
2927 * they shouldn't be hard-coded!
2928 */
2929
2930 -#define L1_CACHE_BYTES 16
2931 #define L1_CACHE_SHIFT 4
2932 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2933
2934 #endif /* __ASM_OPENRISC_CACHE_H */
2935 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
2936 index 4054b31..a10c105 100644
2937 --- a/arch/parisc/include/asm/atomic.h
2938 +++ b/arch/parisc/include/asm/atomic.h
2939 @@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2940
2941 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2942
2943 +#define atomic64_read_unchecked(v) atomic64_read(v)
2944 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2945 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2946 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2947 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2948 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2949 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2950 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2951 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2952 +
2953 #endif /* !CONFIG_64BIT */
2954
2955
2956 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
2957 index 47f11c7..3420df2 100644
2958 --- a/arch/parisc/include/asm/cache.h
2959 +++ b/arch/parisc/include/asm/cache.h
2960 @@ -5,6 +5,7 @@
2961 #ifndef __ARCH_PARISC_CACHE_H
2962 #define __ARCH_PARISC_CACHE_H
2963
2964 +#include <linux/const.h>
2965
2966 /*
2967 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
2968 @@ -15,13 +16,13 @@
2969 * just ruin performance.
2970 */
2971 #ifdef CONFIG_PA20
2972 -#define L1_CACHE_BYTES 64
2973 #define L1_CACHE_SHIFT 6
2974 #else
2975 -#define L1_CACHE_BYTES 32
2976 #define L1_CACHE_SHIFT 5
2977 #endif
2978
2979 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2980 +
2981 #ifndef __ASSEMBLY__
2982
2983 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2984 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2985 index 19f6cb1..6c78cf2 100644
2986 --- a/arch/parisc/include/asm/elf.h
2987 +++ b/arch/parisc/include/asm/elf.h
2988 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
2989
2990 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2991
2992 +#ifdef CONFIG_PAX_ASLR
2993 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
2994 +
2995 +#define PAX_DELTA_MMAP_LEN 16
2996 +#define PAX_DELTA_STACK_LEN 16
2997 +#endif
2998 +
2999 /* This yields a mask that user programs can use to figure out what
3000 instruction set this CPU supports. This could be done in user space,
3001 but it's not easy, and we've already done it here. */
3002 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3003 index 22dadeb..f6c2be4 100644
3004 --- a/arch/parisc/include/asm/pgtable.h
3005 +++ b/arch/parisc/include/asm/pgtable.h
3006 @@ -210,6 +210,17 @@ struct vm_area_struct;
3007 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3008 #define PAGE_COPY PAGE_EXECREAD
3009 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3010 +
3011 +#ifdef CONFIG_PAX_PAGEEXEC
3012 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3013 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3014 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3015 +#else
3016 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3017 +# define PAGE_COPY_NOEXEC PAGE_COPY
3018 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3019 +#endif
3020 +
3021 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3022 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3023 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3024 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3025 index 5e34ccf..672bc9c 100644
3026 --- a/arch/parisc/kernel/module.c
3027 +++ b/arch/parisc/kernel/module.c
3028 @@ -98,16 +98,38 @@
3029
3030 /* three functions to determine where in the module core
3031 * or init pieces the location is */
3032 +static inline int in_init_rx(struct module *me, void *loc)
3033 +{
3034 + return (loc >= me->module_init_rx &&
3035 + loc < (me->module_init_rx + me->init_size_rx));
3036 +}
3037 +
3038 +static inline int in_init_rw(struct module *me, void *loc)
3039 +{
3040 + return (loc >= me->module_init_rw &&
3041 + loc < (me->module_init_rw + me->init_size_rw));
3042 +}
3043 +
3044 static inline int in_init(struct module *me, void *loc)
3045 {
3046 - return (loc >= me->module_init &&
3047 - loc <= (me->module_init + me->init_size));
3048 + return in_init_rx(me, loc) || in_init_rw(me, loc);
3049 +}
3050 +
3051 +static inline int in_core_rx(struct module *me, void *loc)
3052 +{
3053 + return (loc >= me->module_core_rx &&
3054 + loc < (me->module_core_rx + me->core_size_rx));
3055 +}
3056 +
3057 +static inline int in_core_rw(struct module *me, void *loc)
3058 +{
3059 + return (loc >= me->module_core_rw &&
3060 + loc < (me->module_core_rw + me->core_size_rw));
3061 }
3062
3063 static inline int in_core(struct module *me, void *loc)
3064 {
3065 - return (loc >= me->module_core &&
3066 - loc <= (me->module_core + me->core_size));
3067 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3068 }
3069
3070 static inline int in_local(struct module *me, void *loc)
3071 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3072 }
3073
3074 /* align things a bit */
3075 - me->core_size = ALIGN(me->core_size, 16);
3076 - me->arch.got_offset = me->core_size;
3077 - me->core_size += gots * sizeof(struct got_entry);
3078 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3079 + me->arch.got_offset = me->core_size_rw;
3080 + me->core_size_rw += gots * sizeof(struct got_entry);
3081
3082 - me->core_size = ALIGN(me->core_size, 16);
3083 - me->arch.fdesc_offset = me->core_size;
3084 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3085 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3086 + me->arch.fdesc_offset = me->core_size_rw;
3087 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3088
3089 me->arch.got_max = gots;
3090 me->arch.fdesc_max = fdescs;
3091 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3092
3093 BUG_ON(value == 0);
3094
3095 - got = me->module_core + me->arch.got_offset;
3096 + got = me->module_core_rw + me->arch.got_offset;
3097 for (i = 0; got[i].addr; i++)
3098 if (got[i].addr == value)
3099 goto out;
3100 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3101 #ifdef CONFIG_64BIT
3102 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3103 {
3104 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3105 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3106
3107 if (!value) {
3108 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3109 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3110
3111 /* Create new one */
3112 fdesc->addr = value;
3113 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3114 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3115 return (Elf_Addr)fdesc;
3116 }
3117 #endif /* CONFIG_64BIT */
3118 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3119
3120 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3121 end = table + sechdrs[me->arch.unwind_section].sh_size;
3122 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3123 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3124
3125 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3126 me->arch.unwind_section, table, end, gp);
3127 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3128 index c9b9322..02d8940 100644
3129 --- a/arch/parisc/kernel/sys_parisc.c
3130 +++ b/arch/parisc/kernel/sys_parisc.c
3131 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3132 /* At this point: (!vma || addr < vma->vm_end). */
3133 if (TASK_SIZE - len < addr)
3134 return -ENOMEM;
3135 - if (!vma || addr + len <= vma->vm_start)
3136 + if (check_heap_stack_gap(vma, addr, len))
3137 return addr;
3138 addr = vma->vm_end;
3139 }
3140 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3141 /* At this point: (!vma || addr < vma->vm_end). */
3142 if (TASK_SIZE - len < addr)
3143 return -ENOMEM;
3144 - if (!vma || addr + len <= vma->vm_start)
3145 + if (check_heap_stack_gap(vma, addr, len))
3146 return addr;
3147 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3148 if (addr < vma->vm_end) /* handle wraparound */
3149 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3150 if (flags & MAP_FIXED)
3151 return addr;
3152 if (!addr)
3153 - addr = TASK_UNMAPPED_BASE;
3154 + addr = current->mm->mmap_base;
3155
3156 if (filp) {
3157 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3158 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3159 index f19e660..414fe24 100644
3160 --- a/arch/parisc/kernel/traps.c
3161 +++ b/arch/parisc/kernel/traps.c
3162 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3163
3164 down_read(&current->mm->mmap_sem);
3165 vma = find_vma(current->mm,regs->iaoq[0]);
3166 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3167 - && (vma->vm_flags & VM_EXEC)) {
3168 -
3169 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3170 fault_address = regs->iaoq[0];
3171 fault_space = regs->iasq[0];
3172
3173 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3174 index 18162ce..94de376 100644
3175 --- a/arch/parisc/mm/fault.c
3176 +++ b/arch/parisc/mm/fault.c
3177 @@ -15,6 +15,7 @@
3178 #include <linux/sched.h>
3179 #include <linux/interrupt.h>
3180 #include <linux/module.h>
3181 +#include <linux/unistd.h>
3182
3183 #include <asm/uaccess.h>
3184 #include <asm/traps.h>
3185 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3186 static unsigned long
3187 parisc_acctyp(unsigned long code, unsigned int inst)
3188 {
3189 - if (code == 6 || code == 16)
3190 + if (code == 6 || code == 7 || code == 16)
3191 return VM_EXEC;
3192
3193 switch (inst & 0xf0000000) {
3194 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3195 }
3196 #endif
3197
3198 +#ifdef CONFIG_PAX_PAGEEXEC
3199 +/*
3200 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3201 + *
3202 + * returns 1 when task should be killed
3203 + * 2 when rt_sigreturn trampoline was detected
3204 + * 3 when unpatched PLT trampoline was detected
3205 + */
3206 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3207 +{
3208 +
3209 +#ifdef CONFIG_PAX_EMUPLT
3210 + int err;
3211 +
3212 + do { /* PaX: unpatched PLT emulation */
3213 + unsigned int bl, depwi;
3214 +
3215 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3216 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3217 +
3218 + if (err)
3219 + break;
3220 +
3221 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3222 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3223 +
3224 + err = get_user(ldw, (unsigned int *)addr);
3225 + err |= get_user(bv, (unsigned int *)(addr+4));
3226 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3227 +
3228 + if (err)
3229 + break;
3230 +
3231 + if (ldw == 0x0E801096U &&
3232 + bv == 0xEAC0C000U &&
3233 + ldw2 == 0x0E881095U)
3234 + {
3235 + unsigned int resolver, map;
3236 +
3237 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3238 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3239 + if (err)
3240 + break;
3241 +
3242 + regs->gr[20] = instruction_pointer(regs)+8;
3243 + regs->gr[21] = map;
3244 + regs->gr[22] = resolver;
3245 + regs->iaoq[0] = resolver | 3UL;
3246 + regs->iaoq[1] = regs->iaoq[0] + 4;
3247 + return 3;
3248 + }
3249 + }
3250 + } while (0);
3251 +#endif
3252 +
3253 +#ifdef CONFIG_PAX_EMUTRAMP
3254 +
3255 +#ifndef CONFIG_PAX_EMUSIGRT
3256 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3257 + return 1;
3258 +#endif
3259 +
3260 + do { /* PaX: rt_sigreturn emulation */
3261 + unsigned int ldi1, ldi2, bel, nop;
3262 +
3263 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3264 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3265 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3266 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3267 +
3268 + if (err)
3269 + break;
3270 +
3271 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3272 + ldi2 == 0x3414015AU &&
3273 + bel == 0xE4008200U &&
3274 + nop == 0x08000240U)
3275 + {
3276 + regs->gr[25] = (ldi1 & 2) >> 1;
3277 + regs->gr[20] = __NR_rt_sigreturn;
3278 + regs->gr[31] = regs->iaoq[1] + 16;
3279 + regs->sr[0] = regs->iasq[1];
3280 + regs->iaoq[0] = 0x100UL;
3281 + regs->iaoq[1] = regs->iaoq[0] + 4;
3282 + regs->iasq[0] = regs->sr[2];
3283 + regs->iasq[1] = regs->sr[2];
3284 + return 2;
3285 + }
3286 + } while (0);
3287 +#endif
3288 +
3289 + return 1;
3290 +}
3291 +
3292 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3293 +{
3294 + unsigned long i;
3295 +
3296 + printk(KERN_ERR "PAX: bytes at PC: ");
3297 + for (i = 0; i < 5; i++) {
3298 + unsigned int c;
3299 + if (get_user(c, (unsigned int *)pc+i))
3300 + printk(KERN_CONT "???????? ");
3301 + else
3302 + printk(KERN_CONT "%08x ", c);
3303 + }
3304 + printk("\n");
3305 +}
3306 +#endif
3307 +
3308 int fixup_exception(struct pt_regs *regs)
3309 {
3310 const struct exception_table_entry *fix;
3311 @@ -192,8 +303,33 @@ good_area:
3312
3313 acc_type = parisc_acctyp(code,regs->iir);
3314
3315 - if ((vma->vm_flags & acc_type) != acc_type)
3316 + if ((vma->vm_flags & acc_type) != acc_type) {
3317 +
3318 +#ifdef CONFIG_PAX_PAGEEXEC
3319 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3320 + (address & ~3UL) == instruction_pointer(regs))
3321 + {
3322 + up_read(&mm->mmap_sem);
3323 + switch (pax_handle_fetch_fault(regs)) {
3324 +
3325 +#ifdef CONFIG_PAX_EMUPLT
3326 + case 3:
3327 + return;
3328 +#endif
3329 +
3330 +#ifdef CONFIG_PAX_EMUTRAMP
3331 + case 2:
3332 + return;
3333 +#endif
3334 +
3335 + }
3336 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3337 + do_group_exit(SIGKILL);
3338 + }
3339 +#endif
3340 +
3341 goto bad_area;
3342 + }
3343
3344 /*
3345 * If for any reason at all we couldn't handle the fault, make
3346 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3347 index 02e41b5..ec6e26c 100644
3348 --- a/arch/powerpc/include/asm/atomic.h
3349 +++ b/arch/powerpc/include/asm/atomic.h
3350 @@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3351
3352 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3353
3354 +#define atomic64_read_unchecked(v) atomic64_read(v)
3355 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3356 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3357 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3358 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3359 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3360 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3361 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3362 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3363 +
3364 #endif /* __powerpc64__ */
3365
3366 #endif /* __KERNEL__ */
3367 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3368 index 4b50941..5605819 100644
3369 --- a/arch/powerpc/include/asm/cache.h
3370 +++ b/arch/powerpc/include/asm/cache.h
3371 @@ -3,6 +3,7 @@
3372
3373 #ifdef __KERNEL__
3374
3375 +#include <linux/const.h>
3376
3377 /* bytes per L1 cache line */
3378 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3379 @@ -22,7 +23,7 @@
3380 #define L1_CACHE_SHIFT 7
3381 #endif
3382
3383 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3384 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3385
3386 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3387
3388 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3389 index 3bf9cca..e7457d0 100644
3390 --- a/arch/powerpc/include/asm/elf.h
3391 +++ b/arch/powerpc/include/asm/elf.h
3392 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3393 the loader. We need to make sure that it is out of the way of the program
3394 that it will "exec", and that there is sufficient room for the brk. */
3395
3396 -extern unsigned long randomize_et_dyn(unsigned long base);
3397 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3398 +#define ELF_ET_DYN_BASE (0x20000000)
3399 +
3400 +#ifdef CONFIG_PAX_ASLR
3401 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3402 +
3403 +#ifdef __powerpc64__
3404 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3405 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3406 +#else
3407 +#define PAX_DELTA_MMAP_LEN 15
3408 +#define PAX_DELTA_STACK_LEN 15
3409 +#endif
3410 +#endif
3411
3412 /*
3413 * Our registers are always unsigned longs, whether we're a 32 bit
3414 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3415 (0x7ff >> (PAGE_SHIFT - 12)) : \
3416 (0x3ffff >> (PAGE_SHIFT - 12)))
3417
3418 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3419 -#define arch_randomize_brk arch_randomize_brk
3420 -
3421 #endif /* __KERNEL__ */
3422
3423 /*
3424 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3425 index bca8fdc..61e9580 100644
3426 --- a/arch/powerpc/include/asm/kmap_types.h
3427 +++ b/arch/powerpc/include/asm/kmap_types.h
3428 @@ -27,6 +27,7 @@ enum km_type {
3429 KM_PPC_SYNC_PAGE,
3430 KM_PPC_SYNC_ICACHE,
3431 KM_KDB,
3432 + KM_CLEARPAGE,
3433 KM_TYPE_NR
3434 };
3435
3436 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3437 index d4a7f64..451de1c 100644
3438 --- a/arch/powerpc/include/asm/mman.h
3439 +++ b/arch/powerpc/include/asm/mman.h
3440 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3441 }
3442 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3443
3444 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3445 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3446 {
3447 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3448 }
3449 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3450 index f072e97..b436dee 100644
3451 --- a/arch/powerpc/include/asm/page.h
3452 +++ b/arch/powerpc/include/asm/page.h
3453 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3454 * and needs to be executable. This means the whole heap ends
3455 * up being executable.
3456 */
3457 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3458 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3459 +#define VM_DATA_DEFAULT_FLAGS32 \
3460 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3461 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3462
3463 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3464 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3465 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3466 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3467 #endif
3468
3469 +#define ktla_ktva(addr) (addr)
3470 +#define ktva_ktla(addr) (addr)
3471 +
3472 /*
3473 * Use the top bit of the higher-level page table entries to indicate whether
3474 * the entries we point to contain hugepages. This works because we know that
3475 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3476 index fed85e6..da5c71b 100644
3477 --- a/arch/powerpc/include/asm/page_64.h
3478 +++ b/arch/powerpc/include/asm/page_64.h
3479 @@ -146,15 +146,18 @@ do { \
3480 * stack by default, so in the absence of a PT_GNU_STACK program header
3481 * we turn execute permission off.
3482 */
3483 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3484 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3485 +#define VM_STACK_DEFAULT_FLAGS32 \
3486 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3487 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3488
3489 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3490 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3491
3492 +#ifndef CONFIG_PAX_PAGEEXEC
3493 #define VM_STACK_DEFAULT_FLAGS \
3494 (is_32bit_task() ? \
3495 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3496 +#endif
3497
3498 #include <asm-generic/getorder.h>
3499
3500 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3501 index 2e0e411..7899c68 100644
3502 --- a/arch/powerpc/include/asm/pgtable.h
3503 +++ b/arch/powerpc/include/asm/pgtable.h
3504 @@ -2,6 +2,7 @@
3505 #define _ASM_POWERPC_PGTABLE_H
3506 #ifdef __KERNEL__
3507
3508 +#include <linux/const.h>
3509 #ifndef __ASSEMBLY__
3510 #include <asm/processor.h> /* For TASK_SIZE */
3511 #include <asm/mmu.h>
3512 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3513 index 4aad413..85d86bf 100644
3514 --- a/arch/powerpc/include/asm/pte-hash32.h
3515 +++ b/arch/powerpc/include/asm/pte-hash32.h
3516 @@ -21,6 +21,7 @@
3517 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3518 #define _PAGE_USER 0x004 /* usermode access allowed */
3519 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3520 +#define _PAGE_EXEC _PAGE_GUARDED
3521 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3522 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3523 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3524 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3525 index 7fdc2c0..e47a9b02d3 100644
3526 --- a/arch/powerpc/include/asm/reg.h
3527 +++ b/arch/powerpc/include/asm/reg.h
3528 @@ -212,6 +212,7 @@
3529 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3530 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3531 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3532 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3533 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3534 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3535 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3536 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3537 index c377457..3c69fbc 100644
3538 --- a/arch/powerpc/include/asm/system.h
3539 +++ b/arch/powerpc/include/asm/system.h
3540 @@ -539,7 +539,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3541 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3542 #endif
3543
3544 -extern unsigned long arch_align_stack(unsigned long sp);
3545 +#define arch_align_stack(x) ((x) & ~0xfUL)
3546
3547 /* Used in very early kernel initialization. */
3548 extern unsigned long reloc_offset(void);
3549 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3550 index bd0fb84..a42a14b 100644
3551 --- a/arch/powerpc/include/asm/uaccess.h
3552 +++ b/arch/powerpc/include/asm/uaccess.h
3553 @@ -13,6 +13,8 @@
3554 #define VERIFY_READ 0
3555 #define VERIFY_WRITE 1
3556
3557 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3558 +
3559 /*
3560 * The fs value determines whether argument validity checking should be
3561 * performed or not. If get_fs() == USER_DS, checking is performed, with
3562 @@ -327,52 +329,6 @@ do { \
3563 extern unsigned long __copy_tofrom_user(void __user *to,
3564 const void __user *from, unsigned long size);
3565
3566 -#ifndef __powerpc64__
3567 -
3568 -static inline unsigned long copy_from_user(void *to,
3569 - const void __user *from, unsigned long n)
3570 -{
3571 - unsigned long over;
3572 -
3573 - if (access_ok(VERIFY_READ, from, n))
3574 - return __copy_tofrom_user((__force void __user *)to, from, n);
3575 - if ((unsigned long)from < TASK_SIZE) {
3576 - over = (unsigned long)from + n - TASK_SIZE;
3577 - return __copy_tofrom_user((__force void __user *)to, from,
3578 - n - over) + over;
3579 - }
3580 - return n;
3581 -}
3582 -
3583 -static inline unsigned long copy_to_user(void __user *to,
3584 - const void *from, unsigned long n)
3585 -{
3586 - unsigned long over;
3587 -
3588 - if (access_ok(VERIFY_WRITE, to, n))
3589 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3590 - if ((unsigned long)to < TASK_SIZE) {
3591 - over = (unsigned long)to + n - TASK_SIZE;
3592 - return __copy_tofrom_user(to, (__force void __user *)from,
3593 - n - over) + over;
3594 - }
3595 - return n;
3596 -}
3597 -
3598 -#else /* __powerpc64__ */
3599 -
3600 -#define __copy_in_user(to, from, size) \
3601 - __copy_tofrom_user((to), (from), (size))
3602 -
3603 -extern unsigned long copy_from_user(void *to, const void __user *from,
3604 - unsigned long n);
3605 -extern unsigned long copy_to_user(void __user *to, const void *from,
3606 - unsigned long n);
3607 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3608 - unsigned long n);
3609 -
3610 -#endif /* __powerpc64__ */
3611 -
3612 static inline unsigned long __copy_from_user_inatomic(void *to,
3613 const void __user *from, unsigned long n)
3614 {
3615 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3616 if (ret == 0)
3617 return 0;
3618 }
3619 +
3620 + if (!__builtin_constant_p(n))
3621 + check_object_size(to, n, false);
3622 +
3623 return __copy_tofrom_user((__force void __user *)to, from, n);
3624 }
3625
3626 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3627 if (ret == 0)
3628 return 0;
3629 }
3630 +
3631 + if (!__builtin_constant_p(n))
3632 + check_object_size(from, n, true);
3633 +
3634 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3635 }
3636
3637 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3638 return __copy_to_user_inatomic(to, from, size);
3639 }
3640
3641 +#ifndef __powerpc64__
3642 +
3643 +static inline unsigned long __must_check copy_from_user(void *to,
3644 + const void __user *from, unsigned long n)
3645 +{
3646 + unsigned long over;
3647 +
3648 + if ((long)n < 0)
3649 + return n;
3650 +
3651 + if (access_ok(VERIFY_READ, from, n)) {
3652 + if (!__builtin_constant_p(n))
3653 + check_object_size(to, n, false);
3654 + return __copy_tofrom_user((__force void __user *)to, from, n);
3655 + }
3656 + if ((unsigned long)from < TASK_SIZE) {
3657 + over = (unsigned long)from + n - TASK_SIZE;
3658 + if (!__builtin_constant_p(n - over))
3659 + check_object_size(to, n - over, false);
3660 + return __copy_tofrom_user((__force void __user *)to, from,
3661 + n - over) + over;
3662 + }
3663 + return n;
3664 +}
3665 +
3666 +static inline unsigned long __must_check copy_to_user(void __user *to,
3667 + const void *from, unsigned long n)
3668 +{
3669 + unsigned long over;
3670 +
3671 + if ((long)n < 0)
3672 + return n;
3673 +
3674 + if (access_ok(VERIFY_WRITE, to, n)) {
3675 + if (!__builtin_constant_p(n))
3676 + check_object_size(from, n, true);
3677 + return __copy_tofrom_user(to, (__force void __user *)from, n);
3678 + }
3679 + if ((unsigned long)to < TASK_SIZE) {
3680 + over = (unsigned long)to + n - TASK_SIZE;
3681 + if (!__builtin_constant_p(n))
3682 + check_object_size(from, n - over, true);
3683 + return __copy_tofrom_user(to, (__force void __user *)from,
3684 + n - over) + over;
3685 + }
3686 + return n;
3687 +}
3688 +
3689 +#else /* __powerpc64__ */
3690 +
3691 +#define __copy_in_user(to, from, size) \
3692 + __copy_tofrom_user((to), (from), (size))
3693 +
3694 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3695 +{
3696 + if ((long)n < 0 || n > INT_MAX)
3697 + return n;
3698 +
3699 + if (!__builtin_constant_p(n))
3700 + check_object_size(to, n, false);
3701 +
3702 + if (likely(access_ok(VERIFY_READ, from, n)))
3703 + n = __copy_from_user(to, from, n);
3704 + else
3705 + memset(to, 0, n);
3706 + return n;
3707 +}
3708 +
3709 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3710 +{
3711 + if ((long)n < 0 || n > INT_MAX)
3712 + return n;
3713 +
3714 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
3715 + if (!__builtin_constant_p(n))
3716 + check_object_size(from, n, true);
3717 + n = __copy_to_user(to, from, n);
3718 + }
3719 + return n;
3720 +}
3721 +
3722 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
3723 + unsigned long n);
3724 +
3725 +#endif /* __powerpc64__ */
3726 +
3727 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3728
3729 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3730 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3731 index 429983c..7af363b 100644
3732 --- a/arch/powerpc/kernel/exceptions-64e.S
3733 +++ b/arch/powerpc/kernel/exceptions-64e.S
3734 @@ -587,6 +587,7 @@ storage_fault_common:
3735 std r14,_DAR(r1)
3736 std r15,_DSISR(r1)
3737 addi r3,r1,STACK_FRAME_OVERHEAD
3738 + bl .save_nvgprs
3739 mr r4,r14
3740 mr r5,r15
3741 ld r14,PACA_EXGEN+EX_R14(r13)
3742 @@ -596,8 +597,7 @@ storage_fault_common:
3743 cmpdi r3,0
3744 bne- 1f
3745 b .ret_from_except_lite
3746 -1: bl .save_nvgprs
3747 - mr r5,r3
3748 +1: mr r5,r3
3749 addi r3,r1,STACK_FRAME_OVERHEAD
3750 ld r4,_DAR(r1)
3751 bl .bad_page_fault
3752 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3753 index 15c5a4f..22a4000 100644
3754 --- a/arch/powerpc/kernel/exceptions-64s.S
3755 +++ b/arch/powerpc/kernel/exceptions-64s.S
3756 @@ -1004,10 +1004,10 @@ handle_page_fault:
3757 11: ld r4,_DAR(r1)
3758 ld r5,_DSISR(r1)
3759 addi r3,r1,STACK_FRAME_OVERHEAD
3760 + bl .save_nvgprs
3761 bl .do_page_fault
3762 cmpdi r3,0
3763 beq+ 13f
3764 - bl .save_nvgprs
3765 mr r5,r3
3766 addi r3,r1,STACK_FRAME_OVERHEAD
3767 lwz r4,_DAR(r1)
3768 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
3769 index 01e2877..a1ba360 100644
3770 --- a/arch/powerpc/kernel/irq.c
3771 +++ b/arch/powerpc/kernel/irq.c
3772 @@ -560,9 +560,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
3773 host->ops = ops;
3774 host->of_node = of_node_get(of_node);
3775
3776 - if (host->ops->match == NULL)
3777 - host->ops->match = default_irq_host_match;
3778 -
3779 raw_spin_lock_irqsave(&irq_big_lock, flags);
3780
3781 /* If it's a legacy controller, check for duplicates and
3782 @@ -635,7 +632,12 @@ struct irq_host *irq_find_host(struct device_node *node)
3783 */
3784 raw_spin_lock_irqsave(&irq_big_lock, flags);
3785 list_for_each_entry(h, &irq_hosts, link)
3786 - if (h->ops->match(h, node)) {
3787 + if (h->ops->match) {
3788 + if (h->ops->match(h, node)) {
3789 + found = h;
3790 + break;
3791 + }
3792 + } else if (default_irq_host_match(h, node)) {
3793 found = h;
3794 break;
3795 }
3796 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3797 index 0b6d796..d760ddb 100644
3798 --- a/arch/powerpc/kernel/module_32.c
3799 +++ b/arch/powerpc/kernel/module_32.c
3800 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3801 me->arch.core_plt_section = i;
3802 }
3803 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3804 - printk("Module doesn't contain .plt or .init.plt sections.\n");
3805 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3806 return -ENOEXEC;
3807 }
3808
3809 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
3810
3811 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3812 /* Init, or core PLT? */
3813 - if (location >= mod->module_core
3814 - && location < mod->module_core + mod->core_size)
3815 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3816 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3817 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3818 - else
3819 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3820 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3821 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3822 + else {
3823 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3824 + return ~0UL;
3825 + }
3826
3827 /* Find this entry, or if that fails, the next avail. entry */
3828 while (entry->jump[0]) {
3829 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3830 index d817ab0..b23b18e 100644
3831 --- a/arch/powerpc/kernel/process.c
3832 +++ b/arch/powerpc/kernel/process.c
3833 @@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
3834 * Lookup NIP late so we have the best change of getting the
3835 * above info out without failing
3836 */
3837 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3838 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3839 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3840 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3841 #endif
3842 show_stack(current, (unsigned long *) regs->gpr[1]);
3843 if (!user_mode(regs))
3844 @@ -1181,10 +1181,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3845 newsp = stack[0];
3846 ip = stack[STACK_FRAME_LR_SAVE];
3847 if (!firstframe || ip != lr) {
3848 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3849 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3850 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3851 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3852 - printk(" (%pS)",
3853 + printk(" (%pA)",
3854 (void *)current->ret_stack[curr_frame].ret);
3855 curr_frame--;
3856 }
3857 @@ -1204,7 +1204,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3858 struct pt_regs *regs = (struct pt_regs *)
3859 (sp + STACK_FRAME_OVERHEAD);
3860 lr = regs->link;
3861 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
3862 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
3863 regs->trap, (void *)regs->nip, (void *)lr);
3864 firstframe = 1;
3865 }
3866 @@ -1279,58 +1279,3 @@ void thread_info_cache_init(void)
3867 }
3868
3869 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3870 -
3871 -unsigned long arch_align_stack(unsigned long sp)
3872 -{
3873 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3874 - sp -= get_random_int() & ~PAGE_MASK;
3875 - return sp & ~0xf;
3876 -}
3877 -
3878 -static inline unsigned long brk_rnd(void)
3879 -{
3880 - unsigned long rnd = 0;
3881 -
3882 - /* 8MB for 32bit, 1GB for 64bit */
3883 - if (is_32bit_task())
3884 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3885 - else
3886 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3887 -
3888 - return rnd << PAGE_SHIFT;
3889 -}
3890 -
3891 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3892 -{
3893 - unsigned long base = mm->brk;
3894 - unsigned long ret;
3895 -
3896 -#ifdef CONFIG_PPC_STD_MMU_64
3897 - /*
3898 - * If we are using 1TB segments and we are allowed to randomise
3899 - * the heap, we can put it above 1TB so it is backed by a 1TB
3900 - * segment. Otherwise the heap will be in the bottom 1TB
3901 - * which always uses 256MB segments and this may result in a
3902 - * performance penalty.
3903 - */
3904 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3905 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3906 -#endif
3907 -
3908 - ret = PAGE_ALIGN(base + brk_rnd());
3909 -
3910 - if (ret < mm->brk)
3911 - return mm->brk;
3912 -
3913 - return ret;
3914 -}
3915 -
3916 -unsigned long randomize_et_dyn(unsigned long base)
3917 -{
3918 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3919 -
3920 - if (ret < base)
3921 - return base;
3922 -
3923 - return ret;
3924 -}
3925 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3926 index 836a5a1..27289a3 100644
3927 --- a/arch/powerpc/kernel/signal_32.c
3928 +++ b/arch/powerpc/kernel/signal_32.c
3929 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3930 /* Save user registers on the stack */
3931 frame = &rt_sf->uc.uc_mcontext;
3932 addr = frame;
3933 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3934 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3935 if (save_user_regs(regs, frame, 0, 1))
3936 goto badframe;
3937 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3938 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3939 index a50b5ec..547078a 100644
3940 --- a/arch/powerpc/kernel/signal_64.c
3941 +++ b/arch/powerpc/kernel/signal_64.c
3942 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3943 current->thread.fpscr.val = 0;
3944
3945 /* Set up to return from userspace. */
3946 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3947 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3948 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3949 } else {
3950 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3951 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3952 index c091527..5592625 100644
3953 --- a/arch/powerpc/kernel/traps.c
3954 +++ b/arch/powerpc/kernel/traps.c
3955 @@ -131,6 +131,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
3956 return flags;
3957 }
3958
3959 +extern void gr_handle_kernel_exploit(void);
3960 +
3961 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
3962 int signr)
3963 {
3964 @@ -178,6 +180,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
3965 panic("Fatal exception in interrupt");
3966 if (panic_on_oops)
3967 panic("Fatal exception");
3968 +
3969 + gr_handle_kernel_exploit();
3970 +
3971 do_exit(signr);
3972 }
3973
3974 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3975 index 7d14bb6..1305601 100644
3976 --- a/arch/powerpc/kernel/vdso.c
3977 +++ b/arch/powerpc/kernel/vdso.c
3978 @@ -35,6 +35,7 @@
3979 #include <asm/firmware.h>
3980 #include <asm/vdso.h>
3981 #include <asm/vdso_datapage.h>
3982 +#include <asm/mman.h>
3983
3984 #include "setup.h"
3985
3986 @@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3987 vdso_base = VDSO32_MBASE;
3988 #endif
3989
3990 - current->mm->context.vdso_base = 0;
3991 + current->mm->context.vdso_base = ~0UL;
3992
3993 /* vDSO has a problem and was disabled, just don't "enable" it for the
3994 * process
3995 @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3996 vdso_base = get_unmapped_area(NULL, vdso_base,
3997 (vdso_pages << PAGE_SHIFT) +
3998 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3999 - 0, 0);
4000 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
4001 if (IS_ERR_VALUE(vdso_base)) {
4002 rc = vdso_base;
4003 goto fail_mmapsem;
4004 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4005 index 5eea6f3..5d10396 100644
4006 --- a/arch/powerpc/lib/usercopy_64.c
4007 +++ b/arch/powerpc/lib/usercopy_64.c
4008 @@ -9,22 +9,6 @@
4009 #include <linux/module.h>
4010 #include <asm/uaccess.h>
4011
4012 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4013 -{
4014 - if (likely(access_ok(VERIFY_READ, from, n)))
4015 - n = __copy_from_user(to, from, n);
4016 - else
4017 - memset(to, 0, n);
4018 - return n;
4019 -}
4020 -
4021 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4022 -{
4023 - if (likely(access_ok(VERIFY_WRITE, to, n)))
4024 - n = __copy_to_user(to, from, n);
4025 - return n;
4026 -}
4027 -
4028 unsigned long copy_in_user(void __user *to, const void __user *from,
4029 unsigned long n)
4030 {
4031 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4032 return n;
4033 }
4034
4035 -EXPORT_SYMBOL(copy_from_user);
4036 -EXPORT_SYMBOL(copy_to_user);
4037 EXPORT_SYMBOL(copy_in_user);
4038
4039 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4040 index 2f0d1b0..36fb5cc 100644
4041 --- a/arch/powerpc/mm/fault.c
4042 +++ b/arch/powerpc/mm/fault.c
4043 @@ -32,6 +32,10 @@
4044 #include <linux/perf_event.h>
4045 #include <linux/magic.h>
4046 #include <linux/ratelimit.h>
4047 +#include <linux/slab.h>
4048 +#include <linux/pagemap.h>
4049 +#include <linux/compiler.h>
4050 +#include <linux/unistd.h>
4051
4052 #include <asm/firmware.h>
4053 #include <asm/page.h>
4054 @@ -43,6 +47,7 @@
4055 #include <asm/tlbflush.h>
4056 #include <asm/siginfo.h>
4057 #include <mm/mmu_decl.h>
4058 +#include <asm/ptrace.h>
4059
4060 #include "icswx.h"
4061
4062 @@ -68,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4063 }
4064 #endif
4065
4066 +#ifdef CONFIG_PAX_PAGEEXEC
4067 +/*
4068 + * PaX: decide what to do with offenders (regs->nip = fault address)
4069 + *
4070 + * returns 1 when task should be killed
4071 + */
4072 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4073 +{
4074 + return 1;
4075 +}
4076 +
4077 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4078 +{
4079 + unsigned long i;
4080 +
4081 + printk(KERN_ERR "PAX: bytes at PC: ");
4082 + for (i = 0; i < 5; i++) {
4083 + unsigned int c;
4084 + if (get_user(c, (unsigned int __user *)pc+i))
4085 + printk(KERN_CONT "???????? ");
4086 + else
4087 + printk(KERN_CONT "%08x ", c);
4088 + }
4089 + printk("\n");
4090 +}
4091 +#endif
4092 +
4093 /*
4094 * Check whether the instruction at regs->nip is a store using
4095 * an update addressing form which will update r1.
4096 @@ -138,7 +170,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4097 * indicate errors in DSISR but can validly be set in SRR1.
4098 */
4099 if (trap == 0x400)
4100 - error_code &= 0x48200000;
4101 + error_code &= 0x58200000;
4102 else
4103 is_write = error_code & DSISR_ISSTORE;
4104 #else
4105 @@ -276,7 +308,7 @@ good_area:
4106 * "undefined". Of those that can be set, this is the only
4107 * one which seems bad.
4108 */
4109 - if (error_code & 0x10000000)
4110 + if (error_code & DSISR_GUARDED)
4111 /* Guarded storage error. */
4112 goto bad_area;
4113 #endif /* CONFIG_8xx */
4114 @@ -291,7 +323,7 @@ good_area:
4115 * processors use the same I/D cache coherency mechanism
4116 * as embedded.
4117 */
4118 - if (error_code & DSISR_PROTFAULT)
4119 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4120 goto bad_area;
4121 #endif /* CONFIG_PPC_STD_MMU */
4122
4123 @@ -360,6 +392,23 @@ bad_area:
4124 bad_area_nosemaphore:
4125 /* User mode accesses cause a SIGSEGV */
4126 if (user_mode(regs)) {
4127 +
4128 +#ifdef CONFIG_PAX_PAGEEXEC
4129 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4130 +#ifdef CONFIG_PPC_STD_MMU
4131 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4132 +#else
4133 + if (is_exec && regs->nip == address) {
4134 +#endif
4135 + switch (pax_handle_fetch_fault(regs)) {
4136 + }
4137 +
4138 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4139 + do_group_exit(SIGKILL);
4140 + }
4141 + }
4142 +#endif
4143 +
4144 _exception(SIGSEGV, regs, code, address);
4145 return 0;
4146 }
4147 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4148 index 67a42ed..1c7210c 100644
4149 --- a/arch/powerpc/mm/mmap_64.c
4150 +++ b/arch/powerpc/mm/mmap_64.c
4151 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4152 */
4153 if (mmap_is_legacy()) {
4154 mm->mmap_base = TASK_UNMAPPED_BASE;
4155 +
4156 +#ifdef CONFIG_PAX_RANDMMAP
4157 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4158 + mm->mmap_base += mm->delta_mmap;
4159 +#endif
4160 +
4161 mm->get_unmapped_area = arch_get_unmapped_area;
4162 mm->unmap_area = arch_unmap_area;
4163 } else {
4164 mm->mmap_base = mmap_base();
4165 +
4166 +#ifdef CONFIG_PAX_RANDMMAP
4167 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4168 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4169 +#endif
4170 +
4171 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4172 mm->unmap_area = arch_unmap_area_topdown;
4173 }
4174 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4175 index 73709f7..6b90313 100644
4176 --- a/arch/powerpc/mm/slice.c
4177 +++ b/arch/powerpc/mm/slice.c
4178 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4179 if ((mm->task_size - len) < addr)
4180 return 0;
4181 vma = find_vma(mm, addr);
4182 - return (!vma || (addr + len) <= vma->vm_start);
4183 + return check_heap_stack_gap(vma, addr, len);
4184 }
4185
4186 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4187 @@ -256,7 +256,7 @@ full_search:
4188 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4189 continue;
4190 }
4191 - if (!vma || addr + len <= vma->vm_start) {
4192 + if (check_heap_stack_gap(vma, addr, len)) {
4193 /*
4194 * Remember the place where we stopped the search:
4195 */
4196 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4197 }
4198 }
4199
4200 - addr = mm->mmap_base;
4201 - while (addr > len) {
4202 + if (mm->mmap_base < len)
4203 + addr = -ENOMEM;
4204 + else
4205 + addr = mm->mmap_base - len;
4206 +
4207 + while (!IS_ERR_VALUE(addr)) {
4208 /* Go down by chunk size */
4209 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4210 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4211
4212 /* Check for hit with different page size */
4213 mask = slice_range_to_mask(addr, len);
4214 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4215 * return with success:
4216 */
4217 vma = find_vma(mm, addr);
4218 - if (!vma || (addr + len) <= vma->vm_start) {
4219 + if (check_heap_stack_gap(vma, addr, len)) {
4220 /* remember the address as a hint for next time */
4221 if (use_cache)
4222 mm->free_area_cache = addr;
4223 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4224 mm->cached_hole_size = vma->vm_start - addr;
4225
4226 /* try just below the current vma->vm_start */
4227 - addr = vma->vm_start;
4228 + addr = skip_heap_stack_gap(vma, len);
4229 }
4230
4231 /*
4232 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4233 if (fixed && addr > (mm->task_size - len))
4234 return -EINVAL;
4235
4236 +#ifdef CONFIG_PAX_RANDMMAP
4237 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4238 + addr = 0;
4239 +#endif
4240 +
4241 /* If hint, make sure it matches our alignment restrictions */
4242 if (!fixed && addr) {
4243 addr = _ALIGN_UP(addr, 1ul << pshift);
4244 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4245 index 8517d2a..d2738d4 100644
4246 --- a/arch/s390/include/asm/atomic.h
4247 +++ b/arch/s390/include/asm/atomic.h
4248 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4249 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4250 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4251
4252 +#define atomic64_read_unchecked(v) atomic64_read(v)
4253 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4254 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4255 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4256 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4257 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4258 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4259 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4260 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4261 +
4262 #define smp_mb__before_atomic_dec() smp_mb()
4263 #define smp_mb__after_atomic_dec() smp_mb()
4264 #define smp_mb__before_atomic_inc() smp_mb()
4265 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4266 index 2a30d5a..5e5586f 100644
4267 --- a/arch/s390/include/asm/cache.h
4268 +++ b/arch/s390/include/asm/cache.h
4269 @@ -11,8 +11,10 @@
4270 #ifndef __ARCH_S390_CACHE_H
4271 #define __ARCH_S390_CACHE_H
4272
4273 -#define L1_CACHE_BYTES 256
4274 +#include <linux/const.h>
4275 +
4276 #define L1_CACHE_SHIFT 8
4277 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4278 #define NET_SKB_PAD 32
4279
4280 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4281 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4282 index 547f1a6..0b22b53 100644
4283 --- a/arch/s390/include/asm/elf.h
4284 +++ b/arch/s390/include/asm/elf.h
4285 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
4286 the loader. We need to make sure that it is out of the way of the program
4287 that it will "exec", and that there is sufficient room for the brk. */
4288
4289 -extern unsigned long randomize_et_dyn(unsigned long base);
4290 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4291 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4292 +
4293 +#ifdef CONFIG_PAX_ASLR
4294 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4295 +
4296 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4297 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4298 +#endif
4299
4300 /* This yields a mask that user programs can use to figure out what
4301 instruction set this CPU supports. */
4302 @@ -211,7 +217,4 @@ struct linux_binprm;
4303 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4304 int arch_setup_additional_pages(struct linux_binprm *, int);
4305
4306 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4307 -#define arch_randomize_brk arch_randomize_brk
4308 -
4309 #endif
4310 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
4311 index d73cc6b..1a296ad 100644
4312 --- a/arch/s390/include/asm/system.h
4313 +++ b/arch/s390/include/asm/system.h
4314 @@ -260,7 +260,7 @@ extern void (*_machine_restart)(char *command);
4315 extern void (*_machine_halt)(void);
4316 extern void (*_machine_power_off)(void);
4317
4318 -extern unsigned long arch_align_stack(unsigned long sp);
4319 +#define arch_align_stack(x) ((x) & ~0xfUL)
4320
4321 static inline int tprot(unsigned long addr)
4322 {
4323 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4324 index 2b23885..e136e31 100644
4325 --- a/arch/s390/include/asm/uaccess.h
4326 +++ b/arch/s390/include/asm/uaccess.h
4327 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
4328 copy_to_user(void __user *to, const void *from, unsigned long n)
4329 {
4330 might_fault();
4331 +
4332 + if ((long)n < 0)
4333 + return n;
4334 +
4335 if (access_ok(VERIFY_WRITE, to, n))
4336 n = __copy_to_user(to, from, n);
4337 return n;
4338 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4339 static inline unsigned long __must_check
4340 __copy_from_user(void *to, const void __user *from, unsigned long n)
4341 {
4342 + if ((long)n < 0)
4343 + return n;
4344 +
4345 if (__builtin_constant_p(n) && (n <= 256))
4346 return uaccess.copy_from_user_small(n, from, to);
4347 else
4348 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4349 unsigned int sz = __compiletime_object_size(to);
4350
4351 might_fault();
4352 +
4353 + if ((long)n < 0)
4354 + return n;
4355 +
4356 if (unlikely(sz != -1 && sz < n)) {
4357 copy_from_user_overflow();
4358 return n;
4359 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4360 index dfcb343..eda788a 100644
4361 --- a/arch/s390/kernel/module.c
4362 +++ b/arch/s390/kernel/module.c
4363 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4364
4365 /* Increase core size by size of got & plt and set start
4366 offsets for got and plt. */
4367 - me->core_size = ALIGN(me->core_size, 4);
4368 - me->arch.got_offset = me->core_size;
4369 - me->core_size += me->arch.got_size;
4370 - me->arch.plt_offset = me->core_size;
4371 - me->core_size += me->arch.plt_size;
4372 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4373 + me->arch.got_offset = me->core_size_rw;
4374 + me->core_size_rw += me->arch.got_size;
4375 + me->arch.plt_offset = me->core_size_rx;
4376 + me->core_size_rx += me->arch.plt_size;
4377 return 0;
4378 }
4379
4380 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4381 if (info->got_initialized == 0) {
4382 Elf_Addr *gotent;
4383
4384 - gotent = me->module_core + me->arch.got_offset +
4385 + gotent = me->module_core_rw + me->arch.got_offset +
4386 info->got_offset;
4387 *gotent = val;
4388 info->got_initialized = 1;
4389 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4390 else if (r_type == R_390_GOTENT ||
4391 r_type == R_390_GOTPLTENT)
4392 *(unsigned int *) loc =
4393 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4394 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4395 else if (r_type == R_390_GOT64 ||
4396 r_type == R_390_GOTPLT64)
4397 *(unsigned long *) loc = val;
4398 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4399 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4400 if (info->plt_initialized == 0) {
4401 unsigned int *ip;
4402 - ip = me->module_core + me->arch.plt_offset +
4403 + ip = me->module_core_rx + me->arch.plt_offset +
4404 info->plt_offset;
4405 #ifndef CONFIG_64BIT
4406 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4407 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4408 val - loc + 0xffffUL < 0x1ffffeUL) ||
4409 (r_type == R_390_PLT32DBL &&
4410 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4411 - val = (Elf_Addr) me->module_core +
4412 + val = (Elf_Addr) me->module_core_rx +
4413 me->arch.plt_offset +
4414 info->plt_offset;
4415 val += rela->r_addend - loc;
4416 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4417 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4418 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4419 val = val + rela->r_addend -
4420 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4421 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4422 if (r_type == R_390_GOTOFF16)
4423 *(unsigned short *) loc = val;
4424 else if (r_type == R_390_GOTOFF32)
4425 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4426 break;
4427 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4428 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4429 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4430 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4431 rela->r_addend - loc;
4432 if (r_type == R_390_GOTPC)
4433 *(unsigned int *) loc = val;
4434 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4435 index e795933..b32563c 100644
4436 --- a/arch/s390/kernel/process.c
4437 +++ b/arch/s390/kernel/process.c
4438 @@ -323,39 +323,3 @@ unsigned long get_wchan(struct task_struct *p)
4439 }
4440 return 0;
4441 }
4442 -
4443 -unsigned long arch_align_stack(unsigned long sp)
4444 -{
4445 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4446 - sp -= get_random_int() & ~PAGE_MASK;
4447 - return sp & ~0xf;
4448 -}
4449 -
4450 -static inline unsigned long brk_rnd(void)
4451 -{
4452 - /* 8MB for 32bit, 1GB for 64bit */
4453 - if (is_32bit_task())
4454 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4455 - else
4456 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4457 -}
4458 -
4459 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4460 -{
4461 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4462 -
4463 - if (ret < mm->brk)
4464 - return mm->brk;
4465 - return ret;
4466 -}
4467 -
4468 -unsigned long randomize_et_dyn(unsigned long base)
4469 -{
4470 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4471 -
4472 - if (!(current->flags & PF_RANDOMIZE))
4473 - return base;
4474 - if (ret < base)
4475 - return base;
4476 - return ret;
4477 -}
4478 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4479 index a0155c0..34cc491 100644
4480 --- a/arch/s390/mm/mmap.c
4481 +++ b/arch/s390/mm/mmap.c
4482 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4483 */
4484 if (mmap_is_legacy()) {
4485 mm->mmap_base = TASK_UNMAPPED_BASE;
4486 +
4487 +#ifdef CONFIG_PAX_RANDMMAP
4488 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4489 + mm->mmap_base += mm->delta_mmap;
4490 +#endif
4491 +
4492 mm->get_unmapped_area = arch_get_unmapped_area;
4493 mm->unmap_area = arch_unmap_area;
4494 } else {
4495 mm->mmap_base = mmap_base();
4496 +
4497 +#ifdef CONFIG_PAX_RANDMMAP
4498 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4499 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4500 +#endif
4501 +
4502 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4503 mm->unmap_area = arch_unmap_area_topdown;
4504 }
4505 @@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4506 */
4507 if (mmap_is_legacy()) {
4508 mm->mmap_base = TASK_UNMAPPED_BASE;
4509 +
4510 +#ifdef CONFIG_PAX_RANDMMAP
4511 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4512 + mm->mmap_base += mm->delta_mmap;
4513 +#endif
4514 +
4515 mm->get_unmapped_area = s390_get_unmapped_area;
4516 mm->unmap_area = arch_unmap_area;
4517 } else {
4518 mm->mmap_base = mmap_base();
4519 +
4520 +#ifdef CONFIG_PAX_RANDMMAP
4521 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4522 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4523 +#endif
4524 +
4525 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4526 mm->unmap_area = arch_unmap_area_topdown;
4527 }
4528 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4529 index ae3d59f..f65f075 100644
4530 --- a/arch/score/include/asm/cache.h
4531 +++ b/arch/score/include/asm/cache.h
4532 @@ -1,7 +1,9 @@
4533 #ifndef _ASM_SCORE_CACHE_H
4534 #define _ASM_SCORE_CACHE_H
4535
4536 +#include <linux/const.h>
4537 +
4538 #define L1_CACHE_SHIFT 4
4539 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4540 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4541
4542 #endif /* _ASM_SCORE_CACHE_H */
4543 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4544 index 589d5c7..669e274 100644
4545 --- a/arch/score/include/asm/system.h
4546 +++ b/arch/score/include/asm/system.h
4547 @@ -17,7 +17,7 @@ do { \
4548 #define finish_arch_switch(prev) do {} while (0)
4549
4550 typedef void (*vi_handler_t)(void);
4551 -extern unsigned long arch_align_stack(unsigned long sp);
4552 +#define arch_align_stack(x) (x)
4553
4554 #define mb() barrier()
4555 #define rmb() barrier()
4556 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4557 index 25d0803..d6c8e36 100644
4558 --- a/arch/score/kernel/process.c
4559 +++ b/arch/score/kernel/process.c
4560 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4561
4562 return task_pt_regs(task)->cp0_epc;
4563 }
4564 -
4565 -unsigned long arch_align_stack(unsigned long sp)
4566 -{
4567 - return sp;
4568 -}
4569 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4570 index ef9e555..331bd29 100644
4571 --- a/arch/sh/include/asm/cache.h
4572 +++ b/arch/sh/include/asm/cache.h
4573 @@ -9,10 +9,11 @@
4574 #define __ASM_SH_CACHE_H
4575 #ifdef __KERNEL__
4576
4577 +#include <linux/const.h>
4578 #include <linux/init.h>
4579 #include <cpu/cache.h>
4580
4581 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4582 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4583
4584 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4585
4586 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4587 index afeb710..d1d1289 100644
4588 --- a/arch/sh/mm/mmap.c
4589 +++ b/arch/sh/mm/mmap.c
4590 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4591 addr = PAGE_ALIGN(addr);
4592
4593 vma = find_vma(mm, addr);
4594 - if (TASK_SIZE - len >= addr &&
4595 - (!vma || addr + len <= vma->vm_start))
4596 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4597 return addr;
4598 }
4599
4600 @@ -106,7 +105,7 @@ full_search:
4601 }
4602 return -ENOMEM;
4603 }
4604 - if (likely(!vma || addr + len <= vma->vm_start)) {
4605 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4606 /*
4607 * Remember the place where we stopped the search:
4608 */
4609 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4610 addr = PAGE_ALIGN(addr);
4611
4612 vma = find_vma(mm, addr);
4613 - if (TASK_SIZE - len >= addr &&
4614 - (!vma || addr + len <= vma->vm_start))
4615 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4616 return addr;
4617 }
4618
4619 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4620 /* make sure it can fit in the remaining address space */
4621 if (likely(addr > len)) {
4622 vma = find_vma(mm, addr-len);
4623 - if (!vma || addr <= vma->vm_start) {
4624 + if (check_heap_stack_gap(vma, addr - len, len)) {
4625 /* remember the address as a hint for next time */
4626 return (mm->free_area_cache = addr-len);
4627 }
4628 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4629 if (unlikely(mm->mmap_base < len))
4630 goto bottomup;
4631
4632 - addr = mm->mmap_base-len;
4633 - if (do_colour_align)
4634 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4635 + addr = mm->mmap_base - len;
4636
4637 do {
4638 + if (do_colour_align)
4639 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4640 /*
4641 * Lookup failure means no vma is above this address,
4642 * else if new region fits below vma->vm_start,
4643 * return with success:
4644 */
4645 vma = find_vma(mm, addr);
4646 - if (likely(!vma || addr+len <= vma->vm_start)) {
4647 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4648 /* remember the address as a hint for next time */
4649 return (mm->free_area_cache = addr);
4650 }
4651 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4652 mm->cached_hole_size = vma->vm_start - addr;
4653
4654 /* try just below the current vma->vm_start */
4655 - addr = vma->vm_start-len;
4656 - if (do_colour_align)
4657 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4658 - } while (likely(len < vma->vm_start));
4659 + addr = skip_heap_stack_gap(vma, len);
4660 + } while (!IS_ERR_VALUE(addr));
4661
4662 bottomup:
4663 /*
4664 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4665 index eddcfb3..b117d90 100644
4666 --- a/arch/sparc/Makefile
4667 +++ b/arch/sparc/Makefile
4668 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4669 # Export what is needed by arch/sparc/boot/Makefile
4670 export VMLINUX_INIT VMLINUX_MAIN
4671 VMLINUX_INIT := $(head-y) $(init-y)
4672 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4673 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4674 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4675 VMLINUX_MAIN += $(drivers-y) $(net-y)
4676
4677 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4678 index 9f421df..b81fc12 100644
4679 --- a/arch/sparc/include/asm/atomic_64.h
4680 +++ b/arch/sparc/include/asm/atomic_64.h
4681 @@ -14,18 +14,40 @@
4682 #define ATOMIC64_INIT(i) { (i) }
4683
4684 #define atomic_read(v) (*(volatile int *)&(v)->counter)
4685 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4686 +{
4687 + return v->counter;
4688 +}
4689 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
4690 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4691 +{
4692 + return v->counter;
4693 +}
4694
4695 #define atomic_set(v, i) (((v)->counter) = i)
4696 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4697 +{
4698 + v->counter = i;
4699 +}
4700 #define atomic64_set(v, i) (((v)->counter) = i)
4701 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4702 +{
4703 + v->counter = i;
4704 +}
4705
4706 extern void atomic_add(int, atomic_t *);
4707 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4708 extern void atomic64_add(long, atomic64_t *);
4709 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4710 extern void atomic_sub(int, atomic_t *);
4711 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4712 extern void atomic64_sub(long, atomic64_t *);
4713 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4714
4715 extern int atomic_add_ret(int, atomic_t *);
4716 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4717 extern long atomic64_add_ret(long, atomic64_t *);
4718 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4719 extern int atomic_sub_ret(int, atomic_t *);
4720 extern long atomic64_sub_ret(long, atomic64_t *);
4721
4722 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4723 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4724
4725 #define atomic_inc_return(v) atomic_add_ret(1, v)
4726 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4727 +{
4728 + return atomic_add_ret_unchecked(1, v);
4729 +}
4730 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4731 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4732 +{
4733 + return atomic64_add_ret_unchecked(1, v);
4734 +}
4735
4736 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4737 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4738
4739 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4740 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4741 +{
4742 + return atomic_add_ret_unchecked(i, v);
4743 +}
4744 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4745 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4746 +{
4747 + return atomic64_add_ret_unchecked(i, v);
4748 +}
4749
4750 /*
4751 * atomic_inc_and_test - increment and test
4752 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4753 * other cases.
4754 */
4755 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4756 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4757 +{
4758 + return atomic_inc_return_unchecked(v) == 0;
4759 +}
4760 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4761
4762 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4763 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4764 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4765
4766 #define atomic_inc(v) atomic_add(1, v)
4767 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4768 +{
4769 + atomic_add_unchecked(1, v);
4770 +}
4771 #define atomic64_inc(v) atomic64_add(1, v)
4772 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4773 +{
4774 + atomic64_add_unchecked(1, v);
4775 +}
4776
4777 #define atomic_dec(v) atomic_sub(1, v)
4778 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4779 +{
4780 + atomic_sub_unchecked(1, v);
4781 +}
4782 #define atomic64_dec(v) atomic64_sub(1, v)
4783 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4784 +{
4785 + atomic64_sub_unchecked(1, v);
4786 +}
4787
4788 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4789 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4790
4791 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4792 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4793 +{
4794 + return cmpxchg(&v->counter, old, new);
4795 +}
4796 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4797 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4798 +{
4799 + return xchg(&v->counter, new);
4800 +}
4801
4802 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4803 {
4804 - int c, old;
4805 + int c, old, new;
4806 c = atomic_read(v);
4807 for (;;) {
4808 - if (unlikely(c == (u)))
4809 + if (unlikely(c == u))
4810 break;
4811 - old = atomic_cmpxchg((v), c, c + (a));
4812 +
4813 + asm volatile("addcc %2, %0, %0\n"
4814 +
4815 +#ifdef CONFIG_PAX_REFCOUNT
4816 + "tvs %%icc, 6\n"
4817 +#endif
4818 +
4819 + : "=r" (new)
4820 + : "0" (c), "ir" (a)
4821 + : "cc");
4822 +
4823 + old = atomic_cmpxchg(v, c, new);
4824 if (likely(old == c))
4825 break;
4826 c = old;
4827 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4828 #define atomic64_cmpxchg(v, o, n) \
4829 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4830 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4831 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4832 +{
4833 + return xchg(&v->counter, new);
4834 +}
4835
4836 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4837 {
4838 - long c, old;
4839 + long c, old, new;
4840 c = atomic64_read(v);
4841 for (;;) {
4842 - if (unlikely(c == (u)))
4843 + if (unlikely(c == u))
4844 break;
4845 - old = atomic64_cmpxchg((v), c, c + (a));
4846 +
4847 + asm volatile("addcc %2, %0, %0\n"
4848 +
4849 +#ifdef CONFIG_PAX_REFCOUNT
4850 + "tvs %%xcc, 6\n"
4851 +#endif
4852 +
4853 + : "=r" (new)
4854 + : "0" (c), "ir" (a)
4855 + : "cc");
4856 +
4857 + old = atomic64_cmpxchg(v, c, new);
4858 if (likely(old == c))
4859 break;
4860 c = old;
4861 }
4862 - return c != (u);
4863 + return c != u;
4864 }
4865
4866 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4867 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4868 index 69358b5..9d0d492 100644
4869 --- a/arch/sparc/include/asm/cache.h
4870 +++ b/arch/sparc/include/asm/cache.h
4871 @@ -7,10 +7,12 @@
4872 #ifndef _SPARC_CACHE_H
4873 #define _SPARC_CACHE_H
4874
4875 +#include <linux/const.h>
4876 +
4877 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
4878
4879 #define L1_CACHE_SHIFT 5
4880 -#define L1_CACHE_BYTES 32
4881 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4882
4883 #ifdef CONFIG_SPARC32
4884 #define SMP_CACHE_BYTES_SHIFT 5
4885 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4886 index 4269ca6..e3da77f 100644
4887 --- a/arch/sparc/include/asm/elf_32.h
4888 +++ b/arch/sparc/include/asm/elf_32.h
4889 @@ -114,6 +114,13 @@ typedef struct {
4890
4891 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4892
4893 +#ifdef CONFIG_PAX_ASLR
4894 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
4895 +
4896 +#define PAX_DELTA_MMAP_LEN 16
4897 +#define PAX_DELTA_STACK_LEN 16
4898 +#endif
4899 +
4900 /* This yields a mask that user programs can use to figure out what
4901 instruction set this cpu supports. This can NOT be done in userspace
4902 on Sparc. */
4903 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4904 index 7df8b7f..4946269 100644
4905 --- a/arch/sparc/include/asm/elf_64.h
4906 +++ b/arch/sparc/include/asm/elf_64.h
4907 @@ -180,6 +180,13 @@ typedef struct {
4908 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4909 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4910
4911 +#ifdef CONFIG_PAX_ASLR
4912 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4913 +
4914 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4915 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4916 +#endif
4917 +
4918 extern unsigned long sparc64_elf_hwcap;
4919 #define ELF_HWCAP sparc64_elf_hwcap
4920
4921 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4922 index a790cc6..091ed94 100644
4923 --- a/arch/sparc/include/asm/pgtable_32.h
4924 +++ b/arch/sparc/include/asm/pgtable_32.h
4925 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4926 BTFIXUPDEF_INT(page_none)
4927 BTFIXUPDEF_INT(page_copy)
4928 BTFIXUPDEF_INT(page_readonly)
4929 +
4930 +#ifdef CONFIG_PAX_PAGEEXEC
4931 +BTFIXUPDEF_INT(page_shared_noexec)
4932 +BTFIXUPDEF_INT(page_copy_noexec)
4933 +BTFIXUPDEF_INT(page_readonly_noexec)
4934 +#endif
4935 +
4936 BTFIXUPDEF_INT(page_kernel)
4937
4938 #define PMD_SHIFT SUN4C_PMD_SHIFT
4939 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
4940 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4941 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4942
4943 +#ifdef CONFIG_PAX_PAGEEXEC
4944 +extern pgprot_t PAGE_SHARED_NOEXEC;
4945 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4946 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4947 +#else
4948 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4949 +# define PAGE_COPY_NOEXEC PAGE_COPY
4950 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4951 +#endif
4952 +
4953 extern unsigned long page_kernel;
4954
4955 #ifdef MODULE
4956 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4957 index f6ae2b2..b03ffc7 100644
4958 --- a/arch/sparc/include/asm/pgtsrmmu.h
4959 +++ b/arch/sparc/include/asm/pgtsrmmu.h
4960 @@ -115,6 +115,13 @@
4961 SRMMU_EXEC | SRMMU_REF)
4962 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4963 SRMMU_EXEC | SRMMU_REF)
4964 +
4965 +#ifdef CONFIG_PAX_PAGEEXEC
4966 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4967 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4968 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4969 +#endif
4970 +
4971 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4972 SRMMU_DIRTY | SRMMU_REF)
4973
4974 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4975 index 9689176..63c18ea 100644
4976 --- a/arch/sparc/include/asm/spinlock_64.h
4977 +++ b/arch/sparc/include/asm/spinlock_64.h
4978 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
4979
4980 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4981
4982 -static void inline arch_read_lock(arch_rwlock_t *lock)
4983 +static inline void arch_read_lock(arch_rwlock_t *lock)
4984 {
4985 unsigned long tmp1, tmp2;
4986
4987 __asm__ __volatile__ (
4988 "1: ldsw [%2], %0\n"
4989 " brlz,pn %0, 2f\n"
4990 -"4: add %0, 1, %1\n"
4991 +"4: addcc %0, 1, %1\n"
4992 +
4993 +#ifdef CONFIG_PAX_REFCOUNT
4994 +" tvs %%icc, 6\n"
4995 +#endif
4996 +
4997 " cas [%2], %0, %1\n"
4998 " cmp %0, %1\n"
4999 " bne,pn %%icc, 1b\n"
5000 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5001 " .previous"
5002 : "=&r" (tmp1), "=&r" (tmp2)
5003 : "r" (lock)
5004 - : "memory");
5005 + : "memory", "cc");
5006 }
5007
5008 -static int inline arch_read_trylock(arch_rwlock_t *lock)
5009 +static inline int arch_read_trylock(arch_rwlock_t *lock)
5010 {
5011 int tmp1, tmp2;
5012
5013 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5014 "1: ldsw [%2], %0\n"
5015 " brlz,a,pn %0, 2f\n"
5016 " mov 0, %0\n"
5017 -" add %0, 1, %1\n"
5018 +" addcc %0, 1, %1\n"
5019 +
5020 +#ifdef CONFIG_PAX_REFCOUNT
5021 +" tvs %%icc, 6\n"
5022 +#endif
5023 +
5024 " cas [%2], %0, %1\n"
5025 " cmp %0, %1\n"
5026 " bne,pn %%icc, 1b\n"
5027 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5028 return tmp1;
5029 }
5030
5031 -static void inline arch_read_unlock(arch_rwlock_t *lock)
5032 +static inline void arch_read_unlock(arch_rwlock_t *lock)
5033 {
5034 unsigned long tmp1, tmp2;
5035
5036 __asm__ __volatile__(
5037 "1: lduw [%2], %0\n"
5038 -" sub %0, 1, %1\n"
5039 +" subcc %0, 1, %1\n"
5040 +
5041 +#ifdef CONFIG_PAX_REFCOUNT
5042 +" tvs %%icc, 6\n"
5043 +#endif
5044 +
5045 " cas [%2], %0, %1\n"
5046 " cmp %0, %1\n"
5047 " bne,pn %%xcc, 1b\n"
5048 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5049 : "memory");
5050 }
5051
5052 -static void inline arch_write_lock(arch_rwlock_t *lock)
5053 +static inline void arch_write_lock(arch_rwlock_t *lock)
5054 {
5055 unsigned long mask, tmp1, tmp2;
5056
5057 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5058 : "memory");
5059 }
5060
5061 -static void inline arch_write_unlock(arch_rwlock_t *lock)
5062 +static inline void arch_write_unlock(arch_rwlock_t *lock)
5063 {
5064 __asm__ __volatile__(
5065 " stw %%g0, [%0]"
5066 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5067 : "memory");
5068 }
5069
5070 -static int inline arch_write_trylock(arch_rwlock_t *lock)
5071 +static inline int arch_write_trylock(arch_rwlock_t *lock)
5072 {
5073 unsigned long mask, tmp1, tmp2, result;
5074
5075 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5076 index c2a1080..21ed218 100644
5077 --- a/arch/sparc/include/asm/thread_info_32.h
5078 +++ b/arch/sparc/include/asm/thread_info_32.h
5079 @@ -50,6 +50,8 @@ struct thread_info {
5080 unsigned long w_saved;
5081
5082 struct restart_block restart_block;
5083 +
5084 + unsigned long lowest_stack;
5085 };
5086
5087 /*
5088 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5089 index 01d057f..0a02f7e 100644
5090 --- a/arch/sparc/include/asm/thread_info_64.h
5091 +++ b/arch/sparc/include/asm/thread_info_64.h
5092 @@ -63,6 +63,8 @@ struct thread_info {
5093 struct pt_regs *kern_una_regs;
5094 unsigned int kern_una_insn;
5095
5096 + unsigned long lowest_stack;
5097 +
5098 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5099 };
5100
5101 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5102 index e88fbe5..96b0ce5 100644
5103 --- a/arch/sparc/include/asm/uaccess.h
5104 +++ b/arch/sparc/include/asm/uaccess.h
5105 @@ -1,5 +1,13 @@
5106 #ifndef ___ASM_SPARC_UACCESS_H
5107 #define ___ASM_SPARC_UACCESS_H
5108 +
5109 +#ifdef __KERNEL__
5110 +#ifndef __ASSEMBLY__
5111 +#include <linux/types.h>
5112 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5113 +#endif
5114 +#endif
5115 +
5116 #if defined(__sparc__) && defined(__arch64__)
5117 #include <asm/uaccess_64.h>
5118 #else
5119 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5120 index 8303ac4..07f333d 100644
5121 --- a/arch/sparc/include/asm/uaccess_32.h
5122 +++ b/arch/sparc/include/asm/uaccess_32.h
5123 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5124
5125 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5126 {
5127 - if (n && __access_ok((unsigned long) to, n))
5128 + if ((long)n < 0)
5129 + return n;
5130 +
5131 + if (n && __access_ok((unsigned long) to, n)) {
5132 + if (!__builtin_constant_p(n))
5133 + check_object_size(from, n, true);
5134 return __copy_user(to, (__force void __user *) from, n);
5135 - else
5136 + } else
5137 return n;
5138 }
5139
5140 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5141 {
5142 + if ((long)n < 0)
5143 + return n;
5144 +
5145 + if (!__builtin_constant_p(n))
5146 + check_object_size(from, n, true);
5147 +
5148 return __copy_user(to, (__force void __user *) from, n);
5149 }
5150
5151 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5152 {
5153 - if (n && __access_ok((unsigned long) from, n))
5154 + if ((long)n < 0)
5155 + return n;
5156 +
5157 + if (n && __access_ok((unsigned long) from, n)) {
5158 + if (!__builtin_constant_p(n))
5159 + check_object_size(to, n, false);
5160 return __copy_user((__force void __user *) to, from, n);
5161 - else
5162 + } else
5163 return n;
5164 }
5165
5166 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5167 {
5168 + if ((long)n < 0)
5169 + return n;
5170 +
5171 return __copy_user((__force void __user *) to, from, n);
5172 }
5173
5174 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5175 index 3e1449f..5293a0e 100644
5176 --- a/arch/sparc/include/asm/uaccess_64.h
5177 +++ b/arch/sparc/include/asm/uaccess_64.h
5178 @@ -10,6 +10,7 @@
5179 #include <linux/compiler.h>
5180 #include <linux/string.h>
5181 #include <linux/thread_info.h>
5182 +#include <linux/kernel.h>
5183 #include <asm/asi.h>
5184 #include <asm/system.h>
5185 #include <asm/spitfire.h>
5186 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5187 static inline unsigned long __must_check
5188 copy_from_user(void *to, const void __user *from, unsigned long size)
5189 {
5190 - unsigned long ret = ___copy_from_user(to, from, size);
5191 + unsigned long ret;
5192
5193 + if ((long)size < 0 || size > INT_MAX)
5194 + return size;
5195 +
5196 + if (!__builtin_constant_p(size))
5197 + check_object_size(to, size, false);
5198 +
5199 + ret = ___copy_from_user(to, from, size);
5200 if (unlikely(ret))
5201 ret = copy_from_user_fixup(to, from, size);
5202
5203 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5204 static inline unsigned long __must_check
5205 copy_to_user(void __user *to, const void *from, unsigned long size)
5206 {
5207 - unsigned long ret = ___copy_to_user(to, from, size);
5208 + unsigned long ret;
5209
5210 + if ((long)size < 0 || size > INT_MAX)
5211 + return size;
5212 +
5213 + if (!__builtin_constant_p(size))
5214 + check_object_size(from, size, true);
5215 +
5216 + ret = ___copy_to_user(to, from, size);
5217 if (unlikely(ret))
5218 ret = copy_to_user_fixup(to, from, size);
5219 return ret;
5220 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5221 index cb85458..e063f17 100644
5222 --- a/arch/sparc/kernel/Makefile
5223 +++ b/arch/sparc/kernel/Makefile
5224 @@ -3,7 +3,7 @@
5225 #
5226
5227 asflags-y := -ansi
5228 -ccflags-y := -Werror
5229 +#ccflags-y := -Werror
5230
5231 extra-y := head_$(BITS).o
5232 extra-y += init_task.o
5233 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5234 index f793742..4d880af 100644
5235 --- a/arch/sparc/kernel/process_32.c
5236 +++ b/arch/sparc/kernel/process_32.c
5237 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
5238 rw->ins[4], rw->ins[5],
5239 rw->ins[6],
5240 rw->ins[7]);
5241 - printk("%pS\n", (void *) rw->ins[7]);
5242 + printk("%pA\n", (void *) rw->ins[7]);
5243 rw = (struct reg_window32 *) rw->ins[6];
5244 }
5245 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5246 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
5247
5248 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5249 r->psr, r->pc, r->npc, r->y, print_tainted());
5250 - printk("PC: <%pS>\n", (void *) r->pc);
5251 + printk("PC: <%pA>\n", (void *) r->pc);
5252 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5253 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5254 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5255 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5256 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5257 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5258 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5259 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5260
5261 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5262 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5263 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5264 rw = (struct reg_window32 *) fp;
5265 pc = rw->ins[7];
5266 printk("[%08lx : ", pc);
5267 - printk("%pS ] ", (void *) pc);
5268 + printk("%pA ] ", (void *) pc);
5269 fp = rw->ins[6];
5270 } while (++count < 16);
5271 printk("\n");
5272 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5273 index 39d8b05..d1a7d90 100644
5274 --- a/arch/sparc/kernel/process_64.c
5275 +++ b/arch/sparc/kernel/process_64.c
5276 @@ -182,14 +182,14 @@ static void show_regwindow(struct pt_regs *regs)
5277 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5278 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5279 if (regs->tstate & TSTATE_PRIV)
5280 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5281 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5282 }
5283
5284 void show_regs(struct pt_regs *regs)
5285 {
5286 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5287 regs->tpc, regs->tnpc, regs->y, print_tainted());
5288 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5289 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5290 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5291 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5292 regs->u_regs[3]);
5293 @@ -202,7 +202,7 @@ void show_regs(struct pt_regs *regs)
5294 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5295 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5296 regs->u_regs[15]);
5297 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5298 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5299 show_regwindow(regs);
5300 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5301 }
5302 @@ -287,7 +287,7 @@ void arch_trigger_all_cpu_backtrace(void)
5303 ((tp && tp->task) ? tp->task->pid : -1));
5304
5305 if (gp->tstate & TSTATE_PRIV) {
5306 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5307 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5308 (void *) gp->tpc,
5309 (void *) gp->o7,
5310 (void *) gp->i7,
5311 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5312 index 42b282f..28ce9f2 100644
5313 --- a/arch/sparc/kernel/sys_sparc_32.c
5314 +++ b/arch/sparc/kernel/sys_sparc_32.c
5315 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5316 if (ARCH_SUN4C && len > 0x20000000)
5317 return -ENOMEM;
5318 if (!addr)
5319 - addr = TASK_UNMAPPED_BASE;
5320 + addr = current->mm->mmap_base;
5321
5322 if (flags & MAP_SHARED)
5323 addr = COLOUR_ALIGN(addr);
5324 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5325 }
5326 if (TASK_SIZE - PAGE_SIZE - len < addr)
5327 return -ENOMEM;
5328 - if (!vmm || addr + len <= vmm->vm_start)
5329 + if (check_heap_stack_gap(vmm, addr, len))
5330 return addr;
5331 addr = vmm->vm_end;
5332 if (flags & MAP_SHARED)
5333 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5334 index 232df99..cee1f9c 100644
5335 --- a/arch/sparc/kernel/sys_sparc_64.c
5336 +++ b/arch/sparc/kernel/sys_sparc_64.c
5337 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5338 /* We do not accept a shared mapping if it would violate
5339 * cache aliasing constraints.
5340 */
5341 - if ((flags & MAP_SHARED) &&
5342 + if ((filp || (flags & MAP_SHARED)) &&
5343 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5344 return -EINVAL;
5345 return addr;
5346 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5347 if (filp || (flags & MAP_SHARED))
5348 do_color_align = 1;
5349
5350 +#ifdef CONFIG_PAX_RANDMMAP
5351 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5352 +#endif
5353 +
5354 if (addr) {
5355 if (do_color_align)
5356 addr = COLOUR_ALIGN(addr, pgoff);
5357 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5358 addr = PAGE_ALIGN(addr);
5359
5360 vma = find_vma(mm, addr);
5361 - if (task_size - len >= addr &&
5362 - (!vma || addr + len <= vma->vm_start))
5363 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5364 return addr;
5365 }
5366
5367 if (len > mm->cached_hole_size) {
5368 - start_addr = addr = mm->free_area_cache;
5369 + start_addr = addr = mm->free_area_cache;
5370 } else {
5371 - start_addr = addr = TASK_UNMAPPED_BASE;
5372 + start_addr = addr = mm->mmap_base;
5373 mm->cached_hole_size = 0;
5374 }
5375
5376 @@ -174,14 +177,14 @@ full_search:
5377 vma = find_vma(mm, VA_EXCLUDE_END);
5378 }
5379 if (unlikely(task_size < addr)) {
5380 - if (start_addr != TASK_UNMAPPED_BASE) {
5381 - start_addr = addr = TASK_UNMAPPED_BASE;
5382 + if (start_addr != mm->mmap_base) {
5383 + start_addr = addr = mm->mmap_base;
5384 mm->cached_hole_size = 0;
5385 goto full_search;
5386 }
5387 return -ENOMEM;
5388 }
5389 - if (likely(!vma || addr + len <= vma->vm_start)) {
5390 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5391 /*
5392 * Remember the place where we stopped the search:
5393 */
5394 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5395 /* We do not accept a shared mapping if it would violate
5396 * cache aliasing constraints.
5397 */
5398 - if ((flags & MAP_SHARED) &&
5399 + if ((filp || (flags & MAP_SHARED)) &&
5400 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5401 return -EINVAL;
5402 return addr;
5403 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5404 addr = PAGE_ALIGN(addr);
5405
5406 vma = find_vma(mm, addr);
5407 - if (task_size - len >= addr &&
5408 - (!vma || addr + len <= vma->vm_start))
5409 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5410 return addr;
5411 }
5412
5413 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5414 /* make sure it can fit in the remaining address space */
5415 if (likely(addr > len)) {
5416 vma = find_vma(mm, addr-len);
5417 - if (!vma || addr <= vma->vm_start) {
5418 + if (check_heap_stack_gap(vma, addr - len, len)) {
5419 /* remember the address as a hint for next time */
5420 return (mm->free_area_cache = addr-len);
5421 }
5422 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5423 if (unlikely(mm->mmap_base < len))
5424 goto bottomup;
5425
5426 - addr = mm->mmap_base-len;
5427 - if (do_color_align)
5428 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5429 + addr = mm->mmap_base - len;
5430
5431 do {
5432 + if (do_color_align)
5433 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5434 /*
5435 * Lookup failure means no vma is above this address,
5436 * else if new region fits below vma->vm_start,
5437 * return with success:
5438 */
5439 vma = find_vma(mm, addr);
5440 - if (likely(!vma || addr+len <= vma->vm_start)) {
5441 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5442 /* remember the address as a hint for next time */
5443 return (mm->free_area_cache = addr);
5444 }
5445 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5446 mm->cached_hole_size = vma->vm_start - addr;
5447
5448 /* try just below the current vma->vm_start */
5449 - addr = vma->vm_start-len;
5450 - if (do_color_align)
5451 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5452 - } while (likely(len < vma->vm_start));
5453 + addr = skip_heap_stack_gap(vma, len);
5454 + } while (!IS_ERR_VALUE(addr));
5455
5456 bottomup:
5457 /*
5458 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5459 gap == RLIM_INFINITY ||
5460 sysctl_legacy_va_layout) {
5461 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5462 +
5463 +#ifdef CONFIG_PAX_RANDMMAP
5464 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5465 + mm->mmap_base += mm->delta_mmap;
5466 +#endif
5467 +
5468 mm->get_unmapped_area = arch_get_unmapped_area;
5469 mm->unmap_area = arch_unmap_area;
5470 } else {
5471 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5472 gap = (task_size / 6 * 5);
5473
5474 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5475 +
5476 +#ifdef CONFIG_PAX_RANDMMAP
5477 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5478 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5479 +#endif
5480 +
5481 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5482 mm->unmap_area = arch_unmap_area_topdown;
5483 }
5484 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5485 index 591f20c..0f1b925 100644
5486 --- a/arch/sparc/kernel/traps_32.c
5487 +++ b/arch/sparc/kernel/traps_32.c
5488 @@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
5489 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5490 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5491
5492 +extern void gr_handle_kernel_exploit(void);
5493 +
5494 void die_if_kernel(char *str, struct pt_regs *regs)
5495 {
5496 static int die_counter;
5497 @@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5498 count++ < 30 &&
5499 (((unsigned long) rw) >= PAGE_OFFSET) &&
5500 !(((unsigned long) rw) & 0x7)) {
5501 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5502 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5503 (void *) rw->ins[7]);
5504 rw = (struct reg_window32 *)rw->ins[6];
5505 }
5506 }
5507 printk("Instruction DUMP:");
5508 instruction_dump ((unsigned long *) regs->pc);
5509 - if(regs->psr & PSR_PS)
5510 + if(regs->psr & PSR_PS) {
5511 + gr_handle_kernel_exploit();
5512 do_exit(SIGKILL);
5513 + }
5514 do_exit(SIGSEGV);
5515 }
5516
5517 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5518 index 0cbdaa4..438e4c9 100644
5519 --- a/arch/sparc/kernel/traps_64.c
5520 +++ b/arch/sparc/kernel/traps_64.c
5521 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5522 i + 1,
5523 p->trapstack[i].tstate, p->trapstack[i].tpc,
5524 p->trapstack[i].tnpc, p->trapstack[i].tt);
5525 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5526 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5527 }
5528 }
5529
5530 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5531
5532 lvl -= 0x100;
5533 if (regs->tstate & TSTATE_PRIV) {
5534 +
5535 +#ifdef CONFIG_PAX_REFCOUNT
5536 + if (lvl == 6)
5537 + pax_report_refcount_overflow(regs);
5538 +#endif
5539 +
5540 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5541 die_if_kernel(buffer, regs);
5542 }
5543 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5544 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5545 {
5546 char buffer[32];
5547 -
5548 +
5549 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5550 0, lvl, SIGTRAP) == NOTIFY_STOP)
5551 return;
5552
5553 +#ifdef CONFIG_PAX_REFCOUNT
5554 + if (lvl == 6)
5555 + pax_report_refcount_overflow(regs);
5556 +#endif
5557 +
5558 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5559
5560 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5561 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5562 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5563 printk("%s" "ERROR(%d): ",
5564 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5565 - printk("TPC<%pS>\n", (void *) regs->tpc);
5566 + printk("TPC<%pA>\n", (void *) regs->tpc);
5567 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5568 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5569 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5570 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5571 smp_processor_id(),
5572 (type & 0x1) ? 'I' : 'D',
5573 regs->tpc);
5574 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5575 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5576 panic("Irrecoverable Cheetah+ parity error.");
5577 }
5578
5579 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5580 smp_processor_id(),
5581 (type & 0x1) ? 'I' : 'D',
5582 regs->tpc);
5583 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5584 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5585 }
5586
5587 struct sun4v_error_entry {
5588 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5589
5590 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5591 regs->tpc, tl);
5592 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5593 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5594 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5595 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5596 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5597 (void *) regs->u_regs[UREG_I7]);
5598 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5599 "pte[%lx] error[%lx]\n",
5600 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5601
5602 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5603 regs->tpc, tl);
5604 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5605 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5606 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5607 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5608 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5609 (void *) regs->u_regs[UREG_I7]);
5610 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5611 "pte[%lx] error[%lx]\n",
5612 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5613 fp = (unsigned long)sf->fp + STACK_BIAS;
5614 }
5615
5616 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5617 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5618 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5619 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
5620 int index = tsk->curr_ret_stack;
5621 if (tsk->ret_stack && index >= graph) {
5622 pc = tsk->ret_stack[index - graph].ret;
5623 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5624 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5625 graph++;
5626 }
5627 }
5628 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5629 return (struct reg_window *) (fp + STACK_BIAS);
5630 }
5631
5632 +extern void gr_handle_kernel_exploit(void);
5633 +
5634 void die_if_kernel(char *str, struct pt_regs *regs)
5635 {
5636 static int die_counter;
5637 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5638 while (rw &&
5639 count++ < 30 &&
5640 kstack_valid(tp, (unsigned long) rw)) {
5641 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
5642 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
5643 (void *) rw->ins[7]);
5644
5645 rw = kernel_stack_up(rw);
5646 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5647 }
5648 user_instruction_dump ((unsigned int __user *) regs->tpc);
5649 }
5650 - if (regs->tstate & TSTATE_PRIV)
5651 + if (regs->tstate & TSTATE_PRIV) {
5652 + gr_handle_kernel_exploit();
5653 do_exit(SIGKILL);
5654 + }
5655 do_exit(SIGSEGV);
5656 }
5657 EXPORT_SYMBOL(die_if_kernel);
5658 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5659 index 76e4ac1..78f8bb1 100644
5660 --- a/arch/sparc/kernel/unaligned_64.c
5661 +++ b/arch/sparc/kernel/unaligned_64.c
5662 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
5663 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
5664
5665 if (__ratelimit(&ratelimit)) {
5666 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
5667 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
5668 regs->tpc, (void *) regs->tpc);
5669 }
5670 }
5671 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5672 index a3fc437..fea9957 100644
5673 --- a/arch/sparc/lib/Makefile
5674 +++ b/arch/sparc/lib/Makefile
5675 @@ -2,7 +2,7 @@
5676 #
5677
5678 asflags-y := -ansi -DST_DIV0=0x02
5679 -ccflags-y := -Werror
5680 +#ccflags-y := -Werror
5681
5682 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5683 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5684 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5685 index 59186e0..f747d7a 100644
5686 --- a/arch/sparc/lib/atomic_64.S
5687 +++ b/arch/sparc/lib/atomic_64.S
5688 @@ -18,7 +18,12 @@
5689 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5690 BACKOFF_SETUP(%o2)
5691 1: lduw [%o1], %g1
5692 - add %g1, %o0, %g7
5693 + addcc %g1, %o0, %g7
5694 +
5695 +#ifdef CONFIG_PAX_REFCOUNT
5696 + tvs %icc, 6
5697 +#endif
5698 +
5699 cas [%o1], %g1, %g7
5700 cmp %g1, %g7
5701 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5702 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5703 2: BACKOFF_SPIN(%o2, %o3, 1b)
5704 .size atomic_add, .-atomic_add
5705
5706 + .globl atomic_add_unchecked
5707 + .type atomic_add_unchecked,#function
5708 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5709 + BACKOFF_SETUP(%o2)
5710 +1: lduw [%o1], %g1
5711 + add %g1, %o0, %g7
5712 + cas [%o1], %g1, %g7
5713 + cmp %g1, %g7
5714 + bne,pn %icc, 2f
5715 + nop
5716 + retl
5717 + nop
5718 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5719 + .size atomic_add_unchecked, .-atomic_add_unchecked
5720 +
5721 .globl atomic_sub
5722 .type atomic_sub,#function
5723 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5724 BACKOFF_SETUP(%o2)
5725 1: lduw [%o1], %g1
5726 - sub %g1, %o0, %g7
5727 + subcc %g1, %o0, %g7
5728 +
5729 +#ifdef CONFIG_PAX_REFCOUNT
5730 + tvs %icc, 6
5731 +#endif
5732 +
5733 cas [%o1], %g1, %g7
5734 cmp %g1, %g7
5735 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5736 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5737 2: BACKOFF_SPIN(%o2, %o3, 1b)
5738 .size atomic_sub, .-atomic_sub
5739
5740 + .globl atomic_sub_unchecked
5741 + .type atomic_sub_unchecked,#function
5742 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5743 + BACKOFF_SETUP(%o2)
5744 +1: lduw [%o1], %g1
5745 + sub %g1, %o0, %g7
5746 + cas [%o1], %g1, %g7
5747 + cmp %g1, %g7
5748 + bne,pn %icc, 2f
5749 + nop
5750 + retl
5751 + nop
5752 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5753 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
5754 +
5755 .globl atomic_add_ret
5756 .type atomic_add_ret,#function
5757 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5758 BACKOFF_SETUP(%o2)
5759 1: lduw [%o1], %g1
5760 - add %g1, %o0, %g7
5761 + addcc %g1, %o0, %g7
5762 +
5763 +#ifdef CONFIG_PAX_REFCOUNT
5764 + tvs %icc, 6
5765 +#endif
5766 +
5767 cas [%o1], %g1, %g7
5768 cmp %g1, %g7
5769 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5770 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5771 2: BACKOFF_SPIN(%o2, %o3, 1b)
5772 .size atomic_add_ret, .-atomic_add_ret
5773
5774 + .globl atomic_add_ret_unchecked
5775 + .type atomic_add_ret_unchecked,#function
5776 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5777 + BACKOFF_SETUP(%o2)
5778 +1: lduw [%o1], %g1
5779 + addcc %g1, %o0, %g7
5780 + cas [%o1], %g1, %g7
5781 + cmp %g1, %g7
5782 + bne,pn %icc, 2f
5783 + add %g7, %o0, %g7
5784 + sra %g7, 0, %o0
5785 + retl
5786 + nop
5787 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5788 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5789 +
5790 .globl atomic_sub_ret
5791 .type atomic_sub_ret,#function
5792 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5793 BACKOFF_SETUP(%o2)
5794 1: lduw [%o1], %g1
5795 - sub %g1, %o0, %g7
5796 + subcc %g1, %o0, %g7
5797 +
5798 +#ifdef CONFIG_PAX_REFCOUNT
5799 + tvs %icc, 6
5800 +#endif
5801 +
5802 cas [%o1], %g1, %g7
5803 cmp %g1, %g7
5804 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5805 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5806 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5807 BACKOFF_SETUP(%o2)
5808 1: ldx [%o1], %g1
5809 - add %g1, %o0, %g7
5810 + addcc %g1, %o0, %g7
5811 +
5812 +#ifdef CONFIG_PAX_REFCOUNT
5813 + tvs %xcc, 6
5814 +#endif
5815 +
5816 casx [%o1], %g1, %g7
5817 cmp %g1, %g7
5818 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5819 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5820 2: BACKOFF_SPIN(%o2, %o3, 1b)
5821 .size atomic64_add, .-atomic64_add
5822
5823 + .globl atomic64_add_unchecked
5824 + .type atomic64_add_unchecked,#function
5825 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5826 + BACKOFF_SETUP(%o2)
5827 +1: ldx [%o1], %g1
5828 + addcc %g1, %o0, %g7
5829 + casx [%o1], %g1, %g7
5830 + cmp %g1, %g7
5831 + bne,pn %xcc, 2f
5832 + nop
5833 + retl
5834 + nop
5835 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5836 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
5837 +
5838 .globl atomic64_sub
5839 .type atomic64_sub,#function
5840 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5841 BACKOFF_SETUP(%o2)
5842 1: ldx [%o1], %g1
5843 - sub %g1, %o0, %g7
5844 + subcc %g1, %o0, %g7
5845 +
5846 +#ifdef CONFIG_PAX_REFCOUNT
5847 + tvs %xcc, 6
5848 +#endif
5849 +
5850 casx [%o1], %g1, %g7
5851 cmp %g1, %g7
5852 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5853 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5854 2: BACKOFF_SPIN(%o2, %o3, 1b)
5855 .size atomic64_sub, .-atomic64_sub
5856
5857 + .globl atomic64_sub_unchecked
5858 + .type atomic64_sub_unchecked,#function
5859 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5860 + BACKOFF_SETUP(%o2)
5861 +1: ldx [%o1], %g1
5862 + subcc %g1, %o0, %g7
5863 + casx [%o1], %g1, %g7
5864 + cmp %g1, %g7
5865 + bne,pn %xcc, 2f
5866 + nop
5867 + retl
5868 + nop
5869 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5870 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5871 +
5872 .globl atomic64_add_ret
5873 .type atomic64_add_ret,#function
5874 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5875 BACKOFF_SETUP(%o2)
5876 1: ldx [%o1], %g1
5877 - add %g1, %o0, %g7
5878 + addcc %g1, %o0, %g7
5879 +
5880 +#ifdef CONFIG_PAX_REFCOUNT
5881 + tvs %xcc, 6
5882 +#endif
5883 +
5884 casx [%o1], %g1, %g7
5885 cmp %g1, %g7
5886 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5887 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5888 2: BACKOFF_SPIN(%o2, %o3, 1b)
5889 .size atomic64_add_ret, .-atomic64_add_ret
5890
5891 + .globl atomic64_add_ret_unchecked
5892 + .type atomic64_add_ret_unchecked,#function
5893 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5894 + BACKOFF_SETUP(%o2)
5895 +1: ldx [%o1], %g1
5896 + addcc %g1, %o0, %g7
5897 + casx [%o1], %g1, %g7
5898 + cmp %g1, %g7
5899 + bne,pn %xcc, 2f
5900 + add %g7, %o0, %g7
5901 + mov %g7, %o0
5902 + retl
5903 + nop
5904 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5905 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5906 +
5907 .globl atomic64_sub_ret
5908 .type atomic64_sub_ret,#function
5909 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5910 BACKOFF_SETUP(%o2)
5911 1: ldx [%o1], %g1
5912 - sub %g1, %o0, %g7
5913 + subcc %g1, %o0, %g7
5914 +
5915 +#ifdef CONFIG_PAX_REFCOUNT
5916 + tvs %xcc, 6
5917 +#endif
5918 +
5919 casx [%o1], %g1, %g7
5920 cmp %g1, %g7
5921 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5922 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5923 index f73c224..662af10 100644
5924 --- a/arch/sparc/lib/ksyms.c
5925 +++ b/arch/sparc/lib/ksyms.c
5926 @@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
5927
5928 /* Atomic counter implementation. */
5929 EXPORT_SYMBOL(atomic_add);
5930 +EXPORT_SYMBOL(atomic_add_unchecked);
5931 EXPORT_SYMBOL(atomic_add_ret);
5932 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
5933 EXPORT_SYMBOL(atomic_sub);
5934 +EXPORT_SYMBOL(atomic_sub_unchecked);
5935 EXPORT_SYMBOL(atomic_sub_ret);
5936 EXPORT_SYMBOL(atomic64_add);
5937 +EXPORT_SYMBOL(atomic64_add_unchecked);
5938 EXPORT_SYMBOL(atomic64_add_ret);
5939 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5940 EXPORT_SYMBOL(atomic64_sub);
5941 +EXPORT_SYMBOL(atomic64_sub_unchecked);
5942 EXPORT_SYMBOL(atomic64_sub_ret);
5943
5944 /* Atomic bit operations. */
5945 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5946 index 301421c..e2535d1 100644
5947 --- a/arch/sparc/mm/Makefile
5948 +++ b/arch/sparc/mm/Makefile
5949 @@ -2,7 +2,7 @@
5950 #
5951
5952 asflags-y := -ansi
5953 -ccflags-y := -Werror
5954 +#ccflags-y := -Werror
5955
5956 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
5957 obj-y += fault_$(BITS).o
5958 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5959 index 8023fd7..c8e89e9 100644
5960 --- a/arch/sparc/mm/fault_32.c
5961 +++ b/arch/sparc/mm/fault_32.c
5962 @@ -21,6 +21,9 @@
5963 #include <linux/perf_event.h>
5964 #include <linux/interrupt.h>
5965 #include <linux/kdebug.h>
5966 +#include <linux/slab.h>
5967 +#include <linux/pagemap.h>
5968 +#include <linux/compiler.h>
5969
5970 #include <asm/system.h>
5971 #include <asm/page.h>
5972 @@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5973 return safe_compute_effective_address(regs, insn);
5974 }
5975
5976 +#ifdef CONFIG_PAX_PAGEEXEC
5977 +#ifdef CONFIG_PAX_DLRESOLVE
5978 +static void pax_emuplt_close(struct vm_area_struct *vma)
5979 +{
5980 + vma->vm_mm->call_dl_resolve = 0UL;
5981 +}
5982 +
5983 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5984 +{
5985 + unsigned int *kaddr;
5986 +
5987 + vmf->page = alloc_page(GFP_HIGHUSER);
5988 + if (!vmf->page)
5989 + return VM_FAULT_OOM;
5990 +
5991 + kaddr = kmap(vmf->page);
5992 + memset(kaddr, 0, PAGE_SIZE);
5993 + kaddr[0] = 0x9DE3BFA8U; /* save */
5994 + flush_dcache_page(vmf->page);
5995 + kunmap(vmf->page);
5996 + return VM_FAULT_MAJOR;
5997 +}
5998 +
5999 +static const struct vm_operations_struct pax_vm_ops = {
6000 + .close = pax_emuplt_close,
6001 + .fault = pax_emuplt_fault
6002 +};
6003 +
6004 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6005 +{
6006 + int ret;
6007 +
6008 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6009 + vma->vm_mm = current->mm;
6010 + vma->vm_start = addr;
6011 + vma->vm_end = addr + PAGE_SIZE;
6012 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6013 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6014 + vma->vm_ops = &pax_vm_ops;
6015 +
6016 + ret = insert_vm_struct(current->mm, vma);
6017 + if (ret)
6018 + return ret;
6019 +
6020 + ++current->mm->total_vm;
6021 + return 0;
6022 +}
6023 +#endif
6024 +
6025 +/*
6026 + * PaX: decide what to do with offenders (regs->pc = fault address)
6027 + *
6028 + * returns 1 when task should be killed
6029 + * 2 when patched PLT trampoline was detected
6030 + * 3 when unpatched PLT trampoline was detected
6031 + */
6032 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6033 +{
6034 +
6035 +#ifdef CONFIG_PAX_EMUPLT
6036 + int err;
6037 +
6038 + do { /* PaX: patched PLT emulation #1 */
6039 + unsigned int sethi1, sethi2, jmpl;
6040 +
6041 + err = get_user(sethi1, (unsigned int *)regs->pc);
6042 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6043 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6044 +
6045 + if (err)
6046 + break;
6047 +
6048 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6049 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6050 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6051 + {
6052 + unsigned int addr;
6053 +
6054 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6055 + addr = regs->u_regs[UREG_G1];
6056 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6057 + regs->pc = addr;
6058 + regs->npc = addr+4;
6059 + return 2;
6060 + }
6061 + } while (0);
6062 +
6063 + { /* PaX: patched PLT emulation #2 */
6064 + unsigned int ba;
6065 +
6066 + err = get_user(ba, (unsigned int *)regs->pc);
6067 +
6068 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6069 + unsigned int addr;
6070 +
6071 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6072 + regs->pc = addr;
6073 + regs->npc = addr+4;
6074 + return 2;
6075 + }
6076 + }
6077 +
6078 + do { /* PaX: patched PLT emulation #3 */
6079 + unsigned int sethi, jmpl, nop;
6080 +
6081 + err = get_user(sethi, (unsigned int *)regs->pc);
6082 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6083 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6084 +
6085 + if (err)
6086 + break;
6087 +
6088 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6089 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6090 + nop == 0x01000000U)
6091 + {
6092 + unsigned int addr;
6093 +
6094 + addr = (sethi & 0x003FFFFFU) << 10;
6095 + regs->u_regs[UREG_G1] = addr;
6096 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6097 + regs->pc = addr;
6098 + regs->npc = addr+4;
6099 + return 2;
6100 + }
6101 + } while (0);
6102 +
6103 + do { /* PaX: unpatched PLT emulation step 1 */
6104 + unsigned int sethi, ba, nop;
6105 +
6106 + err = get_user(sethi, (unsigned int *)regs->pc);
6107 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6108 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6109 +
6110 + if (err)
6111 + break;
6112 +
6113 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6114 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6115 + nop == 0x01000000U)
6116 + {
6117 + unsigned int addr, save, call;
6118 +
6119 + if ((ba & 0xFFC00000U) == 0x30800000U)
6120 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6121 + else
6122 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6123 +
6124 + err = get_user(save, (unsigned int *)addr);
6125 + err |= get_user(call, (unsigned int *)(addr+4));
6126 + err |= get_user(nop, (unsigned int *)(addr+8));
6127 + if (err)
6128 + break;
6129 +
6130 +#ifdef CONFIG_PAX_DLRESOLVE
6131 + if (save == 0x9DE3BFA8U &&
6132 + (call & 0xC0000000U) == 0x40000000U &&
6133 + nop == 0x01000000U)
6134 + {
6135 + struct vm_area_struct *vma;
6136 + unsigned long call_dl_resolve;
6137 +
6138 + down_read(&current->mm->mmap_sem);
6139 + call_dl_resolve = current->mm->call_dl_resolve;
6140 + up_read(&current->mm->mmap_sem);
6141 + if (likely(call_dl_resolve))
6142 + goto emulate;
6143 +
6144 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6145 +
6146 + down_write(&current->mm->mmap_sem);
6147 + if (current->mm->call_dl_resolve) {
6148 + call_dl_resolve = current->mm->call_dl_resolve;
6149 + up_write(&current->mm->mmap_sem);
6150 + if (vma)
6151 + kmem_cache_free(vm_area_cachep, vma);
6152 + goto emulate;
6153 + }
6154 +
6155 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6156 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6157 + up_write(&current->mm->mmap_sem);
6158 + if (vma)
6159 + kmem_cache_free(vm_area_cachep, vma);
6160 + return 1;
6161 + }
6162 +
6163 + if (pax_insert_vma(vma, call_dl_resolve)) {
6164 + up_write(&current->mm->mmap_sem);
6165 + kmem_cache_free(vm_area_cachep, vma);
6166 + return 1;
6167 + }
6168 +
6169 + current->mm->call_dl_resolve = call_dl_resolve;
6170 + up_write(&current->mm->mmap_sem);
6171 +
6172 +emulate:
6173 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6174 + regs->pc = call_dl_resolve;
6175 + regs->npc = addr+4;
6176 + return 3;
6177 + }
6178 +#endif
6179 +
6180 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6181 + if ((save & 0xFFC00000U) == 0x05000000U &&
6182 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6183 + nop == 0x01000000U)
6184 + {
6185 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6186 + regs->u_regs[UREG_G2] = addr + 4;
6187 + addr = (save & 0x003FFFFFU) << 10;
6188 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6189 + regs->pc = addr;
6190 + regs->npc = addr+4;
6191 + return 3;
6192 + }
6193 + }
6194 + } while (0);
6195 +
6196 + do { /* PaX: unpatched PLT emulation step 2 */
6197 + unsigned int save, call, nop;
6198 +
6199 + err = get_user(save, (unsigned int *)(regs->pc-4));
6200 + err |= get_user(call, (unsigned int *)regs->pc);
6201 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6202 + if (err)
6203 + break;
6204 +
6205 + if (save == 0x9DE3BFA8U &&
6206 + (call & 0xC0000000U) == 0x40000000U &&
6207 + nop == 0x01000000U)
6208 + {
6209 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6210 +
6211 + regs->u_regs[UREG_RETPC] = regs->pc;
6212 + regs->pc = dl_resolve;
6213 + regs->npc = dl_resolve+4;
6214 + return 3;
6215 + }
6216 + } while (0);
6217 +#endif
6218 +
6219 + return 1;
6220 +}
6221 +
6222 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6223 +{
6224 + unsigned long i;
6225 +
6226 + printk(KERN_ERR "PAX: bytes at PC: ");
6227 + for (i = 0; i < 8; i++) {
6228 + unsigned int c;
6229 + if (get_user(c, (unsigned int *)pc+i))
6230 + printk(KERN_CONT "???????? ");
6231 + else
6232 + printk(KERN_CONT "%08x ", c);
6233 + }
6234 + printk("\n");
6235 +}
6236 +#endif
6237 +
6238 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6239 int text_fault)
6240 {
6241 @@ -280,6 +545,24 @@ good_area:
6242 if(!(vma->vm_flags & VM_WRITE))
6243 goto bad_area;
6244 } else {
6245 +
6246 +#ifdef CONFIG_PAX_PAGEEXEC
6247 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6248 + up_read(&mm->mmap_sem);
6249 + switch (pax_handle_fetch_fault(regs)) {
6250 +
6251 +#ifdef CONFIG_PAX_EMUPLT
6252 + case 2:
6253 + case 3:
6254 + return;
6255 +#endif
6256 +
6257 + }
6258 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6259 + do_group_exit(SIGKILL);
6260 + }
6261 +#endif
6262 +
6263 /* Allow reads even for write-only mappings */
6264 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6265 goto bad_area;
6266 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6267 index 504c062..6fcb9c6 100644
6268 --- a/arch/sparc/mm/fault_64.c
6269 +++ b/arch/sparc/mm/fault_64.c
6270 @@ -21,6 +21,9 @@
6271 #include <linux/kprobes.h>
6272 #include <linux/kdebug.h>
6273 #include <linux/percpu.h>
6274 +#include <linux/slab.h>
6275 +#include <linux/pagemap.h>
6276 +#include <linux/compiler.h>
6277
6278 #include <asm/page.h>
6279 #include <asm/pgtable.h>
6280 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6281 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6282 regs->tpc);
6283 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6284 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6285 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6286 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6287 dump_stack();
6288 unhandled_fault(regs->tpc, current, regs);
6289 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6290 show_regs(regs);
6291 }
6292
6293 +#ifdef CONFIG_PAX_PAGEEXEC
6294 +#ifdef CONFIG_PAX_DLRESOLVE
6295 +static void pax_emuplt_close(struct vm_area_struct *vma)
6296 +{
6297 + vma->vm_mm->call_dl_resolve = 0UL;
6298 +}
6299 +
6300 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6301 +{
6302 + unsigned int *kaddr;
6303 +
6304 + vmf->page = alloc_page(GFP_HIGHUSER);
6305 + if (!vmf->page)
6306 + return VM_FAULT_OOM;
6307 +
6308 + kaddr = kmap(vmf->page);
6309 + memset(kaddr, 0, PAGE_SIZE);
6310 + kaddr[0] = 0x9DE3BFA8U; /* save */
6311 + flush_dcache_page(vmf->page);
6312 + kunmap(vmf->page);
6313 + return VM_FAULT_MAJOR;
6314 +}
6315 +
6316 +static const struct vm_operations_struct pax_vm_ops = {
6317 + .close = pax_emuplt_close,
6318 + .fault = pax_emuplt_fault
6319 +};
6320 +
6321 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6322 +{
6323 + int ret;
6324 +
6325 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6326 + vma->vm_mm = current->mm;
6327 + vma->vm_start = addr;
6328 + vma->vm_end = addr + PAGE_SIZE;
6329 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6330 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6331 + vma->vm_ops = &pax_vm_ops;
6332 +
6333 + ret = insert_vm_struct(current->mm, vma);
6334 + if (ret)
6335 + return ret;
6336 +
6337 + ++current->mm->total_vm;
6338 + return 0;
6339 +}
6340 +#endif
6341 +
6342 +/*
6343 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6344 + *
6345 + * returns 1 when task should be killed
6346 + * 2 when patched PLT trampoline was detected
6347 + * 3 when unpatched PLT trampoline was detected
6348 + */
6349 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6350 +{
6351 +
6352 +#ifdef CONFIG_PAX_EMUPLT
6353 + int err;
6354 +
6355 + do { /* PaX: patched PLT emulation #1 */
6356 + unsigned int sethi1, sethi2, jmpl;
6357 +
6358 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6359 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6360 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6361 +
6362 + if (err)
6363 + break;
6364 +
6365 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6366 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6367 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6368 + {
6369 + unsigned long addr;
6370 +
6371 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6372 + addr = regs->u_regs[UREG_G1];
6373 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6374 +
6375 + if (test_thread_flag(TIF_32BIT))
6376 + addr &= 0xFFFFFFFFUL;
6377 +
6378 + regs->tpc = addr;
6379 + regs->tnpc = addr+4;
6380 + return 2;
6381 + }
6382 + } while (0);
6383 +
6384 + { /* PaX: patched PLT emulation #2 */
6385 + unsigned int ba;
6386 +
6387 + err = get_user(ba, (unsigned int *)regs->tpc);
6388 +
6389 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6390 + unsigned long addr;
6391 +
6392 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6393 +
6394 + if (test_thread_flag(TIF_32BIT))
6395 + addr &= 0xFFFFFFFFUL;
6396 +
6397 + regs->tpc = addr;
6398 + regs->tnpc = addr+4;
6399 + return 2;
6400 + }
6401 + }
6402 +
6403 + do { /* PaX: patched PLT emulation #3 */
6404 + unsigned int sethi, jmpl, nop;
6405 +
6406 + err = get_user(sethi, (unsigned int *)regs->tpc);
6407 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6408 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6409 +
6410 + if (err)
6411 + break;
6412 +
6413 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6414 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6415 + nop == 0x01000000U)
6416 + {
6417 + unsigned long addr;
6418 +
6419 + addr = (sethi & 0x003FFFFFU) << 10;
6420 + regs->u_regs[UREG_G1] = addr;
6421 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6422 +
6423 + if (test_thread_flag(TIF_32BIT))
6424 + addr &= 0xFFFFFFFFUL;
6425 +
6426 + regs->tpc = addr;
6427 + regs->tnpc = addr+4;
6428 + return 2;
6429 + }
6430 + } while (0);
6431 +
6432 + do { /* PaX: patched PLT emulation #4 */
6433 + unsigned int sethi, mov1, call, mov2;
6434 +
6435 + err = get_user(sethi, (unsigned int *)regs->tpc);
6436 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6437 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6438 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6439 +
6440 + if (err)
6441 + break;
6442 +
6443 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6444 + mov1 == 0x8210000FU &&
6445 + (call & 0xC0000000U) == 0x40000000U &&
6446 + mov2 == 0x9E100001U)
6447 + {
6448 + unsigned long addr;
6449 +
6450 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6451 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6452 +
6453 + if (test_thread_flag(TIF_32BIT))
6454 + addr &= 0xFFFFFFFFUL;
6455 +
6456 + regs->tpc = addr;
6457 + regs->tnpc = addr+4;
6458 + return 2;
6459 + }
6460 + } while (0);
6461 +
6462 + do { /* PaX: patched PLT emulation #5 */
6463 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6464 +
6465 + err = get_user(sethi, (unsigned int *)regs->tpc);
6466 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6467 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6468 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6469 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6470 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6471 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6472 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6473 +
6474 + if (err)
6475 + break;
6476 +
6477 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6478 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6479 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6480 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6481 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6482 + sllx == 0x83287020U &&
6483 + jmpl == 0x81C04005U &&
6484 + nop == 0x01000000U)
6485 + {
6486 + unsigned long addr;
6487 +
6488 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6489 + regs->u_regs[UREG_G1] <<= 32;
6490 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6491 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6492 + regs->tpc = addr;
6493 + regs->tnpc = addr+4;
6494 + return 2;
6495 + }
6496 + } while (0);
6497 +
6498 + do { /* PaX: patched PLT emulation #6 */
6499 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6500 +
6501 + err = get_user(sethi, (unsigned int *)regs->tpc);
6502 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6503 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6504 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6505 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6506 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6507 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6508 +
6509 + if (err)
6510 + break;
6511 +
6512 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6513 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6514 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6515 + sllx == 0x83287020U &&
6516 + (or & 0xFFFFE000U) == 0x8A116000U &&
6517 + jmpl == 0x81C04005U &&
6518 + nop == 0x01000000U)
6519 + {
6520 + unsigned long addr;
6521 +
6522 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6523 + regs->u_regs[UREG_G1] <<= 32;
6524 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6525 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6526 + regs->tpc = addr;
6527 + regs->tnpc = addr+4;
6528 + return 2;
6529 + }
6530 + } while (0);
6531 +
6532 + do { /* PaX: unpatched PLT emulation step 1 */
6533 + unsigned int sethi, ba, nop;
6534 +
6535 + err = get_user(sethi, (unsigned int *)regs->tpc);
6536 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6537 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6538 +
6539 + if (err)
6540 + break;
6541 +
6542 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6543 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6544 + nop == 0x01000000U)
6545 + {
6546 + unsigned long addr;
6547 + unsigned int save, call;
6548 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6549 +
6550 + if ((ba & 0xFFC00000U) == 0x30800000U)
6551 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6552 + else
6553 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6554 +
6555 + if (test_thread_flag(TIF_32BIT))
6556 + addr &= 0xFFFFFFFFUL;
6557 +
6558 + err = get_user(save, (unsigned int *)addr);
6559 + err |= get_user(call, (unsigned int *)(addr+4));
6560 + err |= get_user(nop, (unsigned int *)(addr+8));
6561 + if (err)
6562 + break;
6563 +
6564 +#ifdef CONFIG_PAX_DLRESOLVE
6565 + if (save == 0x9DE3BFA8U &&
6566 + (call & 0xC0000000U) == 0x40000000U &&
6567 + nop == 0x01000000U)
6568 + {
6569 + struct vm_area_struct *vma;
6570 + unsigned long call_dl_resolve;
6571 +
6572 + down_read(&current->mm->mmap_sem);
6573 + call_dl_resolve = current->mm->call_dl_resolve;
6574 + up_read(&current->mm->mmap_sem);
6575 + if (likely(call_dl_resolve))
6576 + goto emulate;
6577 +
6578 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6579 +
6580 + down_write(&current->mm->mmap_sem);
6581 + if (current->mm->call_dl_resolve) {
6582 + call_dl_resolve = current->mm->call_dl_resolve;
6583 + up_write(&current->mm->mmap_sem);
6584 + if (vma)
6585 + kmem_cache_free(vm_area_cachep, vma);
6586 + goto emulate;
6587 + }
6588 +
6589 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6590 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6591 + up_write(&current->mm->mmap_sem);
6592 + if (vma)
6593 + kmem_cache_free(vm_area_cachep, vma);
6594 + return 1;
6595 + }
6596 +
6597 + if (pax_insert_vma(vma, call_dl_resolve)) {
6598 + up_write(&current->mm->mmap_sem);
6599 + kmem_cache_free(vm_area_cachep, vma);
6600 + return 1;
6601 + }
6602 +
6603 + current->mm->call_dl_resolve = call_dl_resolve;
6604 + up_write(&current->mm->mmap_sem);
6605 +
6606 +emulate:
6607 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6608 + regs->tpc = call_dl_resolve;
6609 + regs->tnpc = addr+4;
6610 + return 3;
6611 + }
6612 +#endif
6613 +
6614 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6615 + if ((save & 0xFFC00000U) == 0x05000000U &&
6616 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6617 + nop == 0x01000000U)
6618 + {
6619 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6620 + regs->u_regs[UREG_G2] = addr + 4;
6621 + addr = (save & 0x003FFFFFU) << 10;
6622 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6623 +
6624 + if (test_thread_flag(TIF_32BIT))
6625 + addr &= 0xFFFFFFFFUL;
6626 +
6627 + regs->tpc = addr;
6628 + regs->tnpc = addr+4;
6629 + return 3;
6630 + }
6631 +
6632 + /* PaX: 64-bit PLT stub */
6633 + err = get_user(sethi1, (unsigned int *)addr);
6634 + err |= get_user(sethi2, (unsigned int *)(addr+4));
6635 + err |= get_user(or1, (unsigned int *)(addr+8));
6636 + err |= get_user(or2, (unsigned int *)(addr+12));
6637 + err |= get_user(sllx, (unsigned int *)(addr+16));
6638 + err |= get_user(add, (unsigned int *)(addr+20));
6639 + err |= get_user(jmpl, (unsigned int *)(addr+24));
6640 + err |= get_user(nop, (unsigned int *)(addr+28));
6641 + if (err)
6642 + break;
6643 +
6644 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6645 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6646 + (or1 & 0xFFFFE000U) == 0x88112000U &&
6647 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6648 + sllx == 0x89293020U &&
6649 + add == 0x8A010005U &&
6650 + jmpl == 0x89C14000U &&
6651 + nop == 0x01000000U)
6652 + {
6653 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6654 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6655 + regs->u_regs[UREG_G4] <<= 32;
6656 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6657 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6658 + regs->u_regs[UREG_G4] = addr + 24;
6659 + addr = regs->u_regs[UREG_G5];
6660 + regs->tpc = addr;
6661 + regs->tnpc = addr+4;
6662 + return 3;
6663 + }
6664 + }
6665 + } while (0);
6666 +
6667 +#ifdef CONFIG_PAX_DLRESOLVE
6668 + do { /* PaX: unpatched PLT emulation step 2 */
6669 + unsigned int save, call, nop;
6670 +
6671 + err = get_user(save, (unsigned int *)(regs->tpc-4));
6672 + err |= get_user(call, (unsigned int *)regs->tpc);
6673 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6674 + if (err)
6675 + break;
6676 +
6677 + if (save == 0x9DE3BFA8U &&
6678 + (call & 0xC0000000U) == 0x40000000U &&
6679 + nop == 0x01000000U)
6680 + {
6681 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6682 +
6683 + if (test_thread_flag(TIF_32BIT))
6684 + dl_resolve &= 0xFFFFFFFFUL;
6685 +
6686 + regs->u_regs[UREG_RETPC] = regs->tpc;
6687 + regs->tpc = dl_resolve;
6688 + regs->tnpc = dl_resolve+4;
6689 + return 3;
6690 + }
6691 + } while (0);
6692 +#endif
6693 +
6694 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6695 + unsigned int sethi, ba, nop;
6696 +
6697 + err = get_user(sethi, (unsigned int *)regs->tpc);
6698 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6699 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6700 +
6701 + if (err)
6702 + break;
6703 +
6704 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6705 + (ba & 0xFFF00000U) == 0x30600000U &&
6706 + nop == 0x01000000U)
6707 + {
6708 + unsigned long addr;
6709 +
6710 + addr = (sethi & 0x003FFFFFU) << 10;
6711 + regs->u_regs[UREG_G1] = addr;
6712 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6713 +
6714 + if (test_thread_flag(TIF_32BIT))
6715 + addr &= 0xFFFFFFFFUL;
6716 +
6717 + regs->tpc = addr;
6718 + regs->tnpc = addr+4;
6719 + return 2;
6720 + }
6721 + } while (0);
6722 +
6723 +#endif
6724 +
6725 + return 1;
6726 +}
6727 +
6728 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6729 +{
6730 + unsigned long i;
6731 +
6732 + printk(KERN_ERR "PAX: bytes at PC: ");
6733 + for (i = 0; i < 8; i++) {
6734 + unsigned int c;
6735 + if (get_user(c, (unsigned int *)pc+i))
6736 + printk(KERN_CONT "???????? ");
6737 + else
6738 + printk(KERN_CONT "%08x ", c);
6739 + }
6740 + printk("\n");
6741 +}
6742 +#endif
6743 +
6744 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6745 {
6746 struct mm_struct *mm = current->mm;
6747 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6748 if (!vma)
6749 goto bad_area;
6750
6751 +#ifdef CONFIG_PAX_PAGEEXEC
6752 + /* PaX: detect ITLB misses on non-exec pages */
6753 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6754 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6755 + {
6756 + if (address != regs->tpc)
6757 + goto good_area;
6758 +
6759 + up_read(&mm->mmap_sem);
6760 + switch (pax_handle_fetch_fault(regs)) {
6761 +
6762 +#ifdef CONFIG_PAX_EMUPLT
6763 + case 2:
6764 + case 3:
6765 + return;
6766 +#endif
6767 +
6768 + }
6769 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6770 + do_group_exit(SIGKILL);
6771 + }
6772 +#endif
6773 +
6774 /* Pure DTLB misses do not tell us whether the fault causing
6775 * load/store/atomic was a write or not, it only says that there
6776 * was no match. So in such a case we (carefully) read the
6777 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6778 index 07e1453..0a7d9e9 100644
6779 --- a/arch/sparc/mm/hugetlbpage.c
6780 +++ b/arch/sparc/mm/hugetlbpage.c
6781 @@ -67,7 +67,7 @@ full_search:
6782 }
6783 return -ENOMEM;
6784 }
6785 - if (likely(!vma || addr + len <= vma->vm_start)) {
6786 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6787 /*
6788 * Remember the place where we stopped the search:
6789 */
6790 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6791 /* make sure it can fit in the remaining address space */
6792 if (likely(addr > len)) {
6793 vma = find_vma(mm, addr-len);
6794 - if (!vma || addr <= vma->vm_start) {
6795 + if (check_heap_stack_gap(vma, addr - len, len)) {
6796 /* remember the address as a hint for next time */
6797 return (mm->free_area_cache = addr-len);
6798 }
6799 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6800 if (unlikely(mm->mmap_base < len))
6801 goto bottomup;
6802
6803 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6804 + addr = mm->mmap_base - len;
6805
6806 do {
6807 + addr &= HPAGE_MASK;
6808 /*
6809 * Lookup failure means no vma is above this address,
6810 * else if new region fits below vma->vm_start,
6811 * return with success:
6812 */
6813 vma = find_vma(mm, addr);
6814 - if (likely(!vma || addr+len <= vma->vm_start)) {
6815 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6816 /* remember the address as a hint for next time */
6817 return (mm->free_area_cache = addr);
6818 }
6819 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6820 mm->cached_hole_size = vma->vm_start - addr;
6821
6822 /* try just below the current vma->vm_start */
6823 - addr = (vma->vm_start-len) & HPAGE_MASK;
6824 - } while (likely(len < vma->vm_start));
6825 + addr = skip_heap_stack_gap(vma, len);
6826 + } while (!IS_ERR_VALUE(addr));
6827
6828 bottomup:
6829 /*
6830 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6831 if (addr) {
6832 addr = ALIGN(addr, HPAGE_SIZE);
6833 vma = find_vma(mm, addr);
6834 - if (task_size - len >= addr &&
6835 - (!vma || addr + len <= vma->vm_start))
6836 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6837 return addr;
6838 }
6839 if (mm->get_unmapped_area == arch_get_unmapped_area)
6840 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6841 index 7b00de6..78239f4 100644
6842 --- a/arch/sparc/mm/init_32.c
6843 +++ b/arch/sparc/mm/init_32.c
6844 @@ -316,6 +316,9 @@ extern void device_scan(void);
6845 pgprot_t PAGE_SHARED __read_mostly;
6846 EXPORT_SYMBOL(PAGE_SHARED);
6847
6848 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6849 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6850 +
6851 void __init paging_init(void)
6852 {
6853 switch(sparc_cpu_model) {
6854 @@ -344,17 +347,17 @@ void __init paging_init(void)
6855
6856 /* Initialize the protection map with non-constant, MMU dependent values. */
6857 protection_map[0] = PAGE_NONE;
6858 - protection_map[1] = PAGE_READONLY;
6859 - protection_map[2] = PAGE_COPY;
6860 - protection_map[3] = PAGE_COPY;
6861 + protection_map[1] = PAGE_READONLY_NOEXEC;
6862 + protection_map[2] = PAGE_COPY_NOEXEC;
6863 + protection_map[3] = PAGE_COPY_NOEXEC;
6864 protection_map[4] = PAGE_READONLY;
6865 protection_map[5] = PAGE_READONLY;
6866 protection_map[6] = PAGE_COPY;
6867 protection_map[7] = PAGE_COPY;
6868 protection_map[8] = PAGE_NONE;
6869 - protection_map[9] = PAGE_READONLY;
6870 - protection_map[10] = PAGE_SHARED;
6871 - protection_map[11] = PAGE_SHARED;
6872 + protection_map[9] = PAGE_READONLY_NOEXEC;
6873 + protection_map[10] = PAGE_SHARED_NOEXEC;
6874 + protection_map[11] = PAGE_SHARED_NOEXEC;
6875 protection_map[12] = PAGE_READONLY;
6876 protection_map[13] = PAGE_READONLY;
6877 protection_map[14] = PAGE_SHARED;
6878 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6879 index cbef74e..c38fead 100644
6880 --- a/arch/sparc/mm/srmmu.c
6881 +++ b/arch/sparc/mm/srmmu.c
6882 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6883 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6884 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6885 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6886 +
6887 +#ifdef CONFIG_PAX_PAGEEXEC
6888 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6889 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6890 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6891 +#endif
6892 +
6893 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6894 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6895
6896 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
6897 index 27fe667..36d474c 100644
6898 --- a/arch/tile/include/asm/atomic_64.h
6899 +++ b/arch/tile/include/asm/atomic_64.h
6900 @@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
6901
6902 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6903
6904 +#define atomic64_read_unchecked(v) atomic64_read(v)
6905 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6906 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6907 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6908 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6909 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
6910 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6911 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
6912 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6913 +
6914 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
6915 #define smp_mb__before_atomic_dec() smp_mb()
6916 #define smp_mb__after_atomic_dec() smp_mb()
6917 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
6918 index 392e533..536b092 100644
6919 --- a/arch/tile/include/asm/cache.h
6920 +++ b/arch/tile/include/asm/cache.h
6921 @@ -15,11 +15,12 @@
6922 #ifndef _ASM_TILE_CACHE_H
6923 #define _ASM_TILE_CACHE_H
6924
6925 +#include <linux/const.h>
6926 #include <arch/chip.h>
6927
6928 /* bytes per L1 data cache line */
6929 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
6930 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6931 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6932
6933 /* bytes per L2 cache line */
6934 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
6935 diff --git a/arch/um/Makefile b/arch/um/Makefile
6936 index 28688e6..4c0aa1c 100644
6937 --- a/arch/um/Makefile
6938 +++ b/arch/um/Makefile
6939 @@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6940 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6941 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
6942
6943 +ifdef CONSTIFY_PLUGIN
6944 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6945 +endif
6946 +
6947 #This will adjust *FLAGS accordingly to the platform.
6948 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
6949
6950 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
6951 index 19e1bdd..3665b77 100644
6952 --- a/arch/um/include/asm/cache.h
6953 +++ b/arch/um/include/asm/cache.h
6954 @@ -1,6 +1,7 @@
6955 #ifndef __UM_CACHE_H
6956 #define __UM_CACHE_H
6957
6958 +#include <linux/const.h>
6959
6960 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
6961 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6962 @@ -12,6 +13,6 @@
6963 # define L1_CACHE_SHIFT 5
6964 #endif
6965
6966 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6967 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6968
6969 #endif
6970 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6971 index 6c03acd..a5e0215 100644
6972 --- a/arch/um/include/asm/kmap_types.h
6973 +++ b/arch/um/include/asm/kmap_types.h
6974 @@ -23,6 +23,7 @@ enum km_type {
6975 KM_IRQ1,
6976 KM_SOFTIRQ0,
6977 KM_SOFTIRQ1,
6978 + KM_CLEARPAGE,
6979 KM_TYPE_NR
6980 };
6981
6982 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6983 index 7cfc3ce..cbd1a58 100644
6984 --- a/arch/um/include/asm/page.h
6985 +++ b/arch/um/include/asm/page.h
6986 @@ -14,6 +14,9 @@
6987 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6988 #define PAGE_MASK (~(PAGE_SIZE-1))
6989
6990 +#define ktla_ktva(addr) (addr)
6991 +#define ktva_ktla(addr) (addr)
6992 +
6993 #ifndef __ASSEMBLY__
6994
6995 struct page;
6996 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6997 index 69f2490..2634831 100644
6998 --- a/arch/um/kernel/process.c
6999 +++ b/arch/um/kernel/process.c
7000 @@ -408,22 +408,6 @@ int singlestepping(void * t)
7001 return 2;
7002 }
7003
7004 -/*
7005 - * Only x86 and x86_64 have an arch_align_stack().
7006 - * All other arches have "#define arch_align_stack(x) (x)"
7007 - * in their asm/system.h
7008 - * As this is included in UML from asm-um/system-generic.h,
7009 - * we can use it to behave as the subarch does.
7010 - */
7011 -#ifndef arch_align_stack
7012 -unsigned long arch_align_stack(unsigned long sp)
7013 -{
7014 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7015 - sp -= get_random_int() % 8192;
7016 - return sp & ~0xf;
7017 -}
7018 -#endif
7019 -
7020 unsigned long get_wchan(struct task_struct *p)
7021 {
7022 unsigned long stack_page, sp, ip;
7023 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7024 index ad8f795..2c7eec6 100644
7025 --- a/arch/unicore32/include/asm/cache.h
7026 +++ b/arch/unicore32/include/asm/cache.h
7027 @@ -12,8 +12,10 @@
7028 #ifndef __UNICORE_CACHE_H__
7029 #define __UNICORE_CACHE_H__
7030
7031 -#define L1_CACHE_SHIFT (5)
7032 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7033 +#include <linux/const.h>
7034 +
7035 +#define L1_CACHE_SHIFT 5
7036 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7037
7038 /*
7039 * Memory returned by kmalloc() may be used for DMA, so we must make
7040 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7041 index 5bed94e..fbcf200 100644
7042 --- a/arch/x86/Kconfig
7043 +++ b/arch/x86/Kconfig
7044 @@ -226,7 +226,7 @@ config X86_HT
7045
7046 config X86_32_LAZY_GS
7047 def_bool y
7048 - depends on X86_32 && !CC_STACKPROTECTOR
7049 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7050
7051 config ARCH_HWEIGHT_CFLAGS
7052 string
7053 @@ -1058,7 +1058,7 @@ choice
7054
7055 config NOHIGHMEM
7056 bool "off"
7057 - depends on !X86_NUMAQ
7058 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7059 ---help---
7060 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7061 However, the address space of 32-bit x86 processors is only 4
7062 @@ -1095,7 +1095,7 @@ config NOHIGHMEM
7063
7064 config HIGHMEM4G
7065 bool "4GB"
7066 - depends on !X86_NUMAQ
7067 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7068 ---help---
7069 Select this if you have a 32-bit processor and between 1 and 4
7070 gigabytes of physical RAM.
7071 @@ -1149,7 +1149,7 @@ config PAGE_OFFSET
7072 hex
7073 default 0xB0000000 if VMSPLIT_3G_OPT
7074 default 0x80000000 if VMSPLIT_2G
7075 - default 0x78000000 if VMSPLIT_2G_OPT
7076 + default 0x70000000 if VMSPLIT_2G_OPT
7077 default 0x40000000 if VMSPLIT_1G
7078 default 0xC0000000
7079 depends on X86_32
7080 @@ -1539,6 +1539,7 @@ config SECCOMP
7081
7082 config CC_STACKPROTECTOR
7083 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7084 + depends on X86_64 || !PAX_MEMORY_UDEREF
7085 ---help---
7086 This option turns on the -fstack-protector GCC feature. This
7087 feature puts, at the beginning of functions, a canary value on
7088 @@ -1596,6 +1597,7 @@ config KEXEC_JUMP
7089 config PHYSICAL_START
7090 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7091 default "0x1000000"
7092 + range 0x400000 0x40000000
7093 ---help---
7094 This gives the physical address where the kernel is loaded.
7095
7096 @@ -1659,6 +1661,7 @@ config X86_NEED_RELOCS
7097 config PHYSICAL_ALIGN
7098 hex "Alignment value to which kernel should be aligned" if X86_32
7099 default "0x1000000"
7100 + range 0x400000 0x1000000 if PAX_KERNEXEC
7101 range 0x2000 0x1000000
7102 ---help---
7103 This value puts the alignment restrictions on physical address
7104 @@ -1690,9 +1693,10 @@ config HOTPLUG_CPU
7105 Say N if you want to disable CPU hotplug.
7106
7107 config COMPAT_VDSO
7108 - def_bool y
7109 + def_bool n
7110 prompt "Compat VDSO support"
7111 depends on X86_32 || IA32_EMULATION
7112 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7113 ---help---
7114 Map the 32-bit VDSO to the predictable old-style address too.
7115
7116 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7117 index 3c57033..22d44aa 100644
7118 --- a/arch/x86/Kconfig.cpu
7119 +++ b/arch/x86/Kconfig.cpu
7120 @@ -335,7 +335,7 @@ config X86_PPRO_FENCE
7121
7122 config X86_F00F_BUG
7123 def_bool y
7124 - depends on M586MMX || M586TSC || M586 || M486 || M386
7125 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7126
7127 config X86_INVD_BUG
7128 def_bool y
7129 @@ -359,7 +359,7 @@ config X86_POPAD_OK
7130
7131 config X86_ALIGNMENT_16
7132 def_bool y
7133 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7134 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7135
7136 config X86_INTEL_USERCOPY
7137 def_bool y
7138 @@ -405,7 +405,7 @@ config X86_CMPXCHG64
7139 # generates cmov.
7140 config X86_CMOV
7141 def_bool y
7142 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7143 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7144
7145 config X86_MINIMUM_CPU_FAMILY
7146 int
7147 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7148 index e46c214..7c72b55 100644
7149 --- a/arch/x86/Kconfig.debug
7150 +++ b/arch/x86/Kconfig.debug
7151 @@ -84,7 +84,7 @@ config X86_PTDUMP
7152 config DEBUG_RODATA
7153 bool "Write protect kernel read-only data structures"
7154 default y
7155 - depends on DEBUG_KERNEL
7156 + depends on DEBUG_KERNEL && BROKEN
7157 ---help---
7158 Mark the kernel read-only data as write-protected in the pagetables,
7159 in order to catch accidental (and incorrect) writes to such const
7160 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7161
7162 config DEBUG_SET_MODULE_RONX
7163 bool "Set loadable kernel module data as NX and text as RO"
7164 - depends on MODULES
7165 + depends on MODULES && BROKEN
7166 ---help---
7167 This option helps catch unintended modifications to loadable
7168 kernel module's text and read-only data. It also prevents execution
7169 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7170 index 209ba12..15140db 100644
7171 --- a/arch/x86/Makefile
7172 +++ b/arch/x86/Makefile
7173 @@ -46,6 +46,7 @@ else
7174 UTS_MACHINE := x86_64
7175 CHECKFLAGS += -D__x86_64__ -m64
7176
7177 + biarch := $(call cc-option,-m64)
7178 KBUILD_AFLAGS += -m64
7179 KBUILD_CFLAGS += -m64
7180
7181 @@ -201,3 +202,12 @@ define archhelp
7182 echo ' FDARGS="..." arguments for the booted kernel'
7183 echo ' FDINITRD=file initrd for the booted kernel'
7184 endef
7185 +
7186 +define OLD_LD
7187 +
7188 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7189 +*** Please upgrade your binutils to 2.18 or newer
7190 +endef
7191 +
7192 +archprepare:
7193 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7194 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7195 index 95365a8..52f857b 100644
7196 --- a/arch/x86/boot/Makefile
7197 +++ b/arch/x86/boot/Makefile
7198 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7199 $(call cc-option, -fno-stack-protector) \
7200 $(call cc-option, -mpreferred-stack-boundary=2)
7201 KBUILD_CFLAGS += $(call cc-option, -m32)
7202 +ifdef CONSTIFY_PLUGIN
7203 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7204 +endif
7205 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7206 GCOV_PROFILE := n
7207
7208 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7209 index 878e4b9..20537ab 100644
7210 --- a/arch/x86/boot/bitops.h
7211 +++ b/arch/x86/boot/bitops.h
7212 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7213 u8 v;
7214 const u32 *p = (const u32 *)addr;
7215
7216 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7217 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7218 return v;
7219 }
7220
7221 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7222
7223 static inline void set_bit(int nr, void *addr)
7224 {
7225 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7226 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7227 }
7228
7229 #endif /* BOOT_BITOPS_H */
7230 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7231 index c7093bd..d4247ffe0 100644
7232 --- a/arch/x86/boot/boot.h
7233 +++ b/arch/x86/boot/boot.h
7234 @@ -85,7 +85,7 @@ static inline void io_delay(void)
7235 static inline u16 ds(void)
7236 {
7237 u16 seg;
7238 - asm("movw %%ds,%0" : "=rm" (seg));
7239 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7240 return seg;
7241 }
7242
7243 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7244 static inline int memcmp(const void *s1, const void *s2, size_t len)
7245 {
7246 u8 diff;
7247 - asm("repe; cmpsb; setnz %0"
7248 + asm volatile("repe; cmpsb; setnz %0"
7249 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7250 return diff;
7251 }
7252 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7253 index b123b9a..2cf2f23 100644
7254 --- a/arch/x86/boot/compressed/Makefile
7255 +++ b/arch/x86/boot/compressed/Makefile
7256 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7257 KBUILD_CFLAGS += $(cflags-y)
7258 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7259 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7260 +ifdef CONSTIFY_PLUGIN
7261 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7262 +endif
7263
7264 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7265 GCOV_PROFILE := n
7266 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7267 index a055993..47e126c 100644
7268 --- a/arch/x86/boot/compressed/head_32.S
7269 +++ b/arch/x86/boot/compressed/head_32.S
7270 @@ -98,7 +98,7 @@ preferred_addr:
7271 notl %eax
7272 andl %eax, %ebx
7273 #else
7274 - movl $LOAD_PHYSICAL_ADDR, %ebx
7275 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7276 #endif
7277
7278 /* Target address to relocate to for decompression */
7279 @@ -184,7 +184,7 @@ relocated:
7280 * and where it was actually loaded.
7281 */
7282 movl %ebp, %ebx
7283 - subl $LOAD_PHYSICAL_ADDR, %ebx
7284 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7285 jz 2f /* Nothing to be done if loaded at compiled addr. */
7286 /*
7287 * Process relocations.
7288 @@ -192,8 +192,7 @@ relocated:
7289
7290 1: subl $4, %edi
7291 movl (%edi), %ecx
7292 - testl %ecx, %ecx
7293 - jz 2f
7294 + jecxz 2f
7295 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7296 jmp 1b
7297 2:
7298 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7299 index 558d76c..606aa24 100644
7300 --- a/arch/x86/boot/compressed/head_64.S
7301 +++ b/arch/x86/boot/compressed/head_64.S
7302 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7303 notl %eax
7304 andl %eax, %ebx
7305 #else
7306 - movl $LOAD_PHYSICAL_ADDR, %ebx
7307 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7308 #endif
7309
7310 /* Target address to relocate to for decompression */
7311 @@ -253,7 +253,7 @@ preferred_addr:
7312 notq %rax
7313 andq %rax, %rbp
7314 #else
7315 - movq $LOAD_PHYSICAL_ADDR, %rbp
7316 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7317 #endif
7318
7319 /* Target address to relocate to for decompression */
7320 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7321 index 7116dcb..d9ae1d7 100644
7322 --- a/arch/x86/boot/compressed/misc.c
7323 +++ b/arch/x86/boot/compressed/misc.c
7324 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
7325 case PT_LOAD:
7326 #ifdef CONFIG_RELOCATABLE
7327 dest = output;
7328 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7329 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7330 #else
7331 dest = (void *)(phdr->p_paddr);
7332 #endif
7333 @@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7334 error("Destination address too large");
7335 #endif
7336 #ifndef CONFIG_RELOCATABLE
7337 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7338 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7339 error("Wrong destination address");
7340 #endif
7341
7342 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7343 index 89bbf4e..869908e 100644
7344 --- a/arch/x86/boot/compressed/relocs.c
7345 +++ b/arch/x86/boot/compressed/relocs.c
7346 @@ -13,8 +13,11 @@
7347
7348 static void die(char *fmt, ...);
7349
7350 +#include "../../../../include/generated/autoconf.h"
7351 +
7352 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7353 static Elf32_Ehdr ehdr;
7354 +static Elf32_Phdr *phdr;
7355 static unsigned long reloc_count, reloc_idx;
7356 static unsigned long *relocs;
7357
7358 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
7359 }
7360 }
7361
7362 +static void read_phdrs(FILE *fp)
7363 +{
7364 + unsigned int i;
7365 +
7366 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7367 + if (!phdr) {
7368 + die("Unable to allocate %d program headers\n",
7369 + ehdr.e_phnum);
7370 + }
7371 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7372 + die("Seek to %d failed: %s\n",
7373 + ehdr.e_phoff, strerror(errno));
7374 + }
7375 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7376 + die("Cannot read ELF program headers: %s\n",
7377 + strerror(errno));
7378 + }
7379 + for(i = 0; i < ehdr.e_phnum; i++) {
7380 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7381 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7382 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7383 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7384 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7385 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7386 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7387 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7388 + }
7389 +
7390 +}
7391 +
7392 static void read_shdrs(FILE *fp)
7393 {
7394 - int i;
7395 + unsigned int i;
7396 Elf32_Shdr shdr;
7397
7398 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7399 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
7400
7401 static void read_strtabs(FILE *fp)
7402 {
7403 - int i;
7404 + unsigned int i;
7405 for (i = 0; i < ehdr.e_shnum; i++) {
7406 struct section *sec = &secs[i];
7407 if (sec->shdr.sh_type != SHT_STRTAB) {
7408 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
7409
7410 static void read_symtabs(FILE *fp)
7411 {
7412 - int i,j;
7413 + unsigned int i,j;
7414 for (i = 0; i < ehdr.e_shnum; i++) {
7415 struct section *sec = &secs[i];
7416 if (sec->shdr.sh_type != SHT_SYMTAB) {
7417 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
7418
7419 static void read_relocs(FILE *fp)
7420 {
7421 - int i,j;
7422 + unsigned int i,j;
7423 + uint32_t base;
7424 +
7425 for (i = 0; i < ehdr.e_shnum; i++) {
7426 struct section *sec = &secs[i];
7427 if (sec->shdr.sh_type != SHT_REL) {
7428 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
7429 die("Cannot read symbol table: %s\n",
7430 strerror(errno));
7431 }
7432 + base = 0;
7433 + for (j = 0; j < ehdr.e_phnum; j++) {
7434 + if (phdr[j].p_type != PT_LOAD )
7435 + continue;
7436 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7437 + continue;
7438 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7439 + break;
7440 + }
7441 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7442 Elf32_Rel *rel = &sec->reltab[j];
7443 - rel->r_offset = elf32_to_cpu(rel->r_offset);
7444 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7445 rel->r_info = elf32_to_cpu(rel->r_info);
7446 }
7447 }
7448 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
7449
7450 static void print_absolute_symbols(void)
7451 {
7452 - int i;
7453 + unsigned int i;
7454 printf("Absolute symbols\n");
7455 printf(" Num: Value Size Type Bind Visibility Name\n");
7456 for (i = 0; i < ehdr.e_shnum; i++) {
7457 struct section *sec = &secs[i];
7458 char *sym_strtab;
7459 Elf32_Sym *sh_symtab;
7460 - int j;
7461 + unsigned int j;
7462
7463 if (sec->shdr.sh_type != SHT_SYMTAB) {
7464 continue;
7465 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
7466
7467 static void print_absolute_relocs(void)
7468 {
7469 - int i, printed = 0;
7470 + unsigned int i, printed = 0;
7471
7472 for (i = 0; i < ehdr.e_shnum; i++) {
7473 struct section *sec = &secs[i];
7474 struct section *sec_applies, *sec_symtab;
7475 char *sym_strtab;
7476 Elf32_Sym *sh_symtab;
7477 - int j;
7478 + unsigned int j;
7479 if (sec->shdr.sh_type != SHT_REL) {
7480 continue;
7481 }
7482 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
7483
7484 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7485 {
7486 - int i;
7487 + unsigned int i;
7488 /* Walk through the relocations */
7489 for (i = 0; i < ehdr.e_shnum; i++) {
7490 char *sym_strtab;
7491 Elf32_Sym *sh_symtab;
7492 struct section *sec_applies, *sec_symtab;
7493 - int j;
7494 + unsigned int j;
7495 struct section *sec = &secs[i];
7496
7497 if (sec->shdr.sh_type != SHT_REL) {
7498 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7499 !is_rel_reloc(sym_name(sym_strtab, sym))) {
7500 continue;
7501 }
7502 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7503 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7504 + continue;
7505 +
7506 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7507 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7508 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7509 + continue;
7510 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7511 + continue;
7512 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7513 + continue;
7514 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7515 + continue;
7516 +#endif
7517 +
7518 switch (r_type) {
7519 case R_386_NONE:
7520 case R_386_PC32:
7521 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
7522
7523 static void emit_relocs(int as_text)
7524 {
7525 - int i;
7526 + unsigned int i;
7527 /* Count how many relocations I have and allocate space for them. */
7528 reloc_count = 0;
7529 walk_relocs(count_reloc);
7530 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
7531 fname, strerror(errno));
7532 }
7533 read_ehdr(fp);
7534 + read_phdrs(fp);
7535 read_shdrs(fp);
7536 read_strtabs(fp);
7537 read_symtabs(fp);
7538 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7539 index 4d3ff03..e4972ff 100644
7540 --- a/arch/x86/boot/cpucheck.c
7541 +++ b/arch/x86/boot/cpucheck.c
7542 @@ -74,7 +74,7 @@ static int has_fpu(void)
7543 u16 fcw = -1, fsw = -1;
7544 u32 cr0;
7545
7546 - asm("movl %%cr0,%0" : "=r" (cr0));
7547 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7548 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7549 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7550 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7551 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7552 {
7553 u32 f0, f1;
7554
7555 - asm("pushfl ; "
7556 + asm volatile("pushfl ; "
7557 "pushfl ; "
7558 "popl %0 ; "
7559 "movl %0,%1 ; "
7560 @@ -115,7 +115,7 @@ static void get_flags(void)
7561 set_bit(X86_FEATURE_FPU, cpu.flags);
7562
7563 if (has_eflag(X86_EFLAGS_ID)) {
7564 - asm("cpuid"
7565 + asm volatile("cpuid"
7566 : "=a" (max_intel_level),
7567 "=b" (cpu_vendor[0]),
7568 "=d" (cpu_vendor[1]),
7569 @@ -124,7 +124,7 @@ static void get_flags(void)
7570
7571 if (max_intel_level >= 0x00000001 &&
7572 max_intel_level <= 0x0000ffff) {
7573 - asm("cpuid"
7574 + asm volatile("cpuid"
7575 : "=a" (tfms),
7576 "=c" (cpu.flags[4]),
7577 "=d" (cpu.flags[0])
7578 @@ -136,7 +136,7 @@ static void get_flags(void)
7579 cpu.model += ((tfms >> 16) & 0xf) << 4;
7580 }
7581
7582 - asm("cpuid"
7583 + asm volatile("cpuid"
7584 : "=a" (max_amd_level)
7585 : "a" (0x80000000)
7586 : "ebx", "ecx", "edx");
7587 @@ -144,7 +144,7 @@ static void get_flags(void)
7588 if (max_amd_level >= 0x80000001 &&
7589 max_amd_level <= 0x8000ffff) {
7590 u32 eax = 0x80000001;
7591 - asm("cpuid"
7592 + asm volatile("cpuid"
7593 : "+a" (eax),
7594 "=c" (cpu.flags[6]),
7595 "=d" (cpu.flags[1])
7596 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7597 u32 ecx = MSR_K7_HWCR;
7598 u32 eax, edx;
7599
7600 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7601 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7602 eax &= ~(1 << 15);
7603 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7604 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7605
7606 get_flags(); /* Make sure it really did something */
7607 err = check_flags();
7608 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7609 u32 ecx = MSR_VIA_FCR;
7610 u32 eax, edx;
7611
7612 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7613 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7614 eax |= (1<<1)|(1<<7);
7615 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7616 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7617
7618 set_bit(X86_FEATURE_CX8, cpu.flags);
7619 err = check_flags();
7620 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7621 u32 eax, edx;
7622 u32 level = 1;
7623
7624 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7625 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7626 - asm("cpuid"
7627 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7628 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7629 + asm volatile("cpuid"
7630 : "+a" (level), "=d" (cpu.flags[0])
7631 : : "ecx", "ebx");
7632 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7633 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7634
7635 err = check_flags();
7636 }
7637 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7638 index f1bbeeb..aff09cb 100644
7639 --- a/arch/x86/boot/header.S
7640 +++ b/arch/x86/boot/header.S
7641 @@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7642 # single linked list of
7643 # struct setup_data
7644
7645 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7646 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7647
7648 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7649 #define VO_INIT_SIZE (VO__end - VO__text)
7650 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7651 index db75d07..8e6d0af 100644
7652 --- a/arch/x86/boot/memory.c
7653 +++ b/arch/x86/boot/memory.c
7654 @@ -19,7 +19,7 @@
7655
7656 static int detect_memory_e820(void)
7657 {
7658 - int count = 0;
7659 + unsigned int count = 0;
7660 struct biosregs ireg, oreg;
7661 struct e820entry *desc = boot_params.e820_map;
7662 static struct e820entry buf; /* static so it is zeroed */
7663 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7664 index 11e8c6e..fdbb1ed 100644
7665 --- a/arch/x86/boot/video-vesa.c
7666 +++ b/arch/x86/boot/video-vesa.c
7667 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7668
7669 boot_params.screen_info.vesapm_seg = oreg.es;
7670 boot_params.screen_info.vesapm_off = oreg.di;
7671 + boot_params.screen_info.vesapm_size = oreg.cx;
7672 }
7673
7674 /*
7675 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7676 index 43eda28..5ab5fdb 100644
7677 --- a/arch/x86/boot/video.c
7678 +++ b/arch/x86/boot/video.c
7679 @@ -96,7 +96,7 @@ static void store_mode_params(void)
7680 static unsigned int get_entry(void)
7681 {
7682 char entry_buf[4];
7683 - int i, len = 0;
7684 + unsigned int i, len = 0;
7685 int key;
7686 unsigned int v;
7687
7688 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7689 index 5b577d5..3c1fed4 100644
7690 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
7691 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7692 @@ -8,6 +8,8 @@
7693 * including this sentence is retained in full.
7694 */
7695
7696 +#include <asm/alternative-asm.h>
7697 +
7698 .extern crypto_ft_tab
7699 .extern crypto_it_tab
7700 .extern crypto_fl_tab
7701 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7702 je B192; \
7703 leaq 32(r9),r9;
7704
7705 +#define ret pax_force_retaddr 0, 1; ret
7706 +
7707 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7708 movq r1,r2; \
7709 movq r3,r4; \
7710 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7711 index be6d9e3..21fbbca 100644
7712 --- a/arch/x86/crypto/aesni-intel_asm.S
7713 +++ b/arch/x86/crypto/aesni-intel_asm.S
7714 @@ -31,6 +31,7 @@
7715
7716 #include <linux/linkage.h>
7717 #include <asm/inst.h>
7718 +#include <asm/alternative-asm.h>
7719
7720 #ifdef __x86_64__
7721 .data
7722 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
7723 pop %r14
7724 pop %r13
7725 pop %r12
7726 + pax_force_retaddr 0, 1
7727 ret
7728 +ENDPROC(aesni_gcm_dec)
7729
7730
7731 /*****************************************************************************
7732 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
7733 pop %r14
7734 pop %r13
7735 pop %r12
7736 + pax_force_retaddr 0, 1
7737 ret
7738 +ENDPROC(aesni_gcm_enc)
7739
7740 #endif
7741
7742 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
7743 pxor %xmm1, %xmm0
7744 movaps %xmm0, (TKEYP)
7745 add $0x10, TKEYP
7746 + pax_force_retaddr_bts
7747 ret
7748
7749 .align 4
7750 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
7751 shufps $0b01001110, %xmm2, %xmm1
7752 movaps %xmm1, 0x10(TKEYP)
7753 add $0x20, TKEYP
7754 + pax_force_retaddr_bts
7755 ret
7756
7757 .align 4
7758 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
7759
7760 movaps %xmm0, (TKEYP)
7761 add $0x10, TKEYP
7762 + pax_force_retaddr_bts
7763 ret
7764
7765 .align 4
7766 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
7767 pxor %xmm1, %xmm2
7768 movaps %xmm2, (TKEYP)
7769 add $0x10, TKEYP
7770 + pax_force_retaddr_bts
7771 ret
7772
7773 /*
7774 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
7775 #ifndef __x86_64__
7776 popl KEYP
7777 #endif
7778 + pax_force_retaddr 0, 1
7779 ret
7780 +ENDPROC(aesni_set_key)
7781
7782 /*
7783 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7784 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
7785 popl KLEN
7786 popl KEYP
7787 #endif
7788 + pax_force_retaddr 0, 1
7789 ret
7790 +ENDPROC(aesni_enc)
7791
7792 /*
7793 * _aesni_enc1: internal ABI
7794 @@ -1959,6 +1972,7 @@ _aesni_enc1:
7795 AESENC KEY STATE
7796 movaps 0x70(TKEYP), KEY
7797 AESENCLAST KEY STATE
7798 + pax_force_retaddr_bts
7799 ret
7800
7801 /*
7802 @@ -2067,6 +2081,7 @@ _aesni_enc4:
7803 AESENCLAST KEY STATE2
7804 AESENCLAST KEY STATE3
7805 AESENCLAST KEY STATE4
7806 + pax_force_retaddr_bts
7807 ret
7808
7809 /*
7810 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
7811 popl KLEN
7812 popl KEYP
7813 #endif
7814 + pax_force_retaddr 0, 1
7815 ret
7816 +ENDPROC(aesni_dec)
7817
7818 /*
7819 * _aesni_dec1: internal ABI
7820 @@ -2146,6 +2163,7 @@ _aesni_dec1:
7821 AESDEC KEY STATE
7822 movaps 0x70(TKEYP), KEY
7823 AESDECLAST KEY STATE
7824 + pax_force_retaddr_bts
7825 ret
7826
7827 /*
7828 @@ -2254,6 +2272,7 @@ _aesni_dec4:
7829 AESDECLAST KEY STATE2
7830 AESDECLAST KEY STATE3
7831 AESDECLAST KEY STATE4
7832 + pax_force_retaddr_bts
7833 ret
7834
7835 /*
7836 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
7837 popl KEYP
7838 popl LEN
7839 #endif
7840 + pax_force_retaddr 0, 1
7841 ret
7842 +ENDPROC(aesni_ecb_enc)
7843
7844 /*
7845 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7846 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
7847 popl KEYP
7848 popl LEN
7849 #endif
7850 + pax_force_retaddr 0, 1
7851 ret
7852 +ENDPROC(aesni_ecb_dec)
7853
7854 /*
7855 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7856 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
7857 popl LEN
7858 popl IVP
7859 #endif
7860 + pax_force_retaddr 0, 1
7861 ret
7862 +ENDPROC(aesni_cbc_enc)
7863
7864 /*
7865 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7866 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
7867 popl LEN
7868 popl IVP
7869 #endif
7870 + pax_force_retaddr 0, 1
7871 ret
7872 +ENDPROC(aesni_cbc_dec)
7873
7874 #ifdef __x86_64__
7875 .align 16
7876 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
7877 mov $1, TCTR_LOW
7878 MOVQ_R64_XMM TCTR_LOW INC
7879 MOVQ_R64_XMM CTR TCTR_LOW
7880 + pax_force_retaddr_bts
7881 ret
7882
7883 /*
7884 @@ -2552,6 +2580,7 @@ _aesni_inc:
7885 .Linc_low:
7886 movaps CTR, IV
7887 PSHUFB_XMM BSWAP_MASK IV
7888 + pax_force_retaddr_bts
7889 ret
7890
7891 /*
7892 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
7893 .Lctr_enc_ret:
7894 movups IV, (IVP)
7895 .Lctr_enc_just_ret:
7896 + pax_force_retaddr 0, 1
7897 ret
7898 +ENDPROC(aesni_ctr_enc)
7899 #endif
7900 diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
7901 index 545d0ce..14841a6 100644
7902 --- a/arch/x86/crypto/aesni-intel_glue.c
7903 +++ b/arch/x86/crypto/aesni-intel_glue.c
7904 @@ -929,6 +929,8 @@ out_free_ablkcipher:
7905 }
7906
7907 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
7908 + unsigned int key_len) __size_overflow(3);
7909 +static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
7910 unsigned int key_len)
7911 {
7912 int ret = 0;
7913 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
7914 index 391d245..67f35c2 100644
7915 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
7916 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
7917 @@ -20,6 +20,8 @@
7918 *
7919 */
7920
7921 +#include <asm/alternative-asm.h>
7922 +
7923 .file "blowfish-x86_64-asm.S"
7924 .text
7925
7926 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
7927 jnz __enc_xor;
7928
7929 write_block();
7930 + pax_force_retaddr 0, 1
7931 ret;
7932 __enc_xor:
7933 xor_block();
7934 + pax_force_retaddr 0, 1
7935 ret;
7936
7937 .align 8
7938 @@ -188,6 +192,7 @@ blowfish_dec_blk:
7939
7940 movq %r11, %rbp;
7941
7942 + pax_force_retaddr 0, 1
7943 ret;
7944
7945 /**********************************************************************
7946 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
7947
7948 popq %rbx;
7949 popq %rbp;
7950 + pax_force_retaddr 0, 1
7951 ret;
7952
7953 __enc_xor4:
7954 @@ -349,6 +355,7 @@ __enc_xor4:
7955
7956 popq %rbx;
7957 popq %rbp;
7958 + pax_force_retaddr 0, 1
7959 ret;
7960
7961 .align 8
7962 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
7963 popq %rbx;
7964 popq %rbp;
7965
7966 + pax_force_retaddr 0, 1
7967 ret;
7968
7969 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7970 index 6214a9b..1f4fc9a 100644
7971 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7972 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7973 @@ -1,3 +1,5 @@
7974 +#include <asm/alternative-asm.h>
7975 +
7976 # enter ECRYPT_encrypt_bytes
7977 .text
7978 .p2align 5
7979 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7980 add %r11,%rsp
7981 mov %rdi,%rax
7982 mov %rsi,%rdx
7983 + pax_force_retaddr 0, 1
7984 ret
7985 # bytesatleast65:
7986 ._bytesatleast65:
7987 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
7988 add %r11,%rsp
7989 mov %rdi,%rax
7990 mov %rsi,%rdx
7991 + pax_force_retaddr
7992 ret
7993 # enter ECRYPT_ivsetup
7994 .text
7995 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7996 add %r11,%rsp
7997 mov %rdi,%rax
7998 mov %rsi,%rdx
7999 + pax_force_retaddr
8000 ret
8001 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8002 index 7f24a15..9cd3ffe 100644
8003 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8004 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8005 @@ -24,6 +24,8 @@
8006 *
8007 */
8008
8009 +#include <asm/alternative-asm.h>
8010 +
8011 .file "serpent-sse2-x86_64-asm_64.S"
8012 .text
8013
8014 @@ -695,12 +697,14 @@ __serpent_enc_blk_8way:
8015 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8016 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8017
8018 + pax_force_retaddr
8019 ret;
8020
8021 __enc_xor8:
8022 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8023 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8024
8025 + pax_force_retaddr
8026 ret;
8027
8028 .align 8
8029 @@ -758,4 +762,5 @@ serpent_dec_blk_8way:
8030 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8031 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8032
8033 + pax_force_retaddr
8034 ret;
8035 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8036 index b2c2f57..8470cab 100644
8037 --- a/arch/x86/crypto/sha1_ssse3_asm.S
8038 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
8039 @@ -28,6 +28,8 @@
8040 * (at your option) any later version.
8041 */
8042
8043 +#include <asm/alternative-asm.h>
8044 +
8045 #define CTX %rdi // arg1
8046 #define BUF %rsi // arg2
8047 #define CNT %rdx // arg3
8048 @@ -104,6 +106,7 @@
8049 pop %r12
8050 pop %rbp
8051 pop %rbx
8052 + pax_force_retaddr 0, 1
8053 ret
8054
8055 .size \name, .-\name
8056 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8057 index 5b012a2..36d5364 100644
8058 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8059 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8060 @@ -20,6 +20,8 @@
8061 *
8062 */
8063
8064 +#include <asm/alternative-asm.h>
8065 +
8066 .file "twofish-x86_64-asm-3way.S"
8067 .text
8068
8069 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8070 popq %r13;
8071 popq %r14;
8072 popq %r15;
8073 + pax_force_retaddr 0, 1
8074 ret;
8075
8076 __enc_xor3:
8077 @@ -271,6 +274,7 @@ __enc_xor3:
8078 popq %r13;
8079 popq %r14;
8080 popq %r15;
8081 + pax_force_retaddr 0, 1
8082 ret;
8083
8084 .global twofish_dec_blk_3way
8085 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8086 popq %r13;
8087 popq %r14;
8088 popq %r15;
8089 + pax_force_retaddr 0, 1
8090 ret;
8091
8092 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8093 index 7bcf3fc..f53832f 100644
8094 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8095 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8096 @@ -21,6 +21,7 @@
8097 .text
8098
8099 #include <asm/asm-offsets.h>
8100 +#include <asm/alternative-asm.h>
8101
8102 #define a_offset 0
8103 #define b_offset 4
8104 @@ -268,6 +269,7 @@ twofish_enc_blk:
8105
8106 popq R1
8107 movq $1,%rax
8108 + pax_force_retaddr 0, 1
8109 ret
8110
8111 twofish_dec_blk:
8112 @@ -319,4 +321,5 @@ twofish_dec_blk:
8113
8114 popq R1
8115 movq $1,%rax
8116 + pax_force_retaddr 0, 1
8117 ret
8118 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8119 index 39e4909..887aa7e 100644
8120 --- a/arch/x86/ia32/ia32_aout.c
8121 +++ b/arch/x86/ia32/ia32_aout.c
8122 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8123 unsigned long dump_start, dump_size;
8124 struct user32 dump;
8125
8126 + memset(&dump, 0, sizeof(dump));
8127 +
8128 fs = get_fs();
8129 set_fs(KERNEL_DS);
8130 has_dumped = 1;
8131 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8132 index 6557769..ef6ae89 100644
8133 --- a/arch/x86/ia32/ia32_signal.c
8134 +++ b/arch/x86/ia32/ia32_signal.c
8135 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8136 }
8137 seg = get_fs();
8138 set_fs(KERNEL_DS);
8139 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8140 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8141 set_fs(seg);
8142 if (ret >= 0 && uoss_ptr) {
8143 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8144 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8145 */
8146 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8147 size_t frame_size,
8148 - void **fpstate)
8149 + void __user **fpstate)
8150 {
8151 unsigned long sp;
8152
8153 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8154
8155 if (used_math()) {
8156 sp = sp - sig_xstate_ia32_size;
8157 - *fpstate = (struct _fpstate_ia32 *) sp;
8158 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8159 if (save_i387_xstate_ia32(*fpstate) < 0)
8160 return (void __user *) -1L;
8161 }
8162 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8163 sp -= frame_size;
8164 /* Align the stack pointer according to the i386 ABI,
8165 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8166 - sp = ((sp + 4) & -16ul) - 4;
8167 + sp = ((sp - 12) & -16ul) - 4;
8168 return (void __user *) sp;
8169 }
8170
8171 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8172 * These are actually not used anymore, but left because some
8173 * gdb versions depend on them as a marker.
8174 */
8175 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8176 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8177 } put_user_catch(err);
8178
8179 if (err)
8180 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8181 0xb8,
8182 __NR_ia32_rt_sigreturn,
8183 0x80cd,
8184 - 0,
8185 + 0
8186 };
8187
8188 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8189 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8190
8191 if (ka->sa.sa_flags & SA_RESTORER)
8192 restorer = ka->sa.sa_restorer;
8193 + else if (current->mm->context.vdso)
8194 + /* Return stub is in 32bit vsyscall page */
8195 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8196 else
8197 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8198 - rt_sigreturn);
8199 + restorer = &frame->retcode;
8200 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8201
8202 /*
8203 * Not actually used anymore, but left because some gdb
8204 * versions need it.
8205 */
8206 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8207 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8208 } put_user_catch(err);
8209
8210 if (err)
8211 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8212 index e3e7340..05ed805 100644
8213 --- a/arch/x86/ia32/ia32entry.S
8214 +++ b/arch/x86/ia32/ia32entry.S
8215 @@ -13,8 +13,10 @@
8216 #include <asm/thread_info.h>
8217 #include <asm/segment.h>
8218 #include <asm/irqflags.h>
8219 +#include <asm/pgtable.h>
8220 #include <linux/linkage.h>
8221 #include <linux/err.h>
8222 +#include <asm/alternative-asm.h>
8223
8224 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8225 #include <linux/elf-em.h>
8226 @@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8227 ENDPROC(native_irq_enable_sysexit)
8228 #endif
8229
8230 + .macro pax_enter_kernel_user
8231 + pax_set_fptr_mask
8232 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8233 + call pax_enter_kernel_user
8234 +#endif
8235 + .endm
8236 +
8237 + .macro pax_exit_kernel_user
8238 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8239 + call pax_exit_kernel_user
8240 +#endif
8241 +#ifdef CONFIG_PAX_RANDKSTACK
8242 + pushq %rax
8243 + pushq %r11
8244 + call pax_randomize_kstack
8245 + popq %r11
8246 + popq %rax
8247 +#endif
8248 + .endm
8249 +
8250 +.macro pax_erase_kstack
8251 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8252 + call pax_erase_kstack
8253 +#endif
8254 +.endm
8255 +
8256 /*
8257 * 32bit SYSENTER instruction entry.
8258 *
8259 @@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8260 CFI_REGISTER rsp,rbp
8261 SWAPGS_UNSAFE_STACK
8262 movq PER_CPU_VAR(kernel_stack), %rsp
8263 - addq $(KERNEL_STACK_OFFSET),%rsp
8264 - /*
8265 - * No need to follow this irqs on/off section: the syscall
8266 - * disabled irqs, here we enable it straight after entry:
8267 - */
8268 - ENABLE_INTERRUPTS(CLBR_NONE)
8269 movl %ebp,%ebp /* zero extension */
8270 pushq_cfi $__USER32_DS
8271 /*CFI_REL_OFFSET ss,0*/
8272 @@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8273 CFI_REL_OFFSET rsp,0
8274 pushfq_cfi
8275 /*CFI_REL_OFFSET rflags,0*/
8276 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8277 - CFI_REGISTER rip,r10
8278 + orl $X86_EFLAGS_IF,(%rsp)
8279 + GET_THREAD_INFO(%r11)
8280 + movl TI_sysenter_return(%r11), %r11d
8281 + CFI_REGISTER rip,r11
8282 pushq_cfi $__USER32_CS
8283 /*CFI_REL_OFFSET cs,0*/
8284 movl %eax, %eax
8285 - pushq_cfi %r10
8286 + pushq_cfi %r11
8287 CFI_REL_OFFSET rip,0
8288 pushq_cfi %rax
8289 cld
8290 SAVE_ARGS 0,1,0
8291 + pax_enter_kernel_user
8292 + /*
8293 + * No need to follow this irqs on/off section: the syscall
8294 + * disabled irqs, here we enable it straight after entry:
8295 + */
8296 + ENABLE_INTERRUPTS(CLBR_NONE)
8297 /* no need to do an access_ok check here because rbp has been
8298 32bit zero extended */
8299 +
8300 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8301 + mov $PAX_USER_SHADOW_BASE,%r11
8302 + add %r11,%rbp
8303 +#endif
8304 +
8305 1: movl (%rbp),%ebp
8306 .section __ex_table,"a"
8307 .quad 1b,ia32_badarg
8308 .previous
8309 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8310 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8311 + GET_THREAD_INFO(%r11)
8312 + orl $TS_COMPAT,TI_status(%r11)
8313 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8314 CFI_REMEMBER_STATE
8315 jnz sysenter_tracesys
8316 cmpq $(IA32_NR_syscalls-1),%rax
8317 @@ -160,12 +197,15 @@ sysenter_do_call:
8318 sysenter_dispatch:
8319 call *ia32_sys_call_table(,%rax,8)
8320 movq %rax,RAX-ARGOFFSET(%rsp)
8321 + GET_THREAD_INFO(%r11)
8322 DISABLE_INTERRUPTS(CLBR_NONE)
8323 TRACE_IRQS_OFF
8324 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8325 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8326 jnz sysexit_audit
8327 sysexit_from_sys_call:
8328 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8329 + pax_exit_kernel_user
8330 + pax_erase_kstack
8331 + andl $~TS_COMPAT,TI_status(%r11)
8332 /* clear IF, that popfq doesn't enable interrupts early */
8333 andl $~0x200,EFLAGS-R11(%rsp)
8334 movl RIP-R11(%rsp),%edx /* User %eip */
8335 @@ -191,6 +231,9 @@ sysexit_from_sys_call:
8336 movl %eax,%esi /* 2nd arg: syscall number */
8337 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8338 call __audit_syscall_entry
8339 +
8340 + pax_erase_kstack
8341 +
8342 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8343 cmpq $(IA32_NR_syscalls-1),%rax
8344 ja ia32_badsys
8345 @@ -202,7 +245,7 @@ sysexit_from_sys_call:
8346 .endm
8347
8348 .macro auditsys_exit exit
8349 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8350 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8351 jnz ia32_ret_from_sys_call
8352 TRACE_IRQS_ON
8353 sti
8354 @@ -213,11 +256,12 @@ sysexit_from_sys_call:
8355 1: setbe %al /* 1 if error, 0 if not */
8356 movzbl %al,%edi /* zero-extend that into %edi */
8357 call __audit_syscall_exit
8358 + GET_THREAD_INFO(%r11)
8359 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8360 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8361 cli
8362 TRACE_IRQS_OFF
8363 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8364 + testl %edi,TI_flags(%r11)
8365 jz \exit
8366 CLEAR_RREGS -ARGOFFSET
8367 jmp int_with_check
8368 @@ -235,7 +279,7 @@ sysexit_audit:
8369
8370 sysenter_tracesys:
8371 #ifdef CONFIG_AUDITSYSCALL
8372 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8373 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8374 jz sysenter_auditsys
8375 #endif
8376 SAVE_REST
8377 @@ -243,6 +287,9 @@ sysenter_tracesys:
8378 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8379 movq %rsp,%rdi /* &pt_regs -> arg1 */
8380 call syscall_trace_enter
8381 +
8382 + pax_erase_kstack
8383 +
8384 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8385 RESTORE_REST
8386 cmpq $(IA32_NR_syscalls-1),%rax
8387 @@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8388 ENTRY(ia32_cstar_target)
8389 CFI_STARTPROC32 simple
8390 CFI_SIGNAL_FRAME
8391 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8392 + CFI_DEF_CFA rsp,0
8393 CFI_REGISTER rip,rcx
8394 /*CFI_REGISTER rflags,r11*/
8395 SWAPGS_UNSAFE_STACK
8396 movl %esp,%r8d
8397 CFI_REGISTER rsp,r8
8398 movq PER_CPU_VAR(kernel_stack),%rsp
8399 + SAVE_ARGS 8*6,0,0
8400 + pax_enter_kernel_user
8401 /*
8402 * No need to follow this irqs on/off section: the syscall
8403 * disabled irqs and here we enable it straight after entry:
8404 */
8405 ENABLE_INTERRUPTS(CLBR_NONE)
8406 - SAVE_ARGS 8,0,0
8407 movl %eax,%eax /* zero extension */
8408 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8409 movq %rcx,RIP-ARGOFFSET(%rsp)
8410 @@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8411 /* no need to do an access_ok check here because r8 has been
8412 32bit zero extended */
8413 /* hardware stack frame is complete now */
8414 +
8415 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8416 + mov $PAX_USER_SHADOW_BASE,%r11
8417 + add %r11,%r8
8418 +#endif
8419 +
8420 1: movl (%r8),%r9d
8421 .section __ex_table,"a"
8422 .quad 1b,ia32_badarg
8423 .previous
8424 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8425 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8426 + GET_THREAD_INFO(%r11)
8427 + orl $TS_COMPAT,TI_status(%r11)
8428 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8429 CFI_REMEMBER_STATE
8430 jnz cstar_tracesys
8431 cmpq $IA32_NR_syscalls-1,%rax
8432 @@ -317,12 +372,15 @@ cstar_do_call:
8433 cstar_dispatch:
8434 call *ia32_sys_call_table(,%rax,8)
8435 movq %rax,RAX-ARGOFFSET(%rsp)
8436 + GET_THREAD_INFO(%r11)
8437 DISABLE_INTERRUPTS(CLBR_NONE)
8438 TRACE_IRQS_OFF
8439 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8440 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8441 jnz sysretl_audit
8442 sysretl_from_sys_call:
8443 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8444 + pax_exit_kernel_user
8445 + pax_erase_kstack
8446 + andl $~TS_COMPAT,TI_status(%r11)
8447 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8448 movl RIP-ARGOFFSET(%rsp),%ecx
8449 CFI_REGISTER rip,rcx
8450 @@ -350,7 +408,7 @@ sysretl_audit:
8451
8452 cstar_tracesys:
8453 #ifdef CONFIG_AUDITSYSCALL
8454 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8455 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8456 jz cstar_auditsys
8457 #endif
8458 xchgl %r9d,%ebp
8459 @@ -359,6 +417,9 @@ cstar_tracesys:
8460 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8461 movq %rsp,%rdi /* &pt_regs -> arg1 */
8462 call syscall_trace_enter
8463 +
8464 + pax_erase_kstack
8465 +
8466 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8467 RESTORE_REST
8468 xchgl %ebp,%r9d
8469 @@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8470 CFI_REL_OFFSET rip,RIP-RIP
8471 PARAVIRT_ADJUST_EXCEPTION_FRAME
8472 SWAPGS
8473 - /*
8474 - * No need to follow this irqs on/off section: the syscall
8475 - * disabled irqs and here we enable it straight after entry:
8476 - */
8477 - ENABLE_INTERRUPTS(CLBR_NONE)
8478 movl %eax,%eax
8479 pushq_cfi %rax
8480 cld
8481 /* note the registers are not zero extended to the sf.
8482 this could be a problem. */
8483 SAVE_ARGS 0,1,0
8484 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8485 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8486 + pax_enter_kernel_user
8487 + /*
8488 + * No need to follow this irqs on/off section: the syscall
8489 + * disabled irqs and here we enable it straight after entry:
8490 + */
8491 + ENABLE_INTERRUPTS(CLBR_NONE)
8492 + GET_THREAD_INFO(%r11)
8493 + orl $TS_COMPAT,TI_status(%r11)
8494 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8495 jnz ia32_tracesys
8496 cmpq $(IA32_NR_syscalls-1),%rax
8497 ja ia32_badsys
8498 @@ -435,6 +498,9 @@ ia32_tracesys:
8499 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8500 movq %rsp,%rdi /* &pt_regs -> arg1 */
8501 call syscall_trace_enter
8502 +
8503 + pax_erase_kstack
8504 +
8505 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8506 RESTORE_REST
8507 cmpq $(IA32_NR_syscalls-1),%rax
8508 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8509 index f6f5c53..b358b28 100644
8510 --- a/arch/x86/ia32/sys_ia32.c
8511 +++ b/arch/x86/ia32/sys_ia32.c
8512 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8513 */
8514 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8515 {
8516 - typeof(ubuf->st_uid) uid = 0;
8517 - typeof(ubuf->st_gid) gid = 0;
8518 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8519 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8520 SET_UID(uid, stat->uid);
8521 SET_GID(gid, stat->gid);
8522 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8523 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8524 }
8525 set_fs(KERNEL_DS);
8526 ret = sys_rt_sigprocmask(how,
8527 - set ? (sigset_t __user *)&s : NULL,
8528 - oset ? (sigset_t __user *)&s : NULL,
8529 + set ? (sigset_t __force_user *)&s : NULL,
8530 + oset ? (sigset_t __force_user *)&s : NULL,
8531 sigsetsize);
8532 set_fs(old_fs);
8533 if (ret)
8534 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
8535 return alarm_setitimer(seconds);
8536 }
8537
8538 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8539 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8540 int options)
8541 {
8542 return compat_sys_wait4(pid, stat_addr, options, NULL);
8543 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8544 mm_segment_t old_fs = get_fs();
8545
8546 set_fs(KERNEL_DS);
8547 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8548 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8549 set_fs(old_fs);
8550 if (put_compat_timespec(&t, interval))
8551 return -EFAULT;
8552 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8553 mm_segment_t old_fs = get_fs();
8554
8555 set_fs(KERNEL_DS);
8556 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8557 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8558 set_fs(old_fs);
8559 if (!ret) {
8560 switch (_NSIG_WORDS) {
8561 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8562 if (copy_siginfo_from_user32(&info, uinfo))
8563 return -EFAULT;
8564 set_fs(KERNEL_DS);
8565 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8566 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8567 set_fs(old_fs);
8568 return ret;
8569 }
8570 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8571 return -EFAULT;
8572
8573 set_fs(KERNEL_DS);
8574 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8575 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8576 count);
8577 set_fs(old_fs);
8578
8579 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8580 index 952bd01..7692c6f 100644
8581 --- a/arch/x86/include/asm/alternative-asm.h
8582 +++ b/arch/x86/include/asm/alternative-asm.h
8583 @@ -15,6 +15,45 @@
8584 .endm
8585 #endif
8586
8587 +#ifdef KERNEXEC_PLUGIN
8588 + .macro pax_force_retaddr_bts rip=0
8589 + btsq $63,\rip(%rsp)
8590 + .endm
8591 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8592 + .macro pax_force_retaddr rip=0, reload=0
8593 + btsq $63,\rip(%rsp)
8594 + .endm
8595 + .macro pax_force_fptr ptr
8596 + btsq $63,\ptr
8597 + .endm
8598 + .macro pax_set_fptr_mask
8599 + .endm
8600 +#endif
8601 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8602 + .macro pax_force_retaddr rip=0, reload=0
8603 + .if \reload
8604 + pax_set_fptr_mask
8605 + .endif
8606 + orq %r10,\rip(%rsp)
8607 + .endm
8608 + .macro pax_force_fptr ptr
8609 + orq %r10,\ptr
8610 + .endm
8611 + .macro pax_set_fptr_mask
8612 + movabs $0x8000000000000000,%r10
8613 + .endm
8614 +#endif
8615 +#else
8616 + .macro pax_force_retaddr rip=0, reload=0
8617 + .endm
8618 + .macro pax_force_fptr ptr
8619 + .endm
8620 + .macro pax_force_retaddr_bts rip=0
8621 + .endm
8622 + .macro pax_set_fptr_mask
8623 + .endm
8624 +#endif
8625 +
8626 .macro altinstruction_entry orig alt feature orig_len alt_len
8627 .long \orig - .
8628 .long \alt - .
8629 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8630 index 37ad100..7d47faa 100644
8631 --- a/arch/x86/include/asm/alternative.h
8632 +++ b/arch/x86/include/asm/alternative.h
8633 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
8634 ".section .discard,\"aw\",@progbits\n" \
8635 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
8636 ".previous\n" \
8637 - ".section .altinstr_replacement, \"ax\"\n" \
8638 + ".section .altinstr_replacement, \"a\"\n" \
8639 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8640 ".previous"
8641
8642 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8643 index 3ab9bdd..238033e 100644
8644 --- a/arch/x86/include/asm/apic.h
8645 +++ b/arch/x86/include/asm/apic.h
8646 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
8647
8648 #ifdef CONFIG_X86_LOCAL_APIC
8649
8650 -extern unsigned int apic_verbosity;
8651 +extern int apic_verbosity;
8652 extern int local_apic_timer_c2_ok;
8653
8654 extern int disable_apic;
8655 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8656 index 20370c6..a2eb9b0 100644
8657 --- a/arch/x86/include/asm/apm.h
8658 +++ b/arch/x86/include/asm/apm.h
8659 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8660 __asm__ __volatile__(APM_DO_ZERO_SEGS
8661 "pushl %%edi\n\t"
8662 "pushl %%ebp\n\t"
8663 - "lcall *%%cs:apm_bios_entry\n\t"
8664 + "lcall *%%ss:apm_bios_entry\n\t"
8665 "setc %%al\n\t"
8666 "popl %%ebp\n\t"
8667 "popl %%edi\n\t"
8668 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8669 __asm__ __volatile__(APM_DO_ZERO_SEGS
8670 "pushl %%edi\n\t"
8671 "pushl %%ebp\n\t"
8672 - "lcall *%%cs:apm_bios_entry\n\t"
8673 + "lcall *%%ss:apm_bios_entry\n\t"
8674 "setc %%bl\n\t"
8675 "popl %%ebp\n\t"
8676 "popl %%edi\n\t"
8677 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
8678 index 58cb6d4..ca9010d 100644
8679 --- a/arch/x86/include/asm/atomic.h
8680 +++ b/arch/x86/include/asm/atomic.h
8681 @@ -22,7 +22,18 @@
8682 */
8683 static inline int atomic_read(const atomic_t *v)
8684 {
8685 - return (*(volatile int *)&(v)->counter);
8686 + return (*(volatile const int *)&(v)->counter);
8687 +}
8688 +
8689 +/**
8690 + * atomic_read_unchecked - read atomic variable
8691 + * @v: pointer of type atomic_unchecked_t
8692 + *
8693 + * Atomically reads the value of @v.
8694 + */
8695 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8696 +{
8697 + return (*(volatile const int *)&(v)->counter);
8698 }
8699
8700 /**
8701 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
8702 }
8703
8704 /**
8705 + * atomic_set_unchecked - set atomic variable
8706 + * @v: pointer of type atomic_unchecked_t
8707 + * @i: required value
8708 + *
8709 + * Atomically sets the value of @v to @i.
8710 + */
8711 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8712 +{
8713 + v->counter = i;
8714 +}
8715 +
8716 +/**
8717 * atomic_add - add integer to atomic variable
8718 * @i: integer value to add
8719 * @v: pointer of type atomic_t
8720 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
8721 */
8722 static inline void atomic_add(int i, atomic_t *v)
8723 {
8724 - asm volatile(LOCK_PREFIX "addl %1,%0"
8725 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8726 +
8727 +#ifdef CONFIG_PAX_REFCOUNT
8728 + "jno 0f\n"
8729 + LOCK_PREFIX "subl %1,%0\n"
8730 + "int $4\n0:\n"
8731 + _ASM_EXTABLE(0b, 0b)
8732 +#endif
8733 +
8734 + : "+m" (v->counter)
8735 + : "ir" (i));
8736 +}
8737 +
8738 +/**
8739 + * atomic_add_unchecked - add integer to atomic variable
8740 + * @i: integer value to add
8741 + * @v: pointer of type atomic_unchecked_t
8742 + *
8743 + * Atomically adds @i to @v.
8744 + */
8745 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8746 +{
8747 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8748 : "+m" (v->counter)
8749 : "ir" (i));
8750 }
8751 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
8752 */
8753 static inline void atomic_sub(int i, atomic_t *v)
8754 {
8755 - asm volatile(LOCK_PREFIX "subl %1,%0"
8756 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8757 +
8758 +#ifdef CONFIG_PAX_REFCOUNT
8759 + "jno 0f\n"
8760 + LOCK_PREFIX "addl %1,%0\n"
8761 + "int $4\n0:\n"
8762 + _ASM_EXTABLE(0b, 0b)
8763 +#endif
8764 +
8765 + : "+m" (v->counter)
8766 + : "ir" (i));
8767 +}
8768 +
8769 +/**
8770 + * atomic_sub_unchecked - subtract integer from atomic variable
8771 + * @i: integer value to subtract
8772 + * @v: pointer of type atomic_unchecked_t
8773 + *
8774 + * Atomically subtracts @i from @v.
8775 + */
8776 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8777 +{
8778 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8779 : "+m" (v->counter)
8780 : "ir" (i));
8781 }
8782 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8783 {
8784 unsigned char c;
8785
8786 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8787 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8788 +
8789 +#ifdef CONFIG_PAX_REFCOUNT
8790 + "jno 0f\n"
8791 + LOCK_PREFIX "addl %2,%0\n"
8792 + "int $4\n0:\n"
8793 + _ASM_EXTABLE(0b, 0b)
8794 +#endif
8795 +
8796 + "sete %1\n"
8797 : "+m" (v->counter), "=qm" (c)
8798 : "ir" (i) : "memory");
8799 return c;
8800 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8801 */
8802 static inline void atomic_inc(atomic_t *v)
8803 {
8804 - asm volatile(LOCK_PREFIX "incl %0"
8805 + asm volatile(LOCK_PREFIX "incl %0\n"
8806 +
8807 +#ifdef CONFIG_PAX_REFCOUNT
8808 + "jno 0f\n"
8809 + LOCK_PREFIX "decl %0\n"
8810 + "int $4\n0:\n"
8811 + _ASM_EXTABLE(0b, 0b)
8812 +#endif
8813 +
8814 + : "+m" (v->counter));
8815 +}
8816 +
8817 +/**
8818 + * atomic_inc_unchecked - increment atomic variable
8819 + * @v: pointer of type atomic_unchecked_t
8820 + *
8821 + * Atomically increments @v by 1.
8822 + */
8823 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8824 +{
8825 + asm volatile(LOCK_PREFIX "incl %0\n"
8826 : "+m" (v->counter));
8827 }
8828
8829 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
8830 */
8831 static inline void atomic_dec(atomic_t *v)
8832 {
8833 - asm volatile(LOCK_PREFIX "decl %0"
8834 + asm volatile(LOCK_PREFIX "decl %0\n"
8835 +
8836 +#ifdef CONFIG_PAX_REFCOUNT
8837 + "jno 0f\n"
8838 + LOCK_PREFIX "incl %0\n"
8839 + "int $4\n0:\n"
8840 + _ASM_EXTABLE(0b, 0b)
8841 +#endif
8842 +
8843 + : "+m" (v->counter));
8844 +}
8845 +
8846 +/**
8847 + * atomic_dec_unchecked - decrement atomic variable
8848 + * @v: pointer of type atomic_unchecked_t
8849 + *
8850 + * Atomically decrements @v by 1.
8851 + */
8852 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8853 +{
8854 + asm volatile(LOCK_PREFIX "decl %0\n"
8855 : "+m" (v->counter));
8856 }
8857
8858 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8859 {
8860 unsigned char c;
8861
8862 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
8863 + asm volatile(LOCK_PREFIX "decl %0\n"
8864 +
8865 +#ifdef CONFIG_PAX_REFCOUNT
8866 + "jno 0f\n"
8867 + LOCK_PREFIX "incl %0\n"
8868 + "int $4\n0:\n"
8869 + _ASM_EXTABLE(0b, 0b)
8870 +#endif
8871 +
8872 + "sete %1\n"
8873 : "+m" (v->counter), "=qm" (c)
8874 : : "memory");
8875 return c != 0;
8876 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8877 {
8878 unsigned char c;
8879
8880 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
8881 + asm volatile(LOCK_PREFIX "incl %0\n"
8882 +
8883 +#ifdef CONFIG_PAX_REFCOUNT
8884 + "jno 0f\n"
8885 + LOCK_PREFIX "decl %0\n"
8886 + "int $4\n0:\n"
8887 + _ASM_EXTABLE(0b, 0b)
8888 +#endif
8889 +
8890 + "sete %1\n"
8891 + : "+m" (v->counter), "=qm" (c)
8892 + : : "memory");
8893 + return c != 0;
8894 +}
8895 +
8896 +/**
8897 + * atomic_inc_and_test_unchecked - increment and test
8898 + * @v: pointer of type atomic_unchecked_t
8899 + *
8900 + * Atomically increments @v by 1
8901 + * and returns true if the result is zero, or false for all
8902 + * other cases.
8903 + */
8904 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8905 +{
8906 + unsigned char c;
8907 +
8908 + asm volatile(LOCK_PREFIX "incl %0\n"
8909 + "sete %1\n"
8910 : "+m" (v->counter), "=qm" (c)
8911 : : "memory");
8912 return c != 0;
8913 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8914 {
8915 unsigned char c;
8916
8917 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8918 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
8919 +
8920 +#ifdef CONFIG_PAX_REFCOUNT
8921 + "jno 0f\n"
8922 + LOCK_PREFIX "subl %2,%0\n"
8923 + "int $4\n0:\n"
8924 + _ASM_EXTABLE(0b, 0b)
8925 +#endif
8926 +
8927 + "sets %1\n"
8928 : "+m" (v->counter), "=qm" (c)
8929 : "ir" (i) : "memory");
8930 return c;
8931 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
8932 goto no_xadd;
8933 #endif
8934 /* Modern 486+ processor */
8935 - return i + xadd(&v->counter, i);
8936 + return i + xadd_check_overflow(&v->counter, i);
8937
8938 #ifdef CONFIG_M386
8939 no_xadd: /* Legacy 386 processor */
8940 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
8941 }
8942
8943 /**
8944 + * atomic_add_return_unchecked - add integer and return
8945 + * @i: integer value to add
8946 + * @v: pointer of type atomic_unchecked_t
8947 + *
8948 + * Atomically adds @i to @v and returns @i + @v
8949 + */
8950 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8951 +{
8952 +#ifdef CONFIG_M386
8953 + int __i;
8954 + unsigned long flags;
8955 + if (unlikely(boot_cpu_data.x86 <= 3))
8956 + goto no_xadd;
8957 +#endif
8958 + /* Modern 486+ processor */
8959 + return i + xadd(&v->counter, i);
8960 +
8961 +#ifdef CONFIG_M386
8962 +no_xadd: /* Legacy 386 processor */
8963 + raw_local_irq_save(flags);
8964 + __i = atomic_read_unchecked(v);
8965 + atomic_set_unchecked(v, i + __i);
8966 + raw_local_irq_restore(flags);
8967 + return i + __i;
8968 +#endif
8969 +}
8970 +
8971 +/**
8972 * atomic_sub_return - subtract integer and return
8973 * @v: pointer of type atomic_t
8974 * @i: integer value to subtract
8975 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
8976 }
8977
8978 #define atomic_inc_return(v) (atomic_add_return(1, v))
8979 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8980 +{
8981 + return atomic_add_return_unchecked(1, v);
8982 +}
8983 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8984
8985 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8986 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8987 return cmpxchg(&v->counter, old, new);
8988 }
8989
8990 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8991 +{
8992 + return cmpxchg(&v->counter, old, new);
8993 +}
8994 +
8995 static inline int atomic_xchg(atomic_t *v, int new)
8996 {
8997 return xchg(&v->counter, new);
8998 }
8999
9000 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9001 +{
9002 + return xchg(&v->counter, new);
9003 +}
9004 +
9005 /**
9006 * __atomic_add_unless - add unless the number is already a given value
9007 * @v: pointer of type atomic_t
9008 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9009 */
9010 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9011 {
9012 - int c, old;
9013 + int c, old, new;
9014 c = atomic_read(v);
9015 for (;;) {
9016 - if (unlikely(c == (u)))
9017 + if (unlikely(c == u))
9018 break;
9019 - old = atomic_cmpxchg((v), c, c + (a));
9020 +
9021 + asm volatile("addl %2,%0\n"
9022 +
9023 +#ifdef CONFIG_PAX_REFCOUNT
9024 + "jno 0f\n"
9025 + "subl %2,%0\n"
9026 + "int $4\n0:\n"
9027 + _ASM_EXTABLE(0b, 0b)
9028 +#endif
9029 +
9030 + : "=r" (new)
9031 + : "0" (c), "ir" (a));
9032 +
9033 + old = atomic_cmpxchg(v, c, new);
9034 if (likely(old == c))
9035 break;
9036 c = old;
9037 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9038 return c;
9039 }
9040
9041 +/**
9042 + * atomic_inc_not_zero_hint - increment if not null
9043 + * @v: pointer of type atomic_t
9044 + * @hint: probable value of the atomic before the increment
9045 + *
9046 + * This version of atomic_inc_not_zero() gives a hint of probable
9047 + * value of the atomic. This helps processor to not read the memory
9048 + * before doing the atomic read/modify/write cycle, lowering
9049 + * number of bus transactions on some arches.
9050 + *
9051 + * Returns: 0 if increment was not done, 1 otherwise.
9052 + */
9053 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9054 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9055 +{
9056 + int val, c = hint, new;
9057 +
9058 + /* sanity test, should be removed by compiler if hint is a constant */
9059 + if (!hint)
9060 + return __atomic_add_unless(v, 1, 0);
9061 +
9062 + do {
9063 + asm volatile("incl %0\n"
9064 +
9065 +#ifdef CONFIG_PAX_REFCOUNT
9066 + "jno 0f\n"
9067 + "decl %0\n"
9068 + "int $4\n0:\n"
9069 + _ASM_EXTABLE(0b, 0b)
9070 +#endif
9071 +
9072 + : "=r" (new)
9073 + : "0" (c));
9074 +
9075 + val = atomic_cmpxchg(v, c, new);
9076 + if (val == c)
9077 + return 1;
9078 + c = val;
9079 + } while (c);
9080 +
9081 + return 0;
9082 +}
9083
9084 /*
9085 * atomic_dec_if_positive - decrement by 1 if old value positive
9086 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9087 index fa13f0e..27c2e08 100644
9088 --- a/arch/x86/include/asm/atomic64_32.h
9089 +++ b/arch/x86/include/asm/atomic64_32.h
9090 @@ -12,6 +12,14 @@ typedef struct {
9091 u64 __aligned(8) counter;
9092 } atomic64_t;
9093
9094 +#ifdef CONFIG_PAX_REFCOUNT
9095 +typedef struct {
9096 + u64 __aligned(8) counter;
9097 +} atomic64_unchecked_t;
9098 +#else
9099 +typedef atomic64_t atomic64_unchecked_t;
9100 +#endif
9101 +
9102 #define ATOMIC64_INIT(val) { (val) }
9103
9104 #ifdef CONFIG_X86_CMPXCHG64
9105 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9106 }
9107
9108 /**
9109 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9110 + * @p: pointer to type atomic64_unchecked_t
9111 + * @o: expected value
9112 + * @n: new value
9113 + *
9114 + * Atomically sets @v to @n if it was equal to @o and returns
9115 + * the old value.
9116 + */
9117 +
9118 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9119 +{
9120 + return cmpxchg64(&v->counter, o, n);
9121 +}
9122 +
9123 +/**
9124 * atomic64_xchg - xchg atomic64 variable
9125 * @v: pointer to type atomic64_t
9126 * @n: value to assign
9127 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9128 }
9129
9130 /**
9131 + * atomic64_set_unchecked - set atomic64 variable
9132 + * @v: pointer to type atomic64_unchecked_t
9133 + * @n: value to assign
9134 + *
9135 + * Atomically sets the value of @v to @n.
9136 + */
9137 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9138 +{
9139 + unsigned high = (unsigned)(i >> 32);
9140 + unsigned low = (unsigned)i;
9141 + asm volatile(ATOMIC64_ALTERNATIVE(set)
9142 + : "+b" (low), "+c" (high)
9143 + : "S" (v)
9144 + : "eax", "edx", "memory"
9145 + );
9146 +}
9147 +
9148 +/**
9149 * atomic64_read - read atomic64 variable
9150 * @v: pointer to type atomic64_t
9151 *
9152 @@ -93,6 +134,22 @@ static inline long long atomic64_read(const atomic64_t *v)
9153 }
9154
9155 /**
9156 + * atomic64_read_unchecked - read atomic64 variable
9157 + * @v: pointer to type atomic64_unchecked_t
9158 + *
9159 + * Atomically reads the value of @v and returns it.
9160 + */
9161 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9162 +{
9163 + long long r;
9164 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
9165 + : "=A" (r), "+c" (v)
9166 + : : "memory"
9167 + );
9168 + return r;
9169 + }
9170 +
9171 +/**
9172 * atomic64_add_return - add and return
9173 * @i: integer value to add
9174 * @v: pointer to type atomic64_t
9175 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9176 return i;
9177 }
9178
9179 +/**
9180 + * atomic64_add_return_unchecked - add and return
9181 + * @i: integer value to add
9182 + * @v: pointer to type atomic64_unchecked_t
9183 + *
9184 + * Atomically adds @i to @v and returns @i + *@v
9185 + */
9186 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9187 +{
9188 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
9189 + : "+A" (i), "+c" (v)
9190 + : : "memory"
9191 + );
9192 + return i;
9193 +}
9194 +
9195 /*
9196 * Other variants with different arithmetic operators:
9197 */
9198 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9199 return a;
9200 }
9201
9202 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9203 +{
9204 + long long a;
9205 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
9206 + : "=A" (a)
9207 + : "S" (v)
9208 + : "memory", "ecx"
9209 + );
9210 + return a;
9211 +}
9212 +
9213 static inline long long atomic64_dec_return(atomic64_t *v)
9214 {
9215 long long a;
9216 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9217 }
9218
9219 /**
9220 + * atomic64_add_unchecked - add integer to atomic64 variable
9221 + * @i: integer value to add
9222 + * @v: pointer to type atomic64_unchecked_t
9223 + *
9224 + * Atomically adds @i to @v.
9225 + */
9226 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9227 +{
9228 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
9229 + : "+A" (i), "+c" (v)
9230 + : : "memory"
9231 + );
9232 + return i;
9233 +}
9234 +
9235 +/**
9236 * atomic64_sub - subtract the atomic64 variable
9237 * @i: integer value to subtract
9238 * @v: pointer to type atomic64_t
9239 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9240 index 0e1cbfc..5623683 100644
9241 --- a/arch/x86/include/asm/atomic64_64.h
9242 +++ b/arch/x86/include/asm/atomic64_64.h
9243 @@ -18,7 +18,19 @@
9244 */
9245 static inline long atomic64_read(const atomic64_t *v)
9246 {
9247 - return (*(volatile long *)&(v)->counter);
9248 + return (*(volatile const long *)&(v)->counter);
9249 +}
9250 +
9251 +/**
9252 + * atomic64_read_unchecked - read atomic64 variable
9253 + * @v: pointer of type atomic64_unchecked_t
9254 + *
9255 + * Atomically reads the value of @v.
9256 + * Doesn't imply a read memory barrier.
9257 + */
9258 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9259 +{
9260 + return (*(volatile const long *)&(v)->counter);
9261 }
9262
9263 /**
9264 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9265 }
9266
9267 /**
9268 + * atomic64_set_unchecked - set atomic64 variable
9269 + * @v: pointer to type atomic64_unchecked_t
9270 + * @i: required value
9271 + *
9272 + * Atomically sets the value of @v to @i.
9273 + */
9274 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9275 +{
9276 + v->counter = i;
9277 +}
9278 +
9279 +/**
9280 * atomic64_add - add integer to atomic64 variable
9281 * @i: integer value to add
9282 * @v: pointer to type atomic64_t
9283 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9284 */
9285 static inline void atomic64_add(long i, atomic64_t *v)
9286 {
9287 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9288 +
9289 +#ifdef CONFIG_PAX_REFCOUNT
9290 + "jno 0f\n"
9291 + LOCK_PREFIX "subq %1,%0\n"
9292 + "int $4\n0:\n"
9293 + _ASM_EXTABLE(0b, 0b)
9294 +#endif
9295 +
9296 + : "=m" (v->counter)
9297 + : "er" (i), "m" (v->counter));
9298 +}
9299 +
9300 +/**
9301 + * atomic64_add_unchecked - add integer to atomic64 variable
9302 + * @i: integer value to add
9303 + * @v: pointer to type atomic64_unchecked_t
9304 + *
9305 + * Atomically adds @i to @v.
9306 + */
9307 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9308 +{
9309 asm volatile(LOCK_PREFIX "addq %1,%0"
9310 : "=m" (v->counter)
9311 : "er" (i), "m" (v->counter));
9312 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9313 */
9314 static inline void atomic64_sub(long i, atomic64_t *v)
9315 {
9316 - asm volatile(LOCK_PREFIX "subq %1,%0"
9317 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9318 +
9319 +#ifdef CONFIG_PAX_REFCOUNT
9320 + "jno 0f\n"
9321 + LOCK_PREFIX "addq %1,%0\n"
9322 + "int $4\n0:\n"
9323 + _ASM_EXTABLE(0b, 0b)
9324 +#endif
9325 +
9326 + : "=m" (v->counter)
9327 + : "er" (i), "m" (v->counter));
9328 +}
9329 +
9330 +/**
9331 + * atomic64_sub_unchecked - subtract the atomic64 variable
9332 + * @i: integer value to subtract
9333 + * @v: pointer to type atomic64_unchecked_t
9334 + *
9335 + * Atomically subtracts @i from @v.
9336 + */
9337 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9338 +{
9339 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9340 : "=m" (v->counter)
9341 : "er" (i), "m" (v->counter));
9342 }
9343 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9344 {
9345 unsigned char c;
9346
9347 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9348 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9349 +
9350 +#ifdef CONFIG_PAX_REFCOUNT
9351 + "jno 0f\n"
9352 + LOCK_PREFIX "addq %2,%0\n"
9353 + "int $4\n0:\n"
9354 + _ASM_EXTABLE(0b, 0b)
9355 +#endif
9356 +
9357 + "sete %1\n"
9358 : "=m" (v->counter), "=qm" (c)
9359 : "er" (i), "m" (v->counter) : "memory");
9360 return c;
9361 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9362 */
9363 static inline void atomic64_inc(atomic64_t *v)
9364 {
9365 + asm volatile(LOCK_PREFIX "incq %0\n"
9366 +
9367 +#ifdef CONFIG_PAX_REFCOUNT
9368 + "jno 0f\n"
9369 + LOCK_PREFIX "decq %0\n"
9370 + "int $4\n0:\n"
9371 + _ASM_EXTABLE(0b, 0b)
9372 +#endif
9373 +
9374 + : "=m" (v->counter)
9375 + : "m" (v->counter));
9376 +}
9377 +
9378 +/**
9379 + * atomic64_inc_unchecked - increment atomic64 variable
9380 + * @v: pointer to type atomic64_unchecked_t
9381 + *
9382 + * Atomically increments @v by 1.
9383 + */
9384 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9385 +{
9386 asm volatile(LOCK_PREFIX "incq %0"
9387 : "=m" (v->counter)
9388 : "m" (v->counter));
9389 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9390 */
9391 static inline void atomic64_dec(atomic64_t *v)
9392 {
9393 - asm volatile(LOCK_PREFIX "decq %0"
9394 + asm volatile(LOCK_PREFIX "decq %0\n"
9395 +
9396 +#ifdef CONFIG_PAX_REFCOUNT
9397 + "jno 0f\n"
9398 + LOCK_PREFIX "incq %0\n"
9399 + "int $4\n0:\n"
9400 + _ASM_EXTABLE(0b, 0b)
9401 +#endif
9402 +
9403 + : "=m" (v->counter)
9404 + : "m" (v->counter));
9405 +}
9406 +
9407 +/**
9408 + * atomic64_dec_unchecked - decrement atomic64 variable
9409 + * @v: pointer to type atomic64_t
9410 + *
9411 + * Atomically decrements @v by 1.
9412 + */
9413 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9414 +{
9415 + asm volatile(LOCK_PREFIX "decq %0\n"
9416 : "=m" (v->counter)
9417 : "m" (v->counter));
9418 }
9419 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9420 {
9421 unsigned char c;
9422
9423 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9424 + asm volatile(LOCK_PREFIX "decq %0\n"
9425 +
9426 +#ifdef CONFIG_PAX_REFCOUNT
9427 + "jno 0f\n"
9428 + LOCK_PREFIX "incq %0\n"
9429 + "int $4\n0:\n"
9430 + _ASM_EXTABLE(0b, 0b)
9431 +#endif
9432 +
9433 + "sete %1\n"
9434 : "=m" (v->counter), "=qm" (c)
9435 : "m" (v->counter) : "memory");
9436 return c != 0;
9437 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9438 {
9439 unsigned char c;
9440
9441 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9442 + asm volatile(LOCK_PREFIX "incq %0\n"
9443 +
9444 +#ifdef CONFIG_PAX_REFCOUNT
9445 + "jno 0f\n"
9446 + LOCK_PREFIX "decq %0\n"
9447 + "int $4\n0:\n"
9448 + _ASM_EXTABLE(0b, 0b)
9449 +#endif
9450 +
9451 + "sete %1\n"
9452 : "=m" (v->counter), "=qm" (c)
9453 : "m" (v->counter) : "memory");
9454 return c != 0;
9455 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9456 {
9457 unsigned char c;
9458
9459 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9460 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9461 +
9462 +#ifdef CONFIG_PAX_REFCOUNT
9463 + "jno 0f\n"
9464 + LOCK_PREFIX "subq %2,%0\n"
9465 + "int $4\n0:\n"
9466 + _ASM_EXTABLE(0b, 0b)
9467 +#endif
9468 +
9469 + "sets %1\n"
9470 : "=m" (v->counter), "=qm" (c)
9471 : "er" (i), "m" (v->counter) : "memory");
9472 return c;
9473 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9474 */
9475 static inline long atomic64_add_return(long i, atomic64_t *v)
9476 {
9477 + return i + xadd_check_overflow(&v->counter, i);
9478 +}
9479 +
9480 +/**
9481 + * atomic64_add_return_unchecked - add and return
9482 + * @i: integer value to add
9483 + * @v: pointer to type atomic64_unchecked_t
9484 + *
9485 + * Atomically adds @i to @v and returns @i + @v
9486 + */
9487 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9488 +{
9489 return i + xadd(&v->counter, i);
9490 }
9491
9492 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9493 }
9494
9495 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9496 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9497 +{
9498 + return atomic64_add_return_unchecked(1, v);
9499 +}
9500 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9501
9502 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9503 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9504 return cmpxchg(&v->counter, old, new);
9505 }
9506
9507 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9508 +{
9509 + return cmpxchg(&v->counter, old, new);
9510 +}
9511 +
9512 static inline long atomic64_xchg(atomic64_t *v, long new)
9513 {
9514 return xchg(&v->counter, new);
9515 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
9516 */
9517 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9518 {
9519 - long c, old;
9520 + long c, old, new;
9521 c = atomic64_read(v);
9522 for (;;) {
9523 - if (unlikely(c == (u)))
9524 + if (unlikely(c == u))
9525 break;
9526 - old = atomic64_cmpxchg((v), c, c + (a));
9527 +
9528 + asm volatile("add %2,%0\n"
9529 +
9530 +#ifdef CONFIG_PAX_REFCOUNT
9531 + "jno 0f\n"
9532 + "sub %2,%0\n"
9533 + "int $4\n0:\n"
9534 + _ASM_EXTABLE(0b, 0b)
9535 +#endif
9536 +
9537 + : "=r" (new)
9538 + : "0" (c), "ir" (a));
9539 +
9540 + old = atomic64_cmpxchg(v, c, new);
9541 if (likely(old == c))
9542 break;
9543 c = old;
9544 }
9545 - return c != (u);
9546 + return c != u;
9547 }
9548
9549 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9550 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9551 index b97596e..9bd48b06 100644
9552 --- a/arch/x86/include/asm/bitops.h
9553 +++ b/arch/x86/include/asm/bitops.h
9554 @@ -38,7 +38,7 @@
9555 * a mask operation on a byte.
9556 */
9557 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9558 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9559 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9560 #define CONST_MASK(nr) (1 << ((nr) & 7))
9561
9562 /**
9563 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9564 index 5e1a2ee..c9f9533 100644
9565 --- a/arch/x86/include/asm/boot.h
9566 +++ b/arch/x86/include/asm/boot.h
9567 @@ -11,10 +11,15 @@
9568 #include <asm/pgtable_types.h>
9569
9570 /* Physical address where kernel should be loaded. */
9571 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9572 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9573 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9574 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9575
9576 +#ifndef __ASSEMBLY__
9577 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9578 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9579 +#endif
9580 +
9581 /* Minimum kernel alignment, as a power of two */
9582 #ifdef CONFIG_X86_64
9583 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9584 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9585 index 48f99f1..d78ebf9 100644
9586 --- a/arch/x86/include/asm/cache.h
9587 +++ b/arch/x86/include/asm/cache.h
9588 @@ -5,12 +5,13 @@
9589
9590 /* L1 cache line size */
9591 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9592 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9593 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9594
9595 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9596 +#define __read_only __attribute__((__section__(".data..read_only")))
9597
9598 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
9599 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
9600 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
9601
9602 #ifdef CONFIG_X86_VSMP
9603 #ifdef CONFIG_SMP
9604 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9605 index 4e12668..501d239 100644
9606 --- a/arch/x86/include/asm/cacheflush.h
9607 +++ b/arch/x86/include/asm/cacheflush.h
9608 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
9609 unsigned long pg_flags = pg->flags & _PGMT_MASK;
9610
9611 if (pg_flags == _PGMT_DEFAULT)
9612 - return -1;
9613 + return ~0UL;
9614 else if (pg_flags == _PGMT_WC)
9615 return _PAGE_CACHE_WC;
9616 else if (pg_flags == _PGMT_UC_MINUS)
9617 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9618 index 46fc474..b02b0f9 100644
9619 --- a/arch/x86/include/asm/checksum_32.h
9620 +++ b/arch/x86/include/asm/checksum_32.h
9621 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9622 int len, __wsum sum,
9623 int *src_err_ptr, int *dst_err_ptr);
9624
9625 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9626 + int len, __wsum sum,
9627 + int *src_err_ptr, int *dst_err_ptr);
9628 +
9629 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9630 + int len, __wsum sum,
9631 + int *src_err_ptr, int *dst_err_ptr);
9632 +
9633 /*
9634 * Note: when you get a NULL pointer exception here this means someone
9635 * passed in an incorrect kernel address to one of these functions.
9636 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9637 int *err_ptr)
9638 {
9639 might_sleep();
9640 - return csum_partial_copy_generic((__force void *)src, dst,
9641 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
9642 len, sum, err_ptr, NULL);
9643 }
9644
9645 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9646 {
9647 might_sleep();
9648 if (access_ok(VERIFY_WRITE, dst, len))
9649 - return csum_partial_copy_generic(src, (__force void *)dst,
9650 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9651 len, sum, NULL, err_ptr);
9652
9653 if (len)
9654 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
9655 index 99480e5..d81165b 100644
9656 --- a/arch/x86/include/asm/cmpxchg.h
9657 +++ b/arch/x86/include/asm/cmpxchg.h
9658 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
9659 __compiletime_error("Bad argument size for cmpxchg");
9660 extern void __xadd_wrong_size(void)
9661 __compiletime_error("Bad argument size for xadd");
9662 +extern void __xadd_check_overflow_wrong_size(void)
9663 + __compiletime_error("Bad argument size for xadd_check_overflow");
9664 extern void __add_wrong_size(void)
9665 __compiletime_error("Bad argument size for add");
9666 +extern void __add_check_overflow_wrong_size(void)
9667 + __compiletime_error("Bad argument size for add_check_overflow");
9668
9669 /*
9670 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
9671 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
9672 __ret; \
9673 })
9674
9675 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
9676 + ({ \
9677 + __typeof__ (*(ptr)) __ret = (arg); \
9678 + switch (sizeof(*(ptr))) { \
9679 + case __X86_CASE_L: \
9680 + asm volatile (lock #op "l %0, %1\n" \
9681 + "jno 0f\n" \
9682 + "mov %0,%1\n" \
9683 + "int $4\n0:\n" \
9684 + _ASM_EXTABLE(0b, 0b) \
9685 + : "+r" (__ret), "+m" (*(ptr)) \
9686 + : : "memory", "cc"); \
9687 + break; \
9688 + case __X86_CASE_Q: \
9689 + asm volatile (lock #op "q %q0, %1\n" \
9690 + "jno 0f\n" \
9691 + "mov %0,%1\n" \
9692 + "int $4\n0:\n" \
9693 + _ASM_EXTABLE(0b, 0b) \
9694 + : "+r" (__ret), "+m" (*(ptr)) \
9695 + : : "memory", "cc"); \
9696 + break; \
9697 + default: \
9698 + __ ## op ## _check_overflow_wrong_size(); \
9699 + } \
9700 + __ret; \
9701 + })
9702 +
9703 /*
9704 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
9705 * Since this is generally used to protect other memory information, we
9706 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
9707 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
9708 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
9709
9710 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
9711 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
9712 +
9713 #define __add(ptr, inc, lock) \
9714 ({ \
9715 __typeof__ (*(ptr)) __ret = (inc); \
9716 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
9717 index 8d67d42..183d0eb 100644
9718 --- a/arch/x86/include/asm/cpufeature.h
9719 +++ b/arch/x86/include/asm/cpufeature.h
9720 @@ -367,7 +367,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
9721 ".section .discard,\"aw\",@progbits\n"
9722 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
9723 ".previous\n"
9724 - ".section .altinstr_replacement,\"ax\"\n"
9725 + ".section .altinstr_replacement,\"a\"\n"
9726 "3: movb $1,%0\n"
9727 "4:\n"
9728 ".previous\n"
9729 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9730 index e95822d..a90010e 100644
9731 --- a/arch/x86/include/asm/desc.h
9732 +++ b/arch/x86/include/asm/desc.h
9733 @@ -4,6 +4,7 @@
9734 #include <asm/desc_defs.h>
9735 #include <asm/ldt.h>
9736 #include <asm/mmu.h>
9737 +#include <asm/pgtable.h>
9738
9739 #include <linux/smp.h>
9740
9741 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9742
9743 desc->type = (info->read_exec_only ^ 1) << 1;
9744 desc->type |= info->contents << 2;
9745 + desc->type |= info->seg_not_present ^ 1;
9746
9747 desc->s = 1;
9748 desc->dpl = 0x3;
9749 @@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9750 }
9751
9752 extern struct desc_ptr idt_descr;
9753 -extern gate_desc idt_table[];
9754 extern struct desc_ptr nmi_idt_descr;
9755 -extern gate_desc nmi_idt_table[];
9756 -
9757 -struct gdt_page {
9758 - struct desc_struct gdt[GDT_ENTRIES];
9759 -} __attribute__((aligned(PAGE_SIZE)));
9760 -
9761 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9762 +extern gate_desc idt_table[256];
9763 +extern gate_desc nmi_idt_table[256];
9764
9765 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9766 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9767 {
9768 - return per_cpu(gdt_page, cpu).gdt;
9769 + return cpu_gdt_table[cpu];
9770 }
9771
9772 #ifdef CONFIG_X86_64
9773 @@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9774 unsigned long base, unsigned dpl, unsigned flags,
9775 unsigned short seg)
9776 {
9777 - gate->a = (seg << 16) | (base & 0xffff);
9778 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9779 + gate->gate.offset_low = base;
9780 + gate->gate.seg = seg;
9781 + gate->gate.reserved = 0;
9782 + gate->gate.type = type;
9783 + gate->gate.s = 0;
9784 + gate->gate.dpl = dpl;
9785 + gate->gate.p = 1;
9786 + gate->gate.offset_high = base >> 16;
9787 }
9788
9789 #endif
9790 @@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9791
9792 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
9793 {
9794 + pax_open_kernel();
9795 memcpy(&idt[entry], gate, sizeof(*gate));
9796 + pax_close_kernel();
9797 }
9798
9799 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
9800 {
9801 + pax_open_kernel();
9802 memcpy(&ldt[entry], desc, 8);
9803 + pax_close_kernel();
9804 }
9805
9806 static inline void
9807 @@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
9808 default: size = sizeof(*gdt); break;
9809 }
9810
9811 + pax_open_kernel();
9812 memcpy(&gdt[entry], desc, size);
9813 + pax_close_kernel();
9814 }
9815
9816 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9817 @@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9818
9819 static inline void native_load_tr_desc(void)
9820 {
9821 + pax_open_kernel();
9822 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9823 + pax_close_kernel();
9824 }
9825
9826 static inline void native_load_gdt(const struct desc_ptr *dtr)
9827 @@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9828 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9829 unsigned int i;
9830
9831 + pax_open_kernel();
9832 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9833 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9834 + pax_close_kernel();
9835 }
9836
9837 #define _LDT_empty(info) \
9838 @@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9839 }
9840
9841 #ifdef CONFIG_X86_64
9842 -static inline void set_nmi_gate(int gate, void *addr)
9843 +static inline void set_nmi_gate(int gate, const void *addr)
9844 {
9845 gate_desc s;
9846
9847 @@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
9848 }
9849 #endif
9850
9851 -static inline void _set_gate(int gate, unsigned type, void *addr,
9852 +static inline void _set_gate(int gate, unsigned type, const void *addr,
9853 unsigned dpl, unsigned ist, unsigned seg)
9854 {
9855 gate_desc s;
9856 @@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9857 * Pentium F0 0F bugfix can have resulted in the mapped
9858 * IDT being write-protected.
9859 */
9860 -static inline void set_intr_gate(unsigned int n, void *addr)
9861 +static inline void set_intr_gate(unsigned int n, const void *addr)
9862 {
9863 BUG_ON((unsigned)n > 0xFF);
9864 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9865 @@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9866 /*
9867 * This routine sets up an interrupt gate at directory privilege level 3.
9868 */
9869 -static inline void set_system_intr_gate(unsigned int n, void *addr)
9870 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
9871 {
9872 BUG_ON((unsigned)n > 0xFF);
9873 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9874 }
9875
9876 -static inline void set_system_trap_gate(unsigned int n, void *addr)
9877 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
9878 {
9879 BUG_ON((unsigned)n > 0xFF);
9880 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9881 }
9882
9883 -static inline void set_trap_gate(unsigned int n, void *addr)
9884 +static inline void set_trap_gate(unsigned int n, const void *addr)
9885 {
9886 BUG_ON((unsigned)n > 0xFF);
9887 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9888 @@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9889 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9890 {
9891 BUG_ON((unsigned)n > 0xFF);
9892 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9893 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9894 }
9895
9896 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9897 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9898 {
9899 BUG_ON((unsigned)n > 0xFF);
9900 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9901 }
9902
9903 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9904 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9905 {
9906 BUG_ON((unsigned)n > 0xFF);
9907 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9908 }
9909
9910 +#ifdef CONFIG_X86_32
9911 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9912 +{
9913 + struct desc_struct d;
9914 +
9915 + if (likely(limit))
9916 + limit = (limit - 1UL) >> PAGE_SHIFT;
9917 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
9918 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9919 +}
9920 +#endif
9921 +
9922 #endif /* _ASM_X86_DESC_H */
9923 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9924 index 278441f..b95a174 100644
9925 --- a/arch/x86/include/asm/desc_defs.h
9926 +++ b/arch/x86/include/asm/desc_defs.h
9927 @@ -31,6 +31,12 @@ struct desc_struct {
9928 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9929 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9930 };
9931 + struct {
9932 + u16 offset_low;
9933 + u16 seg;
9934 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9935 + unsigned offset_high: 16;
9936 + } gate;
9937 };
9938 } __attribute__((packed));
9939
9940 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9941 index 3778256..c5d4fce 100644
9942 --- a/arch/x86/include/asm/e820.h
9943 +++ b/arch/x86/include/asm/e820.h
9944 @@ -69,7 +69,7 @@ struct e820map {
9945 #define ISA_START_ADDRESS 0xa0000
9946 #define ISA_END_ADDRESS 0x100000
9947
9948 -#define BIOS_BEGIN 0x000a0000
9949 +#define BIOS_BEGIN 0x000c0000
9950 #define BIOS_END 0x00100000
9951
9952 #define BIOS_ROM_BASE 0xffe00000
9953 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9954 index 5f962df..7289f09 100644
9955 --- a/arch/x86/include/asm/elf.h
9956 +++ b/arch/x86/include/asm/elf.h
9957 @@ -238,7 +238,25 @@ extern int force_personality32;
9958 the loader. We need to make sure that it is out of the way of the program
9959 that it will "exec", and that there is sufficient room for the brk. */
9960
9961 +#ifdef CONFIG_PAX_SEGMEXEC
9962 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9963 +#else
9964 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9965 +#endif
9966 +
9967 +#ifdef CONFIG_PAX_ASLR
9968 +#ifdef CONFIG_X86_32
9969 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9970 +
9971 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9972 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9973 +#else
9974 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
9975 +
9976 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9977 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9978 +#endif
9979 +#endif
9980
9981 /* This yields a mask that user programs can use to figure out what
9982 instruction set this CPU supports. This could be done in user space,
9983 @@ -291,9 +309,7 @@ do { \
9984
9985 #define ARCH_DLINFO \
9986 do { \
9987 - if (vdso_enabled) \
9988 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
9989 - (unsigned long)current->mm->context.vdso); \
9990 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
9991 } while (0)
9992
9993 #define AT_SYSINFO 32
9994 @@ -304,7 +320,7 @@ do { \
9995
9996 #endif /* !CONFIG_X86_32 */
9997
9998 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
9999 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10000
10001 #define VDSO_ENTRY \
10002 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10003 @@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10004 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10005 #define compat_arch_setup_additional_pages syscall32_setup_pages
10006
10007 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10008 -#define arch_randomize_brk arch_randomize_brk
10009 -
10010 /*
10011 * True on X86_32 or when emulating IA32 on X86_64
10012 */
10013 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10014 index cc70c1c..d96d011 100644
10015 --- a/arch/x86/include/asm/emergency-restart.h
10016 +++ b/arch/x86/include/asm/emergency-restart.h
10017 @@ -15,6 +15,6 @@ enum reboot_type {
10018
10019 extern enum reboot_type reboot_type;
10020
10021 -extern void machine_emergency_restart(void);
10022 +extern void machine_emergency_restart(void) __noreturn;
10023
10024 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10025 diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
10026 index dbe82a5..c6d8a00 100644
10027 --- a/arch/x86/include/asm/floppy.h
10028 +++ b/arch/x86/include/asm/floppy.h
10029 @@ -157,6 +157,7 @@ static unsigned long dma_mem_alloc(unsigned long size)
10030 }
10031
10032
10033 +static unsigned long vdma_mem_alloc(unsigned long size) __size_overflow(1);
10034 static unsigned long vdma_mem_alloc(unsigned long size)
10035 {
10036 return (unsigned long)vmalloc(size);
10037 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10038 index d09bb03..4ea4194 100644
10039 --- a/arch/x86/include/asm/futex.h
10040 +++ b/arch/x86/include/asm/futex.h
10041 @@ -12,16 +12,18 @@
10042 #include <asm/system.h>
10043
10044 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10045 + typecheck(u32 __user *, uaddr); \
10046 asm volatile("1:\t" insn "\n" \
10047 "2:\t.section .fixup,\"ax\"\n" \
10048 "3:\tmov\t%3, %1\n" \
10049 "\tjmp\t2b\n" \
10050 "\t.previous\n" \
10051 _ASM_EXTABLE(1b, 3b) \
10052 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10053 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10054 : "i" (-EFAULT), "0" (oparg), "1" (0))
10055
10056 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10057 + typecheck(u32 __user *, uaddr); \
10058 asm volatile("1:\tmovl %2, %0\n" \
10059 "\tmovl\t%0, %3\n" \
10060 "\t" insn "\n" \
10061 @@ -34,7 +36,7 @@
10062 _ASM_EXTABLE(1b, 4b) \
10063 _ASM_EXTABLE(2b, 4b) \
10064 : "=&a" (oldval), "=&r" (ret), \
10065 - "+m" (*uaddr), "=&r" (tem) \
10066 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10067 : "r" (oparg), "i" (-EFAULT), "1" (0))
10068
10069 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10070 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10071
10072 switch (op) {
10073 case FUTEX_OP_SET:
10074 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10075 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10076 break;
10077 case FUTEX_OP_ADD:
10078 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10079 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10080 uaddr, oparg);
10081 break;
10082 case FUTEX_OP_OR:
10083 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10084 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10085 return -EFAULT;
10086
10087 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10088 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10089 "2:\t.section .fixup, \"ax\"\n"
10090 "3:\tmov %3, %0\n"
10091 "\tjmp 2b\n"
10092 "\t.previous\n"
10093 _ASM_EXTABLE(1b, 3b)
10094 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10095 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10096 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10097 : "memory"
10098 );
10099 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10100 index eb92a6e..b98b2f4 100644
10101 --- a/arch/x86/include/asm/hw_irq.h
10102 +++ b/arch/x86/include/asm/hw_irq.h
10103 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10104 extern void enable_IO_APIC(void);
10105
10106 /* Statistics */
10107 -extern atomic_t irq_err_count;
10108 -extern atomic_t irq_mis_count;
10109 +extern atomic_unchecked_t irq_err_count;
10110 +extern atomic_unchecked_t irq_mis_count;
10111
10112 /* EISA */
10113 extern void eisa_set_level_irq(unsigned int irq);
10114 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10115 index 2479049..3fb9795 100644
10116 --- a/arch/x86/include/asm/i387.h
10117 +++ b/arch/x86/include/asm/i387.h
10118 @@ -93,6 +93,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10119 {
10120 int err;
10121
10122 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10123 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10124 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10125 +#endif
10126 +
10127 /* See comment in fxsave() below. */
10128 #ifdef CONFIG_AS_FXSAVEQ
10129 asm volatile("1: fxrstorq %[fx]\n\t"
10130 @@ -122,6 +127,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10131 {
10132 int err;
10133
10134 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10135 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10136 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10137 +#endif
10138 +
10139 /*
10140 * Clear the bytes not touched by the fxsave and reserved
10141 * for the SW usage.
10142 @@ -278,7 +288,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10143 "emms\n\t" /* clear stack tags */
10144 "fildl %P[addr]", /* set F?P to defined value */
10145 X86_FEATURE_FXSAVE_LEAK,
10146 - [addr] "m" (tsk->thread.fpu.has_fpu));
10147 + [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10148
10149 return fpu_restore_checking(&tsk->thread.fpu);
10150 }
10151 @@ -445,7 +455,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
10152 static inline bool interrupted_user_mode(void)
10153 {
10154 struct pt_regs *regs = get_irq_regs();
10155 - return regs && user_mode_vm(regs);
10156 + return regs && user_mode(regs);
10157 }
10158
10159 /*
10160 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10161 index d8e8eef..99f81ae 100644
10162 --- a/arch/x86/include/asm/io.h
10163 +++ b/arch/x86/include/asm/io.h
10164 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10165
10166 #include <linux/vmalloc.h>
10167
10168 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10169 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10170 +{
10171 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10172 +}
10173 +
10174 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10175 +{
10176 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10177 +}
10178 +
10179 /*
10180 * Convert a virtual cached pointer to an uncached pointer
10181 */
10182 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10183 index bba3cf8..06bc8da 100644
10184 --- a/arch/x86/include/asm/irqflags.h
10185 +++ b/arch/x86/include/asm/irqflags.h
10186 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10187 sti; \
10188 sysexit
10189
10190 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10191 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10192 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10193 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10194 +
10195 #else
10196 #define INTERRUPT_RETURN iret
10197 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10198 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10199 index 5478825..839e88c 100644
10200 --- a/arch/x86/include/asm/kprobes.h
10201 +++ b/arch/x86/include/asm/kprobes.h
10202 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10203 #define RELATIVEJUMP_SIZE 5
10204 #define RELATIVECALL_OPCODE 0xe8
10205 #define RELATIVE_ADDR_SIZE 4
10206 -#define MAX_STACK_SIZE 64
10207 -#define MIN_STACK_SIZE(ADDR) \
10208 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10209 - THREAD_SIZE - (unsigned long)(ADDR))) \
10210 - ? (MAX_STACK_SIZE) \
10211 - : (((unsigned long)current_thread_info()) + \
10212 - THREAD_SIZE - (unsigned long)(ADDR)))
10213 +#define MAX_STACK_SIZE 64UL
10214 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10215
10216 #define flush_insn_slot(p) do { } while (0)
10217
10218 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10219 index 52d6640..136b3bd 100644
10220 --- a/arch/x86/include/asm/kvm_host.h
10221 +++ b/arch/x86/include/asm/kvm_host.h
10222 @@ -663,7 +663,7 @@ struct kvm_x86_ops {
10223 int (*check_intercept)(struct kvm_vcpu *vcpu,
10224 struct x86_instruction_info *info,
10225 enum x86_intercept_stage stage);
10226 -};
10227 +} __do_const;
10228
10229 struct kvm_arch_async_pf {
10230 u32 token;
10231 @@ -694,7 +694,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
10232 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
10233
10234 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
10235 - const void *val, int bytes);
10236 + const void *val, int bytes) __size_overflow(2);
10237 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
10238
10239 extern bool tdp_enabled;
10240 @@ -781,7 +781,7 @@ int fx_init(struct kvm_vcpu *vcpu);
10241
10242 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
10243 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
10244 - const u8 *new, int bytes);
10245 + const u8 *new, int bytes) __size_overflow(2);
10246 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
10247 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
10248 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
10249 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10250 index 9cdae5d..300d20f 100644
10251 --- a/arch/x86/include/asm/local.h
10252 +++ b/arch/x86/include/asm/local.h
10253 @@ -18,26 +18,58 @@ typedef struct {
10254
10255 static inline void local_inc(local_t *l)
10256 {
10257 - asm volatile(_ASM_INC "%0"
10258 + asm volatile(_ASM_INC "%0\n"
10259 +
10260 +#ifdef CONFIG_PAX_REFCOUNT
10261 + "jno 0f\n"
10262 + _ASM_DEC "%0\n"
10263 + "int $4\n0:\n"
10264 + _ASM_EXTABLE(0b, 0b)
10265 +#endif
10266 +
10267 : "+m" (l->a.counter));
10268 }
10269
10270 static inline void local_dec(local_t *l)
10271 {
10272 - asm volatile(_ASM_DEC "%0"
10273 + asm volatile(_ASM_DEC "%0\n"
10274 +
10275 +#ifdef CONFIG_PAX_REFCOUNT
10276 + "jno 0f\n"
10277 + _ASM_INC "%0\n"
10278 + "int $4\n0:\n"
10279 + _ASM_EXTABLE(0b, 0b)
10280 +#endif
10281 +
10282 : "+m" (l->a.counter));
10283 }
10284
10285 static inline void local_add(long i, local_t *l)
10286 {
10287 - asm volatile(_ASM_ADD "%1,%0"
10288 + asm volatile(_ASM_ADD "%1,%0\n"
10289 +
10290 +#ifdef CONFIG_PAX_REFCOUNT
10291 + "jno 0f\n"
10292 + _ASM_SUB "%1,%0\n"
10293 + "int $4\n0:\n"
10294 + _ASM_EXTABLE(0b, 0b)
10295 +#endif
10296 +
10297 : "+m" (l->a.counter)
10298 : "ir" (i));
10299 }
10300
10301 static inline void local_sub(long i, local_t *l)
10302 {
10303 - asm volatile(_ASM_SUB "%1,%0"
10304 + asm volatile(_ASM_SUB "%1,%0\n"
10305 +
10306 +#ifdef CONFIG_PAX_REFCOUNT
10307 + "jno 0f\n"
10308 + _ASM_ADD "%1,%0\n"
10309 + "int $4\n0:\n"
10310 + _ASM_EXTABLE(0b, 0b)
10311 +#endif
10312 +
10313 : "+m" (l->a.counter)
10314 : "ir" (i));
10315 }
10316 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10317 {
10318 unsigned char c;
10319
10320 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10321 + asm volatile(_ASM_SUB "%2,%0\n"
10322 +
10323 +#ifdef CONFIG_PAX_REFCOUNT
10324 + "jno 0f\n"
10325 + _ASM_ADD "%2,%0\n"
10326 + "int $4\n0:\n"
10327 + _ASM_EXTABLE(0b, 0b)
10328 +#endif
10329 +
10330 + "sete %1\n"
10331 : "+m" (l->a.counter), "=qm" (c)
10332 : "ir" (i) : "memory");
10333 return c;
10334 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10335 {
10336 unsigned char c;
10337
10338 - asm volatile(_ASM_DEC "%0; sete %1"
10339 + asm volatile(_ASM_DEC "%0\n"
10340 +
10341 +#ifdef CONFIG_PAX_REFCOUNT
10342 + "jno 0f\n"
10343 + _ASM_INC "%0\n"
10344 + "int $4\n0:\n"
10345 + _ASM_EXTABLE(0b, 0b)
10346 +#endif
10347 +
10348 + "sete %1\n"
10349 : "+m" (l->a.counter), "=qm" (c)
10350 : : "memory");
10351 return c != 0;
10352 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10353 {
10354 unsigned char c;
10355
10356 - asm volatile(_ASM_INC "%0; sete %1"
10357 + asm volatile(_ASM_INC "%0\n"
10358 +
10359 +#ifdef CONFIG_PAX_REFCOUNT
10360 + "jno 0f\n"
10361 + _ASM_DEC "%0\n"
10362 + "int $4\n0:\n"
10363 + _ASM_EXTABLE(0b, 0b)
10364 +#endif
10365 +
10366 + "sete %1\n"
10367 : "+m" (l->a.counter), "=qm" (c)
10368 : : "memory");
10369 return c != 0;
10370 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10371 {
10372 unsigned char c;
10373
10374 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10375 + asm volatile(_ASM_ADD "%2,%0\n"
10376 +
10377 +#ifdef CONFIG_PAX_REFCOUNT
10378 + "jno 0f\n"
10379 + _ASM_SUB "%2,%0\n"
10380 + "int $4\n0:\n"
10381 + _ASM_EXTABLE(0b, 0b)
10382 +#endif
10383 +
10384 + "sets %1\n"
10385 : "+m" (l->a.counter), "=qm" (c)
10386 : "ir" (i) : "memory");
10387 return c;
10388 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10389 #endif
10390 /* Modern 486+ processor */
10391 __i = i;
10392 - asm volatile(_ASM_XADD "%0, %1;"
10393 + asm volatile(_ASM_XADD "%0, %1\n"
10394 +
10395 +#ifdef CONFIG_PAX_REFCOUNT
10396 + "jno 0f\n"
10397 + _ASM_MOV "%0,%1\n"
10398 + "int $4\n0:\n"
10399 + _ASM_EXTABLE(0b, 0b)
10400 +#endif
10401 +
10402 : "+r" (i), "+m" (l->a.counter)
10403 : : "memory");
10404 return i + __i;
10405 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10406 index 593e51d..fa69c9a 100644
10407 --- a/arch/x86/include/asm/mman.h
10408 +++ b/arch/x86/include/asm/mman.h
10409 @@ -5,4 +5,14 @@
10410
10411 #include <asm-generic/mman.h>
10412
10413 +#ifdef __KERNEL__
10414 +#ifndef __ASSEMBLY__
10415 +#ifdef CONFIG_X86_32
10416 +#define arch_mmap_check i386_mmap_check
10417 +int i386_mmap_check(unsigned long addr, unsigned long len,
10418 + unsigned long flags);
10419 +#endif
10420 +#endif
10421 +#endif
10422 +
10423 #endif /* _ASM_X86_MMAN_H */
10424 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10425 index 5f55e69..e20bfb1 100644
10426 --- a/arch/x86/include/asm/mmu.h
10427 +++ b/arch/x86/include/asm/mmu.h
10428 @@ -9,7 +9,7 @@
10429 * we put the segment information here.
10430 */
10431 typedef struct {
10432 - void *ldt;
10433 + struct desc_struct *ldt;
10434 int size;
10435
10436 #ifdef CONFIG_X86_64
10437 @@ -18,7 +18,19 @@ typedef struct {
10438 #endif
10439
10440 struct mutex lock;
10441 - void *vdso;
10442 + unsigned long vdso;
10443 +
10444 +#ifdef CONFIG_X86_32
10445 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10446 + unsigned long user_cs_base;
10447 + unsigned long user_cs_limit;
10448 +
10449 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10450 + cpumask_t cpu_user_cs_mask;
10451 +#endif
10452 +
10453 +#endif
10454 +#endif
10455 } mm_context_t;
10456
10457 #ifdef CONFIG_SMP
10458 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10459 index 6902152..399f3a2 100644
10460 --- a/arch/x86/include/asm/mmu_context.h
10461 +++ b/arch/x86/include/asm/mmu_context.h
10462 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10463
10464 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10465 {
10466 +
10467 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10468 + unsigned int i;
10469 + pgd_t *pgd;
10470 +
10471 + pax_open_kernel();
10472 + pgd = get_cpu_pgd(smp_processor_id());
10473 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10474 + set_pgd_batched(pgd+i, native_make_pgd(0));
10475 + pax_close_kernel();
10476 +#endif
10477 +
10478 #ifdef CONFIG_SMP
10479 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10480 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10481 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10482 struct task_struct *tsk)
10483 {
10484 unsigned cpu = smp_processor_id();
10485 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10486 + int tlbstate = TLBSTATE_OK;
10487 +#endif
10488
10489 if (likely(prev != next)) {
10490 #ifdef CONFIG_SMP
10491 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10492 + tlbstate = percpu_read(cpu_tlbstate.state);
10493 +#endif
10494 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10495 percpu_write(cpu_tlbstate.active_mm, next);
10496 #endif
10497 cpumask_set_cpu(cpu, mm_cpumask(next));
10498
10499 /* Re-load page tables */
10500 +#ifdef CONFIG_PAX_PER_CPU_PGD
10501 + pax_open_kernel();
10502 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10503 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10504 + pax_close_kernel();
10505 + load_cr3(get_cpu_pgd(cpu));
10506 +#else
10507 load_cr3(next->pgd);
10508 +#endif
10509
10510 /* stop flush ipis for the previous mm */
10511 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10512 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10513 */
10514 if (unlikely(prev->context.ldt != next->context.ldt))
10515 load_LDT_nolock(&next->context);
10516 - }
10517 +
10518 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10519 + if (!(__supported_pte_mask & _PAGE_NX)) {
10520 + smp_mb__before_clear_bit();
10521 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10522 + smp_mb__after_clear_bit();
10523 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10524 + }
10525 +#endif
10526 +
10527 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10528 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10529 + prev->context.user_cs_limit != next->context.user_cs_limit))
10530 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10531 #ifdef CONFIG_SMP
10532 + else if (unlikely(tlbstate != TLBSTATE_OK))
10533 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10534 +#endif
10535 +#endif
10536 +
10537 + }
10538 else {
10539 +
10540 +#ifdef CONFIG_PAX_PER_CPU_PGD
10541 + pax_open_kernel();
10542 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10543 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10544 + pax_close_kernel();
10545 + load_cr3(get_cpu_pgd(cpu));
10546 +#endif
10547 +
10548 +#ifdef CONFIG_SMP
10549 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10550 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10551
10552 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10553 * tlb flush IPI delivery. We must reload CR3
10554 * to make sure to use no freed page tables.
10555 */
10556 +
10557 +#ifndef CONFIG_PAX_PER_CPU_PGD
10558 load_cr3(next->pgd);
10559 +#endif
10560 +
10561 load_LDT_nolock(&next->context);
10562 +
10563 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10564 + if (!(__supported_pte_mask & _PAGE_NX))
10565 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10566 +#endif
10567 +
10568 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10569 +#ifdef CONFIG_PAX_PAGEEXEC
10570 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
10571 +#endif
10572 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10573 +#endif
10574 +
10575 }
10576 +#endif
10577 }
10578 -#endif
10579 }
10580
10581 #define activate_mm(prev, next) \
10582 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10583 index 9eae775..c914fea 100644
10584 --- a/arch/x86/include/asm/module.h
10585 +++ b/arch/x86/include/asm/module.h
10586 @@ -5,6 +5,7 @@
10587
10588 #ifdef CONFIG_X86_64
10589 /* X86_64 does not define MODULE_PROC_FAMILY */
10590 +#define MODULE_PROC_FAMILY ""
10591 #elif defined CONFIG_M386
10592 #define MODULE_PROC_FAMILY "386 "
10593 #elif defined CONFIG_M486
10594 @@ -59,8 +60,20 @@
10595 #error unknown processor family
10596 #endif
10597
10598 -#ifdef CONFIG_X86_32
10599 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
10600 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10601 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10602 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10603 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10604 +#else
10605 +#define MODULE_PAX_KERNEXEC ""
10606 #endif
10607
10608 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10609 +#define MODULE_PAX_UDEREF "UDEREF "
10610 +#else
10611 +#define MODULE_PAX_UDEREF ""
10612 +#endif
10613 +
10614 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10615 +
10616 #endif /* _ASM_X86_MODULE_H */
10617 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10618 index 7639dbf..e08a58c 100644
10619 --- a/arch/x86/include/asm/page_64_types.h
10620 +++ b/arch/x86/include/asm/page_64_types.h
10621 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10622
10623 /* duplicated to the one in bootmem.h */
10624 extern unsigned long max_pfn;
10625 -extern unsigned long phys_base;
10626 +extern const unsigned long phys_base;
10627
10628 extern unsigned long __phys_addr(unsigned long);
10629 #define __phys_reloc_hide(x) (x)
10630 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10631 index a7d2db9..edb023e 100644
10632 --- a/arch/x86/include/asm/paravirt.h
10633 +++ b/arch/x86/include/asm/paravirt.h
10634 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10635 val);
10636 }
10637
10638 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10639 +{
10640 + pgdval_t val = native_pgd_val(pgd);
10641 +
10642 + if (sizeof(pgdval_t) > sizeof(long))
10643 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10644 + val, (u64)val >> 32);
10645 + else
10646 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10647 + val);
10648 +}
10649 +
10650 static inline void pgd_clear(pgd_t *pgdp)
10651 {
10652 set_pgd(pgdp, __pgd(0));
10653 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10654 pv_mmu_ops.set_fixmap(idx, phys, flags);
10655 }
10656
10657 +#ifdef CONFIG_PAX_KERNEXEC
10658 +static inline unsigned long pax_open_kernel(void)
10659 +{
10660 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10661 +}
10662 +
10663 +static inline unsigned long pax_close_kernel(void)
10664 +{
10665 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10666 +}
10667 +#else
10668 +static inline unsigned long pax_open_kernel(void) { return 0; }
10669 +static inline unsigned long pax_close_kernel(void) { return 0; }
10670 +#endif
10671 +
10672 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10673
10674 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
10675 @@ -964,7 +991,7 @@ extern void default_banner(void);
10676
10677 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10678 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10679 -#define PARA_INDIRECT(addr) *%cs:addr
10680 +#define PARA_INDIRECT(addr) *%ss:addr
10681 #endif
10682
10683 #define INTERRUPT_RETURN \
10684 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
10685 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10686 CLBR_NONE, \
10687 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10688 +
10689 +#define GET_CR0_INTO_RDI \
10690 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10691 + mov %rax,%rdi
10692 +
10693 +#define SET_RDI_INTO_CR0 \
10694 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10695 +
10696 +#define GET_CR3_INTO_RDI \
10697 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10698 + mov %rax,%rdi
10699 +
10700 +#define SET_RDI_INTO_CR3 \
10701 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10702 +
10703 #endif /* CONFIG_X86_32 */
10704
10705 #endif /* __ASSEMBLY__ */
10706 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10707 index 8e8b9a4..f07d725 100644
10708 --- a/arch/x86/include/asm/paravirt_types.h
10709 +++ b/arch/x86/include/asm/paravirt_types.h
10710 @@ -84,20 +84,20 @@ struct pv_init_ops {
10711 */
10712 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10713 unsigned long addr, unsigned len);
10714 -};
10715 +} __no_const;
10716
10717
10718 struct pv_lazy_ops {
10719 /* Set deferred update mode, used for batching operations. */
10720 void (*enter)(void);
10721 void (*leave)(void);
10722 -};
10723 +} __no_const;
10724
10725 struct pv_time_ops {
10726 unsigned long long (*sched_clock)(void);
10727 unsigned long long (*steal_clock)(int cpu);
10728 unsigned long (*get_tsc_khz)(void);
10729 -};
10730 +} __no_const;
10731
10732 struct pv_cpu_ops {
10733 /* hooks for various privileged instructions */
10734 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
10735
10736 void (*start_context_switch)(struct task_struct *prev);
10737 void (*end_context_switch)(struct task_struct *next);
10738 -};
10739 +} __no_const;
10740
10741 struct pv_irq_ops {
10742 /*
10743 @@ -224,7 +224,7 @@ struct pv_apic_ops {
10744 unsigned long start_eip,
10745 unsigned long start_esp);
10746 #endif
10747 -};
10748 +} __no_const;
10749
10750 struct pv_mmu_ops {
10751 unsigned long (*read_cr2)(void);
10752 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
10753 struct paravirt_callee_save make_pud;
10754
10755 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10756 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10757 #endif /* PAGETABLE_LEVELS == 4 */
10758 #endif /* PAGETABLE_LEVELS >= 3 */
10759
10760 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
10761 an mfn. We can tell which is which from the index. */
10762 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10763 phys_addr_t phys, pgprot_t flags);
10764 +
10765 +#ifdef CONFIG_PAX_KERNEXEC
10766 + unsigned long (*pax_open_kernel)(void);
10767 + unsigned long (*pax_close_kernel)(void);
10768 +#endif
10769 +
10770 };
10771
10772 struct arch_spinlock;
10773 @@ -334,7 +341,7 @@ struct pv_lock_ops {
10774 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
10775 int (*spin_trylock)(struct arch_spinlock *lock);
10776 void (*spin_unlock)(struct arch_spinlock *lock);
10777 -};
10778 +} __no_const;
10779
10780 /* This contains all the paravirt structures: we get a convenient
10781 * number for each function using the offset which we use to indicate
10782 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10783 index b4389a4..b7ff22c 100644
10784 --- a/arch/x86/include/asm/pgalloc.h
10785 +++ b/arch/x86/include/asm/pgalloc.h
10786 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10787 pmd_t *pmd, pte_t *pte)
10788 {
10789 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10790 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10791 +}
10792 +
10793 +static inline void pmd_populate_user(struct mm_struct *mm,
10794 + pmd_t *pmd, pte_t *pte)
10795 +{
10796 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10797 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10798 }
10799
10800 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10801 index 98391db..8f6984e 100644
10802 --- a/arch/x86/include/asm/pgtable-2level.h
10803 +++ b/arch/x86/include/asm/pgtable-2level.h
10804 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10805
10806 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10807 {
10808 + pax_open_kernel();
10809 *pmdp = pmd;
10810 + pax_close_kernel();
10811 }
10812
10813 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10814 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10815 index effff47..f9e4035 100644
10816 --- a/arch/x86/include/asm/pgtable-3level.h
10817 +++ b/arch/x86/include/asm/pgtable-3level.h
10818 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10819
10820 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10821 {
10822 + pax_open_kernel();
10823 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10824 + pax_close_kernel();
10825 }
10826
10827 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10828 {
10829 + pax_open_kernel();
10830 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10831 + pax_close_kernel();
10832 }
10833
10834 /*
10835 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10836 index 49afb3f..ed14d07 100644
10837 --- a/arch/x86/include/asm/pgtable.h
10838 +++ b/arch/x86/include/asm/pgtable.h
10839 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10840
10841 #ifndef __PAGETABLE_PUD_FOLDED
10842 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10843 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10844 #define pgd_clear(pgd) native_pgd_clear(pgd)
10845 #endif
10846
10847 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10848
10849 #define arch_end_context_switch(prev) do {} while(0)
10850
10851 +#define pax_open_kernel() native_pax_open_kernel()
10852 +#define pax_close_kernel() native_pax_close_kernel()
10853 #endif /* CONFIG_PARAVIRT */
10854
10855 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
10856 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10857 +
10858 +#ifdef CONFIG_PAX_KERNEXEC
10859 +static inline unsigned long native_pax_open_kernel(void)
10860 +{
10861 + unsigned long cr0;
10862 +
10863 + preempt_disable();
10864 + barrier();
10865 + cr0 = read_cr0() ^ X86_CR0_WP;
10866 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
10867 + write_cr0(cr0);
10868 + return cr0 ^ X86_CR0_WP;
10869 +}
10870 +
10871 +static inline unsigned long native_pax_close_kernel(void)
10872 +{
10873 + unsigned long cr0;
10874 +
10875 + cr0 = read_cr0() ^ X86_CR0_WP;
10876 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
10877 + write_cr0(cr0);
10878 + barrier();
10879 + preempt_enable_no_resched();
10880 + return cr0 ^ X86_CR0_WP;
10881 +}
10882 +#else
10883 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
10884 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
10885 +#endif
10886 +
10887 /*
10888 * The following only work if pte_present() is true.
10889 * Undefined behaviour if not..
10890 */
10891 +static inline int pte_user(pte_t pte)
10892 +{
10893 + return pte_val(pte) & _PAGE_USER;
10894 +}
10895 +
10896 static inline int pte_dirty(pte_t pte)
10897 {
10898 return pte_flags(pte) & _PAGE_DIRTY;
10899 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
10900 return pte_clear_flags(pte, _PAGE_RW);
10901 }
10902
10903 +static inline pte_t pte_mkread(pte_t pte)
10904 +{
10905 + return __pte(pte_val(pte) | _PAGE_USER);
10906 +}
10907 +
10908 static inline pte_t pte_mkexec(pte_t pte)
10909 {
10910 - return pte_clear_flags(pte, _PAGE_NX);
10911 +#ifdef CONFIG_X86_PAE
10912 + if (__supported_pte_mask & _PAGE_NX)
10913 + return pte_clear_flags(pte, _PAGE_NX);
10914 + else
10915 +#endif
10916 + return pte_set_flags(pte, _PAGE_USER);
10917 +}
10918 +
10919 +static inline pte_t pte_exprotect(pte_t pte)
10920 +{
10921 +#ifdef CONFIG_X86_PAE
10922 + if (__supported_pte_mask & _PAGE_NX)
10923 + return pte_set_flags(pte, _PAGE_NX);
10924 + else
10925 +#endif
10926 + return pte_clear_flags(pte, _PAGE_USER);
10927 }
10928
10929 static inline pte_t pte_mkdirty(pte_t pte)
10930 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
10931 #endif
10932
10933 #ifndef __ASSEMBLY__
10934 +
10935 +#ifdef CONFIG_PAX_PER_CPU_PGD
10936 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
10937 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
10938 +{
10939 + return cpu_pgd[cpu];
10940 +}
10941 +#endif
10942 +
10943 #include <linux/mm_types.h>
10944
10945 static inline int pte_none(pte_t pte)
10946 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
10947
10948 static inline int pgd_bad(pgd_t pgd)
10949 {
10950 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
10951 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
10952 }
10953
10954 static inline int pgd_none(pgd_t pgd)
10955 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
10956 * pgd_offset() returns a (pgd_t *)
10957 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
10958 */
10959 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
10960 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
10961 +
10962 +#ifdef CONFIG_PAX_PER_CPU_PGD
10963 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
10964 +#endif
10965 +
10966 /*
10967 * a shortcut which implies the use of the kernel's pgd, instead
10968 * of a process's
10969 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
10970 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
10971 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
10972
10973 +#ifdef CONFIG_X86_32
10974 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
10975 +#else
10976 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
10977 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
10978 +
10979 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10980 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
10981 +#else
10982 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
10983 +#endif
10984 +
10985 +#endif
10986 +
10987 #ifndef __ASSEMBLY__
10988
10989 extern int direct_gbpages;
10990 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
10991 * dst and src can be on the same page, but the range must not overlap,
10992 * and must not cross a page boundary.
10993 */
10994 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
10995 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
10996 {
10997 - memcpy(dst, src, count * sizeof(pgd_t));
10998 + pax_open_kernel();
10999 + while (count--)
11000 + *dst++ = *src++;
11001 + pax_close_kernel();
11002 }
11003
11004 +#ifdef CONFIG_PAX_PER_CPU_PGD
11005 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11006 +#endif
11007 +
11008 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11009 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11010 +#else
11011 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11012 +#endif
11013
11014 #include <asm-generic/pgtable.h>
11015 #endif /* __ASSEMBLY__ */
11016 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11017 index 0c92113..34a77c6 100644
11018 --- a/arch/x86/include/asm/pgtable_32.h
11019 +++ b/arch/x86/include/asm/pgtable_32.h
11020 @@ -25,9 +25,6 @@
11021 struct mm_struct;
11022 struct vm_area_struct;
11023
11024 -extern pgd_t swapper_pg_dir[1024];
11025 -extern pgd_t initial_page_table[1024];
11026 -
11027 static inline void pgtable_cache_init(void) { }
11028 static inline void check_pgt_cache(void) { }
11029 void paging_init(void);
11030 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11031 # include <asm/pgtable-2level.h>
11032 #endif
11033
11034 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11035 +extern pgd_t initial_page_table[PTRS_PER_PGD];
11036 +#ifdef CONFIG_X86_PAE
11037 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11038 +#endif
11039 +
11040 #if defined(CONFIG_HIGHPTE)
11041 #define pte_offset_map(dir, address) \
11042 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11043 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11044 /* Clear a kernel PTE and flush it from the TLB */
11045 #define kpte_clear_flush(ptep, vaddr) \
11046 do { \
11047 + pax_open_kernel(); \
11048 pte_clear(&init_mm, (vaddr), (ptep)); \
11049 + pax_close_kernel(); \
11050 __flush_tlb_one((vaddr)); \
11051 } while (0)
11052
11053 @@ -74,6 +79,9 @@ do { \
11054
11055 #endif /* !__ASSEMBLY__ */
11056
11057 +#define HAVE_ARCH_UNMAPPED_AREA
11058 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11059 +
11060 /*
11061 * kern_addr_valid() is (1) for FLATMEM and (0) for
11062 * SPARSEMEM and DISCONTIGMEM
11063 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11064 index ed5903b..c7fe163 100644
11065 --- a/arch/x86/include/asm/pgtable_32_types.h
11066 +++ b/arch/x86/include/asm/pgtable_32_types.h
11067 @@ -8,7 +8,7 @@
11068 */
11069 #ifdef CONFIG_X86_PAE
11070 # include <asm/pgtable-3level_types.h>
11071 -# define PMD_SIZE (1UL << PMD_SHIFT)
11072 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11073 # define PMD_MASK (~(PMD_SIZE - 1))
11074 #else
11075 # include <asm/pgtable-2level_types.h>
11076 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11077 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11078 #endif
11079
11080 +#ifdef CONFIG_PAX_KERNEXEC
11081 +#ifndef __ASSEMBLY__
11082 +extern unsigned char MODULES_EXEC_VADDR[];
11083 +extern unsigned char MODULES_EXEC_END[];
11084 +#endif
11085 +#include <asm/boot.h>
11086 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11087 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11088 +#else
11089 +#define ktla_ktva(addr) (addr)
11090 +#define ktva_ktla(addr) (addr)
11091 +#endif
11092 +
11093 #define MODULES_VADDR VMALLOC_START
11094 #define MODULES_END VMALLOC_END
11095 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11096 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11097 index 975f709..107976d 100644
11098 --- a/arch/x86/include/asm/pgtable_64.h
11099 +++ b/arch/x86/include/asm/pgtable_64.h
11100 @@ -16,10 +16,14 @@
11101
11102 extern pud_t level3_kernel_pgt[512];
11103 extern pud_t level3_ident_pgt[512];
11104 +extern pud_t level3_vmalloc_start_pgt[512];
11105 +extern pud_t level3_vmalloc_end_pgt[512];
11106 +extern pud_t level3_vmemmap_pgt[512];
11107 +extern pud_t level2_vmemmap_pgt[512];
11108 extern pmd_t level2_kernel_pgt[512];
11109 extern pmd_t level2_fixmap_pgt[512];
11110 -extern pmd_t level2_ident_pgt[512];
11111 -extern pgd_t init_level4_pgt[];
11112 +extern pmd_t level2_ident_pgt[512*2];
11113 +extern pgd_t init_level4_pgt[512];
11114
11115 #define swapper_pg_dir init_level4_pgt
11116
11117 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11118
11119 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11120 {
11121 + pax_open_kernel();
11122 *pmdp = pmd;
11123 + pax_close_kernel();
11124 }
11125
11126 static inline void native_pmd_clear(pmd_t *pmd)
11127 @@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
11128
11129 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11130 {
11131 + pax_open_kernel();
11132 + *pgdp = pgd;
11133 + pax_close_kernel();
11134 +}
11135 +
11136 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11137 +{
11138 *pgdp = pgd;
11139 }
11140
11141 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11142 index 766ea16..5b96cb3 100644
11143 --- a/arch/x86/include/asm/pgtable_64_types.h
11144 +++ b/arch/x86/include/asm/pgtable_64_types.h
11145 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11146 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11147 #define MODULES_END _AC(0xffffffffff000000, UL)
11148 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11149 +#define MODULES_EXEC_VADDR MODULES_VADDR
11150 +#define MODULES_EXEC_END MODULES_END
11151 +
11152 +#define ktla_ktva(addr) (addr)
11153 +#define ktva_ktla(addr) (addr)
11154
11155 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11156 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11157 index 013286a..8b42f4f 100644
11158 --- a/arch/x86/include/asm/pgtable_types.h
11159 +++ b/arch/x86/include/asm/pgtable_types.h
11160 @@ -16,13 +16,12 @@
11161 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11162 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11163 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11164 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11165 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11166 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11167 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11168 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11169 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11170 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11171 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11172 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11173 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11174 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11175
11176 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11177 @@ -40,7 +39,6 @@
11178 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11179 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11180 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11181 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11182 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11183 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11184 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11185 @@ -57,8 +55,10 @@
11186
11187 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11188 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11189 -#else
11190 +#elif defined(CONFIG_KMEMCHECK)
11191 #define _PAGE_NX (_AT(pteval_t, 0))
11192 +#else
11193 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11194 #endif
11195
11196 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11197 @@ -96,6 +96,9 @@
11198 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11199 _PAGE_ACCESSED)
11200
11201 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11202 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11203 +
11204 #define __PAGE_KERNEL_EXEC \
11205 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11206 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11207 @@ -106,7 +109,7 @@
11208 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11209 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11210 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11211 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11212 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11213 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11214 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11215 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11216 @@ -168,8 +171,8 @@
11217 * bits are combined, this will alow user to access the high address mapped
11218 * VDSO in the presence of CONFIG_COMPAT_VDSO
11219 */
11220 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11221 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11222 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11223 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11224 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11225 #endif
11226
11227 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11228 {
11229 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11230 }
11231 +#endif
11232
11233 +#if PAGETABLE_LEVELS == 3
11234 +#include <asm-generic/pgtable-nopud.h>
11235 +#endif
11236 +
11237 +#if PAGETABLE_LEVELS == 2
11238 +#include <asm-generic/pgtable-nopmd.h>
11239 +#endif
11240 +
11241 +#ifndef __ASSEMBLY__
11242 #if PAGETABLE_LEVELS > 3
11243 typedef struct { pudval_t pud; } pud_t;
11244
11245 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11246 return pud.pud;
11247 }
11248 #else
11249 -#include <asm-generic/pgtable-nopud.h>
11250 -
11251 static inline pudval_t native_pud_val(pud_t pud)
11252 {
11253 return native_pgd_val(pud.pgd);
11254 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11255 return pmd.pmd;
11256 }
11257 #else
11258 -#include <asm-generic/pgtable-nopmd.h>
11259 -
11260 static inline pmdval_t native_pmd_val(pmd_t pmd)
11261 {
11262 return native_pgd_val(pmd.pud.pgd);
11263 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11264
11265 extern pteval_t __supported_pte_mask;
11266 extern void set_nx(void);
11267 -extern int nx_enabled;
11268
11269 #define pgprot_writecombine pgprot_writecombine
11270 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11271 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11272 index 58545c9..fe6fc38e 100644
11273 --- a/arch/x86/include/asm/processor.h
11274 +++ b/arch/x86/include/asm/processor.h
11275 @@ -266,7 +266,7 @@ struct tss_struct {
11276
11277 } ____cacheline_aligned;
11278
11279 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11280 +extern struct tss_struct init_tss[NR_CPUS];
11281
11282 /*
11283 * Save the original ist values for checking stack pointers during debugging
11284 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
11285 */
11286 #define TASK_SIZE PAGE_OFFSET
11287 #define TASK_SIZE_MAX TASK_SIZE
11288 +
11289 +#ifdef CONFIG_PAX_SEGMEXEC
11290 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11291 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11292 +#else
11293 #define STACK_TOP TASK_SIZE
11294 -#define STACK_TOP_MAX STACK_TOP
11295 +#endif
11296 +
11297 +#define STACK_TOP_MAX TASK_SIZE
11298
11299 #define INIT_THREAD { \
11300 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11301 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11302 .vm86_info = NULL, \
11303 .sysenter_cs = __KERNEL_CS, \
11304 .io_bitmap_ptr = NULL, \
11305 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
11306 */
11307 #define INIT_TSS { \
11308 .x86_tss = { \
11309 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11310 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11311 .ss0 = __KERNEL_DS, \
11312 .ss1 = __KERNEL_CS, \
11313 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11314 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
11315 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11316
11317 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11318 -#define KSTK_TOP(info) \
11319 -({ \
11320 - unsigned long *__ptr = (unsigned long *)(info); \
11321 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11322 -})
11323 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11324
11325 /*
11326 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11327 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11328 #define task_pt_regs(task) \
11329 ({ \
11330 struct pt_regs *__regs__; \
11331 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11332 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11333 __regs__ - 1; \
11334 })
11335
11336 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11337 /*
11338 * User space process size. 47bits minus one guard page.
11339 */
11340 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11341 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11342
11343 /* This decides where the kernel will search for a free chunk of vm
11344 * space during mmap's.
11345 */
11346 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11347 - 0xc0000000 : 0xFFFFe000)
11348 + 0xc0000000 : 0xFFFFf000)
11349
11350 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11351 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11352 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11353 #define STACK_TOP_MAX TASK_SIZE_MAX
11354
11355 #define INIT_THREAD { \
11356 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11357 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11358 }
11359
11360 #define INIT_TSS { \
11361 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11362 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11363 }
11364
11365 /*
11366 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11367 */
11368 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11369
11370 +#ifdef CONFIG_PAX_SEGMEXEC
11371 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11372 +#endif
11373 +
11374 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11375
11376 /* Get/set a process' ability to use the timestamp counter instruction */
11377 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11378 index 3566454..4bdfb8c 100644
11379 --- a/arch/x86/include/asm/ptrace.h
11380 +++ b/arch/x86/include/asm/ptrace.h
11381 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11382 }
11383
11384 /*
11385 - * user_mode_vm(regs) determines whether a register set came from user mode.
11386 + * user_mode(regs) determines whether a register set came from user mode.
11387 * This is true if V8086 mode was enabled OR if the register set was from
11388 * protected mode with RPL-3 CS value. This tricky test checks that with
11389 * one comparison. Many places in the kernel can bypass this full check
11390 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11391 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11392 + * be used.
11393 */
11394 -static inline int user_mode(struct pt_regs *regs)
11395 +static inline int user_mode_novm(struct pt_regs *regs)
11396 {
11397 #ifdef CONFIG_X86_32
11398 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11399 #else
11400 - return !!(regs->cs & 3);
11401 + return !!(regs->cs & SEGMENT_RPL_MASK);
11402 #endif
11403 }
11404
11405 -static inline int user_mode_vm(struct pt_regs *regs)
11406 +static inline int user_mode(struct pt_regs *regs)
11407 {
11408 #ifdef CONFIG_X86_32
11409 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11410 USER_RPL;
11411 #else
11412 - return user_mode(regs);
11413 + return user_mode_novm(regs);
11414 #endif
11415 }
11416
11417 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11418 #ifdef CONFIG_X86_64
11419 static inline bool user_64bit_mode(struct pt_regs *regs)
11420 {
11421 + unsigned long cs = regs->cs & 0xffff;
11422 #ifndef CONFIG_PARAVIRT
11423 /*
11424 * On non-paravirt systems, this is the only long mode CPL 3
11425 * selector. We do not allow long mode selectors in the LDT.
11426 */
11427 - return regs->cs == __USER_CS;
11428 + return cs == __USER_CS;
11429 #else
11430 /* Headers are too twisted for this to go in paravirt.h. */
11431 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11432 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11433 #endif
11434 }
11435 #endif
11436 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11437 index 92f29706..a79cbbb 100644
11438 --- a/arch/x86/include/asm/reboot.h
11439 +++ b/arch/x86/include/asm/reboot.h
11440 @@ -6,19 +6,19 @@
11441 struct pt_regs;
11442
11443 struct machine_ops {
11444 - void (*restart)(char *cmd);
11445 - void (*halt)(void);
11446 - void (*power_off)(void);
11447 + void (* __noreturn restart)(char *cmd);
11448 + void (* __noreturn halt)(void);
11449 + void (* __noreturn power_off)(void);
11450 void (*shutdown)(void);
11451 void (*crash_shutdown)(struct pt_regs *);
11452 - void (*emergency_restart)(void);
11453 -};
11454 + void (* __noreturn emergency_restart)(void);
11455 +} __no_const;
11456
11457 extern struct machine_ops machine_ops;
11458
11459 void native_machine_crash_shutdown(struct pt_regs *regs);
11460 void native_machine_shutdown(void);
11461 -void machine_real_restart(unsigned int type);
11462 +void machine_real_restart(unsigned int type) __noreturn;
11463 /* These must match dispatch_table in reboot_32.S */
11464 #define MRR_BIOS 0
11465 #define MRR_APM 1
11466 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11467 index 2dbe4a7..ce1db00 100644
11468 --- a/arch/x86/include/asm/rwsem.h
11469 +++ b/arch/x86/include/asm/rwsem.h
11470 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11471 {
11472 asm volatile("# beginning down_read\n\t"
11473 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11474 +
11475 +#ifdef CONFIG_PAX_REFCOUNT
11476 + "jno 0f\n"
11477 + LOCK_PREFIX _ASM_DEC "(%1)\n"
11478 + "int $4\n0:\n"
11479 + _ASM_EXTABLE(0b, 0b)
11480 +#endif
11481 +
11482 /* adds 0x00000001 */
11483 " jns 1f\n"
11484 " call call_rwsem_down_read_failed\n"
11485 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11486 "1:\n\t"
11487 " mov %1,%2\n\t"
11488 " add %3,%2\n\t"
11489 +
11490 +#ifdef CONFIG_PAX_REFCOUNT
11491 + "jno 0f\n"
11492 + "sub %3,%2\n"
11493 + "int $4\n0:\n"
11494 + _ASM_EXTABLE(0b, 0b)
11495 +#endif
11496 +
11497 " jle 2f\n\t"
11498 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11499 " jnz 1b\n\t"
11500 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11501 long tmp;
11502 asm volatile("# beginning down_write\n\t"
11503 LOCK_PREFIX " xadd %1,(%2)\n\t"
11504 +
11505 +#ifdef CONFIG_PAX_REFCOUNT
11506 + "jno 0f\n"
11507 + "mov %1,(%2)\n"
11508 + "int $4\n0:\n"
11509 + _ASM_EXTABLE(0b, 0b)
11510 +#endif
11511 +
11512 /* adds 0xffff0001, returns the old value */
11513 " test %1,%1\n\t"
11514 /* was the count 0 before? */
11515 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11516 long tmp;
11517 asm volatile("# beginning __up_read\n\t"
11518 LOCK_PREFIX " xadd %1,(%2)\n\t"
11519 +
11520 +#ifdef CONFIG_PAX_REFCOUNT
11521 + "jno 0f\n"
11522 + "mov %1,(%2)\n"
11523 + "int $4\n0:\n"
11524 + _ASM_EXTABLE(0b, 0b)
11525 +#endif
11526 +
11527 /* subtracts 1, returns the old value */
11528 " jns 1f\n\t"
11529 " call call_rwsem_wake\n" /* expects old value in %edx */
11530 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11531 long tmp;
11532 asm volatile("# beginning __up_write\n\t"
11533 LOCK_PREFIX " xadd %1,(%2)\n\t"
11534 +
11535 +#ifdef CONFIG_PAX_REFCOUNT
11536 + "jno 0f\n"
11537 + "mov %1,(%2)\n"
11538 + "int $4\n0:\n"
11539 + _ASM_EXTABLE(0b, 0b)
11540 +#endif
11541 +
11542 /* subtracts 0xffff0001, returns the old value */
11543 " jns 1f\n\t"
11544 " call call_rwsem_wake\n" /* expects old value in %edx */
11545 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11546 {
11547 asm volatile("# beginning __downgrade_write\n\t"
11548 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11549 +
11550 +#ifdef CONFIG_PAX_REFCOUNT
11551 + "jno 0f\n"
11552 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11553 + "int $4\n0:\n"
11554 + _ASM_EXTABLE(0b, 0b)
11555 +#endif
11556 +
11557 /*
11558 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11559 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11560 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11561 */
11562 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11563 {
11564 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11565 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11566 +
11567 +#ifdef CONFIG_PAX_REFCOUNT
11568 + "jno 0f\n"
11569 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
11570 + "int $4\n0:\n"
11571 + _ASM_EXTABLE(0b, 0b)
11572 +#endif
11573 +
11574 : "+m" (sem->count)
11575 : "er" (delta));
11576 }
11577 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11578 */
11579 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
11580 {
11581 - return delta + xadd(&sem->count, delta);
11582 + return delta + xadd_check_overflow(&sem->count, delta);
11583 }
11584
11585 #endif /* __KERNEL__ */
11586 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11587 index 5e64171..f58957e 100644
11588 --- a/arch/x86/include/asm/segment.h
11589 +++ b/arch/x86/include/asm/segment.h
11590 @@ -64,10 +64,15 @@
11591 * 26 - ESPFIX small SS
11592 * 27 - per-cpu [ offset to per-cpu data area ]
11593 * 28 - stack_canary-20 [ for stack protector ]
11594 - * 29 - unused
11595 - * 30 - unused
11596 + * 29 - PCI BIOS CS
11597 + * 30 - PCI BIOS DS
11598 * 31 - TSS for double fault handler
11599 */
11600 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11601 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11602 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11603 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11604 +
11605 #define GDT_ENTRY_TLS_MIN 6
11606 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11607
11608 @@ -79,6 +84,8 @@
11609
11610 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
11611
11612 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11613 +
11614 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
11615
11616 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
11617 @@ -104,6 +111,12 @@
11618 #define __KERNEL_STACK_CANARY 0
11619 #endif
11620
11621 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
11622 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11623 +
11624 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
11625 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11626 +
11627 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11628
11629 /*
11630 @@ -141,7 +154,7 @@
11631 */
11632
11633 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11634 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11635 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11636
11637
11638 #else
11639 @@ -165,6 +178,8 @@
11640 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
11641 #define __USER32_DS __USER_DS
11642
11643 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11644 +
11645 #define GDT_ENTRY_TSS 8 /* needs two entries */
11646 #define GDT_ENTRY_LDT 10 /* needs two entries */
11647 #define GDT_ENTRY_TLS_MIN 12
11648 @@ -185,6 +200,7 @@
11649 #endif
11650
11651 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
11652 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
11653 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
11654 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
11655 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
11656 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11657 index 0434c40..1714bf0 100644
11658 --- a/arch/x86/include/asm/smp.h
11659 +++ b/arch/x86/include/asm/smp.h
11660 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11661 /* cpus sharing the last level cache: */
11662 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
11663 DECLARE_PER_CPU(u16, cpu_llc_id);
11664 -DECLARE_PER_CPU(int, cpu_number);
11665 +DECLARE_PER_CPU(unsigned int, cpu_number);
11666
11667 static inline struct cpumask *cpu_sibling_mask(int cpu)
11668 {
11669 @@ -77,7 +77,7 @@ struct smp_ops {
11670
11671 void (*send_call_func_ipi)(const struct cpumask *mask);
11672 void (*send_call_func_single_ipi)(int cpu);
11673 -};
11674 +} __no_const;
11675
11676 /* Globals due to paravirt */
11677 extern void set_cpu_sibling_map(int cpu);
11678 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11679 extern int safe_smp_processor_id(void);
11680
11681 #elif defined(CONFIG_X86_64_SMP)
11682 -#define raw_smp_processor_id() (percpu_read(cpu_number))
11683 -
11684 -#define stack_smp_processor_id() \
11685 -({ \
11686 - struct thread_info *ti; \
11687 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11688 - ti->cpu; \
11689 -})
11690 +#define raw_smp_processor_id() (percpu_read(cpu_number))
11691 +#define stack_smp_processor_id() raw_smp_processor_id()
11692 #define safe_smp_processor_id() smp_processor_id()
11693
11694 #endif
11695 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11696 index a82c2bf..2198f61 100644
11697 --- a/arch/x86/include/asm/spinlock.h
11698 +++ b/arch/x86/include/asm/spinlock.h
11699 @@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
11700 static inline void arch_read_lock(arch_rwlock_t *rw)
11701 {
11702 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
11703 +
11704 +#ifdef CONFIG_PAX_REFCOUNT
11705 + "jno 0f\n"
11706 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
11707 + "int $4\n0:\n"
11708 + _ASM_EXTABLE(0b, 0b)
11709 +#endif
11710 +
11711 "jns 1f\n"
11712 "call __read_lock_failed\n\t"
11713 "1:\n"
11714 @@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
11715 static inline void arch_write_lock(arch_rwlock_t *rw)
11716 {
11717 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
11718 +
11719 +#ifdef CONFIG_PAX_REFCOUNT
11720 + "jno 0f\n"
11721 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
11722 + "int $4\n0:\n"
11723 + _ASM_EXTABLE(0b, 0b)
11724 +#endif
11725 +
11726 "jz 1f\n"
11727 "call __write_lock_failed\n\t"
11728 "1:\n"
11729 @@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
11730
11731 static inline void arch_read_unlock(arch_rwlock_t *rw)
11732 {
11733 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
11734 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
11735 +
11736 +#ifdef CONFIG_PAX_REFCOUNT
11737 + "jno 0f\n"
11738 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
11739 + "int $4\n0:\n"
11740 + _ASM_EXTABLE(0b, 0b)
11741 +#endif
11742 +
11743 :"+m" (rw->lock) : : "memory");
11744 }
11745
11746 static inline void arch_write_unlock(arch_rwlock_t *rw)
11747 {
11748 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
11749 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
11750 +
11751 +#ifdef CONFIG_PAX_REFCOUNT
11752 + "jno 0f\n"
11753 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
11754 + "int $4\n0:\n"
11755 + _ASM_EXTABLE(0b, 0b)
11756 +#endif
11757 +
11758 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
11759 }
11760
11761 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11762 index 1575177..cb23f52 100644
11763 --- a/arch/x86/include/asm/stackprotector.h
11764 +++ b/arch/x86/include/asm/stackprotector.h
11765 @@ -48,7 +48,7 @@
11766 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11767 */
11768 #define GDT_STACK_CANARY_INIT \
11769 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11770 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11771
11772 /*
11773 * Initialize the stackprotector canary value.
11774 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11775
11776 static inline void load_stack_canary_segment(void)
11777 {
11778 -#ifdef CONFIG_X86_32
11779 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11780 asm volatile ("mov %0, %%gs" : : "r" (0));
11781 #endif
11782 }
11783 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
11784 index 70bbe39..4ae2bd4 100644
11785 --- a/arch/x86/include/asm/stacktrace.h
11786 +++ b/arch/x86/include/asm/stacktrace.h
11787 @@ -11,28 +11,20 @@
11788
11789 extern int kstack_depth_to_print;
11790
11791 -struct thread_info;
11792 +struct task_struct;
11793 struct stacktrace_ops;
11794
11795 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
11796 - unsigned long *stack,
11797 - unsigned long bp,
11798 - const struct stacktrace_ops *ops,
11799 - void *data,
11800 - unsigned long *end,
11801 - int *graph);
11802 +typedef unsigned long walk_stack_t(struct task_struct *task,
11803 + void *stack_start,
11804 + unsigned long *stack,
11805 + unsigned long bp,
11806 + const struct stacktrace_ops *ops,
11807 + void *data,
11808 + unsigned long *end,
11809 + int *graph);
11810
11811 -extern unsigned long
11812 -print_context_stack(struct thread_info *tinfo,
11813 - unsigned long *stack, unsigned long bp,
11814 - const struct stacktrace_ops *ops, void *data,
11815 - unsigned long *end, int *graph);
11816 -
11817 -extern unsigned long
11818 -print_context_stack_bp(struct thread_info *tinfo,
11819 - unsigned long *stack, unsigned long bp,
11820 - const struct stacktrace_ops *ops, void *data,
11821 - unsigned long *end, int *graph);
11822 +extern walk_stack_t print_context_stack;
11823 +extern walk_stack_t print_context_stack_bp;
11824
11825 /* Generic stack tracer with callbacks */
11826
11827 @@ -40,7 +32,7 @@ struct stacktrace_ops {
11828 void (*address)(void *data, unsigned long address, int reliable);
11829 /* On negative return stop dumping */
11830 int (*stack)(void *data, char *name);
11831 - walk_stack_t walk_stack;
11832 + walk_stack_t *walk_stack;
11833 };
11834
11835 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
11836 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
11837 index cb23852..2dde194 100644
11838 --- a/arch/x86/include/asm/sys_ia32.h
11839 +++ b/arch/x86/include/asm/sys_ia32.h
11840 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
11841 compat_sigset_t __user *, unsigned int);
11842 asmlinkage long sys32_alarm(unsigned int);
11843
11844 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
11845 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
11846 asmlinkage long sys32_sysfs(int, u32, u32);
11847
11848 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
11849 diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
11850 index f1d8b44..a4de8b7 100644
11851 --- a/arch/x86/include/asm/syscalls.h
11852 +++ b/arch/x86/include/asm/syscalls.h
11853 @@ -30,7 +30,7 @@ long sys_clone(unsigned long, unsigned long, void __user *,
11854 void __user *, struct pt_regs *);
11855
11856 /* kernel/ldt.c */
11857 -asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
11858 +asmlinkage int sys_modify_ldt(int, void __user *, unsigned long) __size_overflow(3);
11859
11860 /* kernel/signal.c */
11861 long sys_rt_sigreturn(struct pt_regs *);
11862 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11863 index 2d2f01c..f985723 100644
11864 --- a/arch/x86/include/asm/system.h
11865 +++ b/arch/x86/include/asm/system.h
11866 @@ -129,7 +129,7 @@ do { \
11867 "call __switch_to\n\t" \
11868 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11869 __switch_canary \
11870 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
11871 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11872 "movq %%rax,%%rdi\n\t" \
11873 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11874 "jnz ret_from_fork\n\t" \
11875 @@ -140,7 +140,7 @@ do { \
11876 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11877 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11878 [_tif_fork] "i" (_TIF_FORK), \
11879 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
11880 + [thread_info] "m" (current_tinfo), \
11881 [current_task] "m" (current_task) \
11882 __switch_canary_iparam \
11883 : "memory", "cc" __EXTRA_CLOBBER)
11884 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11885 {
11886 unsigned long __limit;
11887 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11888 - return __limit + 1;
11889 + return __limit;
11890 }
11891
11892 static inline void native_clts(void)
11893 @@ -397,13 +397,13 @@ void enable_hlt(void);
11894
11895 void cpu_idle_wait(void);
11896
11897 -extern unsigned long arch_align_stack(unsigned long sp);
11898 +#define arch_align_stack(x) ((x) & ~0xfUL)
11899 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11900
11901 void default_idle(void);
11902 bool set_pm_idle_to_default(void);
11903
11904 -void stop_this_cpu(void *dummy);
11905 +void stop_this_cpu(void *dummy) __noreturn;
11906
11907 /*
11908 * Force strict CPU ordering.
11909 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11910 index cfd8144..1b1127d 100644
11911 --- a/arch/x86/include/asm/thread_info.h
11912 +++ b/arch/x86/include/asm/thread_info.h
11913 @@ -10,6 +10,7 @@
11914 #include <linux/compiler.h>
11915 #include <asm/page.h>
11916 #include <asm/types.h>
11917 +#include <asm/percpu.h>
11918
11919 /*
11920 * low level task data that entry.S needs immediate access to
11921 @@ -24,7 +25,6 @@ struct exec_domain;
11922 #include <linux/atomic.h>
11923
11924 struct thread_info {
11925 - struct task_struct *task; /* main task structure */
11926 struct exec_domain *exec_domain; /* execution domain */
11927 __u32 flags; /* low level flags */
11928 __u32 status; /* thread synchronous flags */
11929 @@ -34,19 +34,13 @@ struct thread_info {
11930 mm_segment_t addr_limit;
11931 struct restart_block restart_block;
11932 void __user *sysenter_return;
11933 -#ifdef CONFIG_X86_32
11934 - unsigned long previous_esp; /* ESP of the previous stack in
11935 - case of nested (IRQ) stacks
11936 - */
11937 - __u8 supervisor_stack[0];
11938 -#endif
11939 + unsigned long lowest_stack;
11940 unsigned int sig_on_uaccess_error:1;
11941 unsigned int uaccess_err:1; /* uaccess failed */
11942 };
11943
11944 -#define INIT_THREAD_INFO(tsk) \
11945 +#define INIT_THREAD_INFO \
11946 { \
11947 - .task = &tsk, \
11948 .exec_domain = &default_exec_domain, \
11949 .flags = 0, \
11950 .cpu = 0, \
11951 @@ -57,7 +51,7 @@ struct thread_info {
11952 }, \
11953 }
11954
11955 -#define init_thread_info (init_thread_union.thread_info)
11956 +#define init_thread_info (init_thread_union.stack)
11957 #define init_stack (init_thread_union.stack)
11958
11959 #else /* !__ASSEMBLY__ */
11960 @@ -169,45 +163,40 @@ struct thread_info {
11961 ret; \
11962 })
11963
11964 -#ifdef CONFIG_X86_32
11965 -
11966 -#define STACK_WARN (THREAD_SIZE/8)
11967 -/*
11968 - * macros/functions for gaining access to the thread information structure
11969 - *
11970 - * preempt_count needs to be 1 initially, until the scheduler is functional.
11971 - */
11972 -#ifndef __ASSEMBLY__
11973 -
11974 -
11975 -/* how to get the current stack pointer from C */
11976 -register unsigned long current_stack_pointer asm("esp") __used;
11977 -
11978 -/* how to get the thread information struct from C */
11979 -static inline struct thread_info *current_thread_info(void)
11980 -{
11981 - return (struct thread_info *)
11982 - (current_stack_pointer & ~(THREAD_SIZE - 1));
11983 -}
11984 -
11985 -#else /* !__ASSEMBLY__ */
11986 -
11987 +#ifdef __ASSEMBLY__
11988 /* how to get the thread information struct from ASM */
11989 #define GET_THREAD_INFO(reg) \
11990 - movl $-THREAD_SIZE, reg; \
11991 - andl %esp, reg
11992 + mov PER_CPU_VAR(current_tinfo), reg
11993
11994 /* use this one if reg already contains %esp */
11995 -#define GET_THREAD_INFO_WITH_ESP(reg) \
11996 - andl $-THREAD_SIZE, reg
11997 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
11998 +#else
11999 +/* how to get the thread information struct from C */
12000 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12001 +
12002 +static __always_inline struct thread_info *current_thread_info(void)
12003 +{
12004 + return percpu_read_stable(current_tinfo);
12005 +}
12006 +#endif
12007 +
12008 +#ifdef CONFIG_X86_32
12009 +
12010 +#define STACK_WARN (THREAD_SIZE/8)
12011 +/*
12012 + * macros/functions for gaining access to the thread information structure
12013 + *
12014 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12015 + */
12016 +#ifndef __ASSEMBLY__
12017 +
12018 +/* how to get the current stack pointer from C */
12019 +register unsigned long current_stack_pointer asm("esp") __used;
12020
12021 #endif
12022
12023 #else /* X86_32 */
12024
12025 -#include <asm/percpu.h>
12026 -#define KERNEL_STACK_OFFSET (5*8)
12027 -
12028 /*
12029 * macros/functions for gaining access to the thread information structure
12030 * preempt_count needs to be 1 initially, until the scheduler is functional.
12031 @@ -215,27 +204,8 @@ static inline struct thread_info *current_thread_info(void)
12032 #ifndef __ASSEMBLY__
12033 DECLARE_PER_CPU(unsigned long, kernel_stack);
12034
12035 -static inline struct thread_info *current_thread_info(void)
12036 -{
12037 - struct thread_info *ti;
12038 - ti = (void *)(percpu_read_stable(kernel_stack) +
12039 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12040 - return ti;
12041 -}
12042 -
12043 -#else /* !__ASSEMBLY__ */
12044 -
12045 -/* how to get the thread information struct from ASM */
12046 -#define GET_THREAD_INFO(reg) \
12047 - movq PER_CPU_VAR(kernel_stack),reg ; \
12048 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12049 -
12050 -/*
12051 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12052 - * a certain register (to be used in assembler memory operands).
12053 - */
12054 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12055 -
12056 +/* how to get the current stack pointer from C */
12057 +register unsigned long current_stack_pointer asm("rsp") __used;
12058 #endif
12059
12060 #endif /* !X86_32 */
12061 @@ -269,5 +239,16 @@ extern void arch_task_cache_init(void);
12062 extern void free_thread_info(struct thread_info *ti);
12063 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12064 #define arch_task_cache_init arch_task_cache_init
12065 +
12066 +#define __HAVE_THREAD_FUNCTIONS
12067 +#define task_thread_info(task) (&(task)->tinfo)
12068 +#define task_stack_page(task) ((task)->stack)
12069 +#define setup_thread_stack(p, org) do {} while (0)
12070 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12071 +
12072 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12073 +extern struct task_struct *alloc_task_struct_node(int node);
12074 +extern void free_task_struct(struct task_struct *);
12075 +
12076 #endif
12077 #endif /* _ASM_X86_THREAD_INFO_H */
12078 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12079 index 8be5f54..7ae826d 100644
12080 --- a/arch/x86/include/asm/uaccess.h
12081 +++ b/arch/x86/include/asm/uaccess.h
12082 @@ -7,12 +7,15 @@
12083 #include <linux/compiler.h>
12084 #include <linux/thread_info.h>
12085 #include <linux/string.h>
12086 +#include <linux/sched.h>
12087 #include <asm/asm.h>
12088 #include <asm/page.h>
12089
12090 #define VERIFY_READ 0
12091 #define VERIFY_WRITE 1
12092
12093 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12094 +
12095 /*
12096 * The fs value determines whether argument validity checking should be
12097 * performed or not. If get_fs() == USER_DS, checking is performed, with
12098 @@ -28,7 +31,12 @@
12099
12100 #define get_ds() (KERNEL_DS)
12101 #define get_fs() (current_thread_info()->addr_limit)
12102 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12103 +void __set_fs(mm_segment_t x);
12104 +void set_fs(mm_segment_t x);
12105 +#else
12106 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12107 +#endif
12108
12109 #define segment_eq(a, b) ((a).seg == (b).seg)
12110
12111 @@ -76,7 +84,33 @@
12112 * checks that the pointer is in the user space range - after calling
12113 * this function, memory access functions may still return -EFAULT.
12114 */
12115 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12116 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12117 +#define access_ok(type, addr, size) \
12118 +({ \
12119 + long __size = size; \
12120 + unsigned long __addr = (unsigned long)addr; \
12121 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12122 + unsigned long __end_ao = __addr + __size - 1; \
12123 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12124 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12125 + while(__addr_ao <= __end_ao) { \
12126 + char __c_ao; \
12127 + __addr_ao += PAGE_SIZE; \
12128 + if (__size > PAGE_SIZE) \
12129 + cond_resched(); \
12130 + if (__get_user(__c_ao, (char __user *)__addr)) \
12131 + break; \
12132 + if (type != VERIFY_WRITE) { \
12133 + __addr = __addr_ao; \
12134 + continue; \
12135 + } \
12136 + if (__put_user(__c_ao, (char __user *)__addr)) \
12137 + break; \
12138 + __addr = __addr_ao; \
12139 + } \
12140 + } \
12141 + __ret_ao; \
12142 +})
12143
12144 /*
12145 * The exception table consists of pairs of addresses: the first is the
12146 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12147 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12148 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12149
12150 -
12151 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12152 +#define __copyuser_seg "gs;"
12153 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12154 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12155 +#else
12156 +#define __copyuser_seg
12157 +#define __COPYUSER_SET_ES
12158 +#define __COPYUSER_RESTORE_ES
12159 +#endif
12160
12161 #ifdef CONFIG_X86_32
12162 #define __put_user_asm_u64(x, addr, err, errret) \
12163 - asm volatile("1: movl %%eax,0(%2)\n" \
12164 - "2: movl %%edx,4(%2)\n" \
12165 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12166 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12167 "3:\n" \
12168 ".section .fixup,\"ax\"\n" \
12169 "4: movl %3,%0\n" \
12170 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12171 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12172
12173 #define __put_user_asm_ex_u64(x, addr) \
12174 - asm volatile("1: movl %%eax,0(%1)\n" \
12175 - "2: movl %%edx,4(%1)\n" \
12176 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12177 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12178 "3:\n" \
12179 _ASM_EXTABLE(1b, 2b - 1b) \
12180 _ASM_EXTABLE(2b, 3b - 2b) \
12181 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
12182 __typeof__(*(ptr)) __pu_val; \
12183 __chk_user_ptr(ptr); \
12184 might_fault(); \
12185 - __pu_val = x; \
12186 + __pu_val = (x); \
12187 switch (sizeof(*(ptr))) { \
12188 case 1: \
12189 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12190 @@ -373,7 +415,7 @@ do { \
12191 } while (0)
12192
12193 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12194 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12195 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12196 "2:\n" \
12197 ".section .fixup,\"ax\"\n" \
12198 "3: mov %3,%0\n" \
12199 @@ -381,7 +423,7 @@ do { \
12200 " jmp 2b\n" \
12201 ".previous\n" \
12202 _ASM_EXTABLE(1b, 3b) \
12203 - : "=r" (err), ltype(x) \
12204 + : "=r" (err), ltype (x) \
12205 : "m" (__m(addr)), "i" (errret), "0" (err))
12206
12207 #define __get_user_size_ex(x, ptr, size) \
12208 @@ -406,7 +448,7 @@ do { \
12209 } while (0)
12210
12211 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12212 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12213 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12214 "2:\n" \
12215 _ASM_EXTABLE(1b, 2b - 1b) \
12216 : ltype(x) : "m" (__m(addr)))
12217 @@ -423,13 +465,24 @@ do { \
12218 int __gu_err; \
12219 unsigned long __gu_val; \
12220 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12221 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12222 + (x) = (__typeof__(*(ptr)))__gu_val; \
12223 __gu_err; \
12224 })
12225
12226 /* FIXME: this hack is definitely wrong -AK */
12227 struct __large_struct { unsigned long buf[100]; };
12228 -#define __m(x) (*(struct __large_struct __user *)(x))
12229 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12230 +#define ____m(x) \
12231 +({ \
12232 + unsigned long ____x = (unsigned long)(x); \
12233 + if (____x < PAX_USER_SHADOW_BASE) \
12234 + ____x += PAX_USER_SHADOW_BASE; \
12235 + (void __user *)____x; \
12236 +})
12237 +#else
12238 +#define ____m(x) (x)
12239 +#endif
12240 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12241
12242 /*
12243 * Tell gcc we read from memory instead of writing: this is because
12244 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12245 * aliasing issues.
12246 */
12247 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12248 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12249 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12250 "2:\n" \
12251 ".section .fixup,\"ax\"\n" \
12252 "3: mov %3,%0\n" \
12253 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12254 ".previous\n" \
12255 _ASM_EXTABLE(1b, 3b) \
12256 : "=r"(err) \
12257 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12258 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12259
12260 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12261 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12262 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12263 "2:\n" \
12264 _ASM_EXTABLE(1b, 2b - 1b) \
12265 : : ltype(x), "m" (__m(addr)))
12266 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12267 * On error, the variable @x is set to zero.
12268 */
12269
12270 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12271 +#define __get_user(x, ptr) get_user((x), (ptr))
12272 +#else
12273 #define __get_user(x, ptr) \
12274 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12275 +#endif
12276
12277 /**
12278 * __put_user: - Write a simple value into user space, with less checking.
12279 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12280 * Returns zero on success, or -EFAULT on error.
12281 */
12282
12283 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12284 +#define __put_user(x, ptr) put_user((x), (ptr))
12285 +#else
12286 #define __put_user(x, ptr) \
12287 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12288 +#endif
12289
12290 #define __get_user_unaligned __get_user
12291 #define __put_user_unaligned __put_user
12292 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12293 #define get_user_ex(x, ptr) do { \
12294 unsigned long __gue_val; \
12295 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12296 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12297 + (x) = (__typeof__(*(ptr)))__gue_val; \
12298 } while (0)
12299
12300 #ifdef CONFIG_X86_WP_WORKS_OK
12301 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12302 index 566e803..7183d0b 100644
12303 --- a/arch/x86/include/asm/uaccess_32.h
12304 +++ b/arch/x86/include/asm/uaccess_32.h
12305 @@ -11,15 +11,15 @@
12306 #include <asm/page.h>
12307
12308 unsigned long __must_check __copy_to_user_ll
12309 - (void __user *to, const void *from, unsigned long n);
12310 + (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12311 unsigned long __must_check __copy_from_user_ll
12312 - (void *to, const void __user *from, unsigned long n);
12313 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12314 unsigned long __must_check __copy_from_user_ll_nozero
12315 - (void *to, const void __user *from, unsigned long n);
12316 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12317 unsigned long __must_check __copy_from_user_ll_nocache
12318 - (void *to, const void __user *from, unsigned long n);
12319 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12320 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12321 - (void *to, const void __user *from, unsigned long n);
12322 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12323
12324 /**
12325 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12326 @@ -41,8 +41,13 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12327 */
12328
12329 static __always_inline unsigned long __must_check
12330 +__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) __size_overflow(3);
12331 +static __always_inline unsigned long __must_check
12332 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12333 {
12334 + if ((long)n < 0)
12335 + return n;
12336 +
12337 if (__builtin_constant_p(n)) {
12338 unsigned long ret;
12339
12340 @@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12341 return ret;
12342 }
12343 }
12344 + if (!__builtin_constant_p(n))
12345 + check_object_size(from, n, true);
12346 return __copy_to_user_ll(to, from, n);
12347 }
12348
12349 @@ -79,15 +86,23 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12350 * On success, this will be zero.
12351 */
12352 static __always_inline unsigned long __must_check
12353 +__copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
12354 +static __always_inline unsigned long __must_check
12355 __copy_to_user(void __user *to, const void *from, unsigned long n)
12356 {
12357 might_fault();
12358 +
12359 return __copy_to_user_inatomic(to, from, n);
12360 }
12361
12362 static __always_inline unsigned long
12363 +__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) __size_overflow(3);
12364 +static __always_inline unsigned long
12365 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12366 {
12367 + if ((long)n < 0)
12368 + return n;
12369 +
12370 /* Avoid zeroing the tail if the copy fails..
12371 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12372 * but as the zeroing behaviour is only significant when n is not
12373 @@ -134,9 +149,15 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12374 * for explanation of why this is needed.
12375 */
12376 static __always_inline unsigned long
12377 +__copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
12378 +static __always_inline unsigned long
12379 __copy_from_user(void *to, const void __user *from, unsigned long n)
12380 {
12381 might_fault();
12382 +
12383 + if ((long)n < 0)
12384 + return n;
12385 +
12386 if (__builtin_constant_p(n)) {
12387 unsigned long ret;
12388
12389 @@ -152,13 +173,21 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12390 return ret;
12391 }
12392 }
12393 + if (!__builtin_constant_p(n))
12394 + check_object_size(to, n, false);
12395 return __copy_from_user_ll(to, from, n);
12396 }
12397
12398 static __always_inline unsigned long __copy_from_user_nocache(void *to,
12399 + const void __user *from, unsigned long n) __size_overflow(3);
12400 +static __always_inline unsigned long __copy_from_user_nocache(void *to,
12401 const void __user *from, unsigned long n)
12402 {
12403 might_fault();
12404 +
12405 + if ((long)n < 0)
12406 + return n;
12407 +
12408 if (__builtin_constant_p(n)) {
12409 unsigned long ret;
12410
12411 @@ -179,17 +208,24 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12412
12413 static __always_inline unsigned long
12414 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12415 + unsigned long n) __size_overflow(3);
12416 +static __always_inline unsigned long
12417 +__copy_from_user_inatomic_nocache(void *to, const void __user *from,
12418 unsigned long n)
12419 {
12420 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12421 + if ((long)n < 0)
12422 + return n;
12423 +
12424 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12425 }
12426
12427 -unsigned long __must_check copy_to_user(void __user *to,
12428 - const void *from, unsigned long n);
12429 -unsigned long __must_check _copy_from_user(void *to,
12430 - const void __user *from,
12431 - unsigned long n);
12432 -
12433 +extern void copy_to_user_overflow(void)
12434 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12435 + __compiletime_error("copy_to_user() buffer size is not provably correct")
12436 +#else
12437 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
12438 +#endif
12439 +;
12440
12441 extern void copy_from_user_overflow(void)
12442 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12443 @@ -199,17 +235,65 @@ extern void copy_from_user_overflow(void)
12444 #endif
12445 ;
12446
12447 -static inline unsigned long __must_check copy_from_user(void *to,
12448 - const void __user *from,
12449 - unsigned long n)
12450 +/**
12451 + * copy_to_user: - Copy a block of data into user space.
12452 + * @to: Destination address, in user space.
12453 + * @from: Source address, in kernel space.
12454 + * @n: Number of bytes to copy.
12455 + *
12456 + * Context: User context only. This function may sleep.
12457 + *
12458 + * Copy data from kernel space to user space.
12459 + *
12460 + * Returns number of bytes that could not be copied.
12461 + * On success, this will be zero.
12462 + */
12463 +static inline unsigned long __must_check
12464 +copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
12465 +static inline unsigned long __must_check
12466 +copy_to_user(void __user *to, const void *from, unsigned long n)
12467 +{
12468 + int sz = __compiletime_object_size(from);
12469 +
12470 + if (unlikely(sz != -1 && sz < n))
12471 + copy_to_user_overflow();
12472 + else if (access_ok(VERIFY_WRITE, to, n))
12473 + n = __copy_to_user(to, from, n);
12474 + return n;
12475 +}
12476 +
12477 +/**
12478 + * copy_from_user: - Copy a block of data from user space.
12479 + * @to: Destination address, in kernel space.
12480 + * @from: Source address, in user space.
12481 + * @n: Number of bytes to copy.
12482 + *
12483 + * Context: User context only. This function may sleep.
12484 + *
12485 + * Copy data from user space to kernel space.
12486 + *
12487 + * Returns number of bytes that could not be copied.
12488 + * On success, this will be zero.
12489 + *
12490 + * If some data could not be copied, this function will pad the copied
12491 + * data to the requested size using zero bytes.
12492 + */
12493 +static inline unsigned long __must_check
12494 +copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
12495 +static inline unsigned long __must_check
12496 +copy_from_user(void *to, const void __user *from, unsigned long n)
12497 {
12498 int sz = __compiletime_object_size(to);
12499
12500 - if (likely(sz == -1 || sz >= n))
12501 - n = _copy_from_user(to, from, n);
12502 - else
12503 + if (unlikely(sz != -1 && sz < n))
12504 copy_from_user_overflow();
12505 -
12506 + else if (access_ok(VERIFY_READ, from, n))
12507 + n = __copy_from_user(to, from, n);
12508 + else if ((long)n > 0) {
12509 + if (!__builtin_constant_p(n))
12510 + check_object_size(to, n, false);
12511 + memset(to, 0, n);
12512 + }
12513 return n;
12514 }
12515
12516 @@ -235,7 +319,7 @@ long __must_check __strncpy_from_user(char *dst,
12517 #define strlen_user(str) strnlen_user(str, LONG_MAX)
12518
12519 long strnlen_user(const char __user *str, long n);
12520 -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
12521 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
12522 +unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
12523 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
12524
12525 #endif /* _ASM_X86_UACCESS_32_H */
12526 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12527 index 1c66d30..e294b5f 100644
12528 --- a/arch/x86/include/asm/uaccess_64.h
12529 +++ b/arch/x86/include/asm/uaccess_64.h
12530 @@ -10,6 +10,9 @@
12531 #include <asm/alternative.h>
12532 #include <asm/cpufeature.h>
12533 #include <asm/page.h>
12534 +#include <asm/pgtable.h>
12535 +
12536 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12537
12538 /*
12539 * Copy To/From Userspace
12540 @@ -17,12 +20,14 @@
12541
12542 /* Handles exceptions in both to and from, but doesn't do access_ok */
12543 __must_check unsigned long
12544 -copy_user_generic_string(void *to, const void *from, unsigned len);
12545 +copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
12546 __must_check unsigned long
12547 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
12548 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
12549
12550 static __always_inline __must_check unsigned long
12551 -copy_user_generic(void *to, const void *from, unsigned len)
12552 +copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
12553 +static __always_inline __must_check unsigned long
12554 +copy_user_generic(void *to, const void *from, unsigned long len)
12555 {
12556 unsigned ret;
12557
12558 @@ -32,142 +37,237 @@ copy_user_generic(void *to, const void *from, unsigned len)
12559 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
12560 "=d" (len)),
12561 "1" (to), "2" (from), "3" (len)
12562 - : "memory", "rcx", "r8", "r9", "r10", "r11");
12563 + : "memory", "rcx", "r8", "r9", "r11");
12564 return ret;
12565 }
12566
12567 +static __always_inline __must_check unsigned long
12568 +__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
12569 +static __always_inline __must_check unsigned long
12570 +__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
12571 __must_check unsigned long
12572 -_copy_to_user(void __user *to, const void *from, unsigned len);
12573 -__must_check unsigned long
12574 -_copy_from_user(void *to, const void __user *from, unsigned len);
12575 -__must_check unsigned long
12576 -copy_in_user(void __user *to, const void __user *from, unsigned len);
12577 +copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
12578
12579 static inline unsigned long __must_check copy_from_user(void *to,
12580 const void __user *from,
12581 + unsigned long n) __size_overflow(3);
12582 +static inline unsigned long __must_check copy_from_user(void *to,
12583 + const void __user *from,
12584 unsigned long n)
12585 {
12586 - int sz = __compiletime_object_size(to);
12587 -
12588 might_fault();
12589 - if (likely(sz == -1 || sz >= n))
12590 - n = _copy_from_user(to, from, n);
12591 -#ifdef CONFIG_DEBUG_VM
12592 - else
12593 - WARN(1, "Buffer overflow detected!\n");
12594 -#endif
12595 +
12596 + if (access_ok(VERIFY_READ, from, n))
12597 + n = __copy_from_user(to, from, n);
12598 + else if (n < INT_MAX) {
12599 + if (!__builtin_constant_p(n))
12600 + check_object_size(to, n, false);
12601 + memset(to, 0, n);
12602 + }
12603 return n;
12604 }
12605
12606 static __always_inline __must_check
12607 -int copy_to_user(void __user *dst, const void *src, unsigned size)
12608 +int copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
12609 +static __always_inline __must_check
12610 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
12611 {
12612 might_fault();
12613
12614 - return _copy_to_user(dst, src, size);
12615 + if (access_ok(VERIFY_WRITE, dst, size))
12616 + size = __copy_to_user(dst, src, size);
12617 + return size;
12618 }
12619
12620 static __always_inline __must_check
12621 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
12622 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
12623 +static __always_inline __must_check
12624 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12625 {
12626 - int ret = 0;
12627 + int sz = __compiletime_object_size(dst);
12628 + unsigned ret = 0;
12629
12630 might_fault();
12631 - if (!__builtin_constant_p(size))
12632 - return copy_user_generic(dst, (__force void *)src, size);
12633 +
12634 + if (size > INT_MAX)
12635 + return size;
12636 +
12637 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12638 + if (!__access_ok(VERIFY_READ, src, size))
12639 + return size;
12640 +#endif
12641 +
12642 + if (unlikely(sz != -1 && sz < size)) {
12643 +#ifdef CONFIG_DEBUG_VM
12644 + WARN(1, "Buffer overflow detected!\n");
12645 +#endif
12646 + return size;
12647 + }
12648 +
12649 + if (!__builtin_constant_p(size)) {
12650 + check_object_size(dst, size, false);
12651 +
12652 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12653 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12654 + src += PAX_USER_SHADOW_BASE;
12655 +#endif
12656 +
12657 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12658 + }
12659 switch (size) {
12660 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12661 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12662 ret, "b", "b", "=q", 1);
12663 return ret;
12664 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12665 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12666 ret, "w", "w", "=r", 2);
12667 return ret;
12668 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12669 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12670 ret, "l", "k", "=r", 4);
12671 return ret;
12672 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12673 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12674 ret, "q", "", "=r", 8);
12675 return ret;
12676 case 10:
12677 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12678 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12679 ret, "q", "", "=r", 10);
12680 if (unlikely(ret))
12681 return ret;
12682 __get_user_asm(*(u16 *)(8 + (char *)dst),
12683 - (u16 __user *)(8 + (char __user *)src),
12684 + (const u16 __user *)(8 + (const char __user *)src),
12685 ret, "w", "w", "=r", 2);
12686 return ret;
12687 case 16:
12688 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12689 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12690 ret, "q", "", "=r", 16);
12691 if (unlikely(ret))
12692 return ret;
12693 __get_user_asm(*(u64 *)(8 + (char *)dst),
12694 - (u64 __user *)(8 + (char __user *)src),
12695 + (const u64 __user *)(8 + (const char __user *)src),
12696 ret, "q", "", "=r", 8);
12697 return ret;
12698 default:
12699 - return copy_user_generic(dst, (__force void *)src, size);
12700 +
12701 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12702 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12703 + src += PAX_USER_SHADOW_BASE;
12704 +#endif
12705 +
12706 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12707 }
12708 }
12709
12710 static __always_inline __must_check
12711 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
12712 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
12713 +static __always_inline __must_check
12714 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12715 {
12716 - int ret = 0;
12717 + int sz = __compiletime_object_size(src);
12718 + unsigned ret = 0;
12719
12720 might_fault();
12721 - if (!__builtin_constant_p(size))
12722 - return copy_user_generic((__force void *)dst, src, size);
12723 +
12724 + if (size > INT_MAX)
12725 + return size;
12726 +
12727 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12728 + if (!__access_ok(VERIFY_WRITE, dst, size))
12729 + return size;
12730 +#endif
12731 +
12732 + if (unlikely(sz != -1 && sz < size)) {
12733 +#ifdef CONFIG_DEBUG_VM
12734 + WARN(1, "Buffer overflow detected!\n");
12735 +#endif
12736 + return size;
12737 + }
12738 +
12739 + if (!__builtin_constant_p(size)) {
12740 + check_object_size(src, size, true);
12741 +
12742 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12743 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12744 + dst += PAX_USER_SHADOW_BASE;
12745 +#endif
12746 +
12747 + return copy_user_generic((__force_kernel void *)dst, src, size);
12748 + }
12749 switch (size) {
12750 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12751 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12752 ret, "b", "b", "iq", 1);
12753 return ret;
12754 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12755 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12756 ret, "w", "w", "ir", 2);
12757 return ret;
12758 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12759 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12760 ret, "l", "k", "ir", 4);
12761 return ret;
12762 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12763 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12764 ret, "q", "", "er", 8);
12765 return ret;
12766 case 10:
12767 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12768 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12769 ret, "q", "", "er", 10);
12770 if (unlikely(ret))
12771 return ret;
12772 asm("":::"memory");
12773 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12774 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12775 ret, "w", "w", "ir", 2);
12776 return ret;
12777 case 16:
12778 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12779 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12780 ret, "q", "", "er", 16);
12781 if (unlikely(ret))
12782 return ret;
12783 asm("":::"memory");
12784 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12785 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12786 ret, "q", "", "er", 8);
12787 return ret;
12788 default:
12789 - return copy_user_generic((__force void *)dst, src, size);
12790 +
12791 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12792 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12793 + dst += PAX_USER_SHADOW_BASE;
12794 +#endif
12795 +
12796 + return copy_user_generic((__force_kernel void *)dst, src, size);
12797 }
12798 }
12799
12800 static __always_inline __must_check
12801 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12802 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size) __size_overflow(3);
12803 +static __always_inline __must_check
12804 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12805 {
12806 - int ret = 0;
12807 + unsigned ret = 0;
12808
12809 might_fault();
12810 - if (!__builtin_constant_p(size))
12811 - return copy_user_generic((__force void *)dst,
12812 - (__force void *)src, size);
12813 +
12814 + if (size > INT_MAX)
12815 + return size;
12816 +
12817 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12818 + if (!__access_ok(VERIFY_READ, src, size))
12819 + return size;
12820 + if (!__access_ok(VERIFY_WRITE, dst, size))
12821 + return size;
12822 +#endif
12823 +
12824 + if (!__builtin_constant_p(size)) {
12825 +
12826 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12827 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12828 + src += PAX_USER_SHADOW_BASE;
12829 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12830 + dst += PAX_USER_SHADOW_BASE;
12831 +#endif
12832 +
12833 + return copy_user_generic((__force_kernel void *)dst,
12834 + (__force_kernel const void *)src, size);
12835 + }
12836 switch (size) {
12837 case 1: {
12838 u8 tmp;
12839 - __get_user_asm(tmp, (u8 __user *)src,
12840 + __get_user_asm(tmp, (const u8 __user *)src,
12841 ret, "b", "b", "=q", 1);
12842 if (likely(!ret))
12843 __put_user_asm(tmp, (u8 __user *)dst,
12844 @@ -176,7 +276,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12845 }
12846 case 2: {
12847 u16 tmp;
12848 - __get_user_asm(tmp, (u16 __user *)src,
12849 + __get_user_asm(tmp, (const u16 __user *)src,
12850 ret, "w", "w", "=r", 2);
12851 if (likely(!ret))
12852 __put_user_asm(tmp, (u16 __user *)dst,
12853 @@ -186,7 +286,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12854
12855 case 4: {
12856 u32 tmp;
12857 - __get_user_asm(tmp, (u32 __user *)src,
12858 + __get_user_asm(tmp, (const u32 __user *)src,
12859 ret, "l", "k", "=r", 4);
12860 if (likely(!ret))
12861 __put_user_asm(tmp, (u32 __user *)dst,
12862 @@ -195,7 +295,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12863 }
12864 case 8: {
12865 u64 tmp;
12866 - __get_user_asm(tmp, (u64 __user *)src,
12867 + __get_user_asm(tmp, (const u64 __user *)src,
12868 ret, "q", "", "=r", 8);
12869 if (likely(!ret))
12870 __put_user_asm(tmp, (u64 __user *)dst,
12871 @@ -203,8 +303,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12872 return ret;
12873 }
12874 default:
12875 - return copy_user_generic((__force void *)dst,
12876 - (__force void *)src, size);
12877 +
12878 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12879 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12880 + src += PAX_USER_SHADOW_BASE;
12881 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12882 + dst += PAX_USER_SHADOW_BASE;
12883 +#endif
12884 +
12885 + return copy_user_generic((__force_kernel void *)dst,
12886 + (__force_kernel const void *)src, size);
12887 }
12888 }
12889
12890 @@ -215,39 +323,83 @@ __strncpy_from_user(char *dst, const char __user *src, long count);
12891 __must_check long strnlen_user(const char __user *str, long n);
12892 __must_check long __strnlen_user(const char __user *str, long n);
12893 __must_check long strlen_user(const char __user *str);
12894 -__must_check unsigned long clear_user(void __user *mem, unsigned long len);
12895 -__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12896 +__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
12897 +__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
12898
12899 static __must_check __always_inline int
12900 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
12901 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
12902 +static __must_check __always_inline int
12903 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
12904 {
12905 - return copy_user_generic(dst, (__force const void *)src, size);
12906 + if (size > INT_MAX)
12907 + return size;
12908 +
12909 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12910 + if (!__access_ok(VERIFY_READ, src, size))
12911 + return size;
12912 +
12913 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12914 + src += PAX_USER_SHADOW_BASE;
12915 +#endif
12916 +
12917 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12918 }
12919
12920 -static __must_check __always_inline int
12921 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12922 +static __must_check __always_inline unsigned long
12923 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
12924 +static __must_check __always_inline unsigned long
12925 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
12926 {
12927 - return copy_user_generic((__force void *)dst, src, size);
12928 + if (size > INT_MAX)
12929 + return size;
12930 +
12931 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12932 + if (!__access_ok(VERIFY_WRITE, dst, size))
12933 + return size;
12934 +
12935 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12936 + dst += PAX_USER_SHADOW_BASE;
12937 +#endif
12938 +
12939 + return copy_user_generic((__force_kernel void *)dst, src, size);
12940 }
12941
12942 -extern long __copy_user_nocache(void *dst, const void __user *src,
12943 - unsigned size, int zerorest);
12944 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12945 + unsigned long size, int zerorest) __size_overflow(3);
12946
12947 -static inline int
12948 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12949 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
12950 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
12951 {
12952 might_sleep();
12953 +
12954 + if (size > INT_MAX)
12955 + return size;
12956 +
12957 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12958 + if (!__access_ok(VERIFY_READ, src, size))
12959 + return size;
12960 +#endif
12961 +
12962 return __copy_user_nocache(dst, src, size, 1);
12963 }
12964
12965 -static inline int
12966 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12967 - unsigned size)
12968 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12969 + unsigned long size) __size_overflow(3);
12970 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12971 + unsigned long size)
12972 {
12973 + if (size > INT_MAX)
12974 + return size;
12975 +
12976 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12977 + if (!__access_ok(VERIFY_READ, src, size))
12978 + return size;
12979 +#endif
12980 +
12981 return __copy_user_nocache(dst, src, size, 0);
12982 }
12983
12984 -unsigned long
12985 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12986 +extern unsigned long
12987 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
12988
12989 #endif /* _ASM_X86_UACCESS_64_H */
12990 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12991 index bb05228..d763d5b 100644
12992 --- a/arch/x86/include/asm/vdso.h
12993 +++ b/arch/x86/include/asm/vdso.h
12994 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
12995 #define VDSO32_SYMBOL(base, name) \
12996 ({ \
12997 extern const char VDSO32_##name[]; \
12998 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12999 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13000 })
13001 #endif
13002
13003 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13004 index 517d476..a1cb4d9 100644
13005 --- a/arch/x86/include/asm/x86_init.h
13006 +++ b/arch/x86/include/asm/x86_init.h
13007 @@ -29,7 +29,7 @@ struct x86_init_mpparse {
13008 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13009 void (*find_smp_config)(void);
13010 void (*get_smp_config)(unsigned int early);
13011 -};
13012 +} __no_const;
13013
13014 /**
13015 * struct x86_init_resources - platform specific resource related ops
13016 @@ -43,7 +43,7 @@ struct x86_init_resources {
13017 void (*probe_roms)(void);
13018 void (*reserve_resources)(void);
13019 char *(*memory_setup)(void);
13020 -};
13021 +} __no_const;
13022
13023 /**
13024 * struct x86_init_irqs - platform specific interrupt setup
13025 @@ -56,7 +56,7 @@ struct x86_init_irqs {
13026 void (*pre_vector_init)(void);
13027 void (*intr_init)(void);
13028 void (*trap_init)(void);
13029 -};
13030 +} __no_const;
13031
13032 /**
13033 * struct x86_init_oem - oem platform specific customizing functions
13034 @@ -66,7 +66,7 @@ struct x86_init_irqs {
13035 struct x86_init_oem {
13036 void (*arch_setup)(void);
13037 void (*banner)(void);
13038 -};
13039 +} __no_const;
13040
13041 /**
13042 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13043 @@ -77,7 +77,7 @@ struct x86_init_oem {
13044 */
13045 struct x86_init_mapping {
13046 void (*pagetable_reserve)(u64 start, u64 end);
13047 -};
13048 +} __no_const;
13049
13050 /**
13051 * struct x86_init_paging - platform specific paging functions
13052 @@ -87,7 +87,7 @@ struct x86_init_mapping {
13053 struct x86_init_paging {
13054 void (*pagetable_setup_start)(pgd_t *base);
13055 void (*pagetable_setup_done)(pgd_t *base);
13056 -};
13057 +} __no_const;
13058
13059 /**
13060 * struct x86_init_timers - platform specific timer setup
13061 @@ -102,7 +102,7 @@ struct x86_init_timers {
13062 void (*tsc_pre_init)(void);
13063 void (*timer_init)(void);
13064 void (*wallclock_init)(void);
13065 -};
13066 +} __no_const;
13067
13068 /**
13069 * struct x86_init_iommu - platform specific iommu setup
13070 @@ -110,7 +110,7 @@ struct x86_init_timers {
13071 */
13072 struct x86_init_iommu {
13073 int (*iommu_init)(void);
13074 -};
13075 +} __no_const;
13076
13077 /**
13078 * struct x86_init_pci - platform specific pci init functions
13079 @@ -124,7 +124,7 @@ struct x86_init_pci {
13080 int (*init)(void);
13081 void (*init_irq)(void);
13082 void (*fixup_irqs)(void);
13083 -};
13084 +} __no_const;
13085
13086 /**
13087 * struct x86_init_ops - functions for platform specific setup
13088 @@ -140,7 +140,7 @@ struct x86_init_ops {
13089 struct x86_init_timers timers;
13090 struct x86_init_iommu iommu;
13091 struct x86_init_pci pci;
13092 -};
13093 +} __no_const;
13094
13095 /**
13096 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13097 @@ -149,7 +149,7 @@ struct x86_init_ops {
13098 struct x86_cpuinit_ops {
13099 void (*setup_percpu_clockev)(void);
13100 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13101 -};
13102 +} __no_const;
13103
13104 /**
13105 * struct x86_platform_ops - platform specific runtime functions
13106 @@ -171,7 +171,7 @@ struct x86_platform_ops {
13107 void (*nmi_init)(void);
13108 unsigned char (*get_nmi_reason)(void);
13109 int (*i8042_detect)(void);
13110 -};
13111 +} __no_const;
13112
13113 struct pci_dev;
13114
13115 @@ -180,7 +180,7 @@ struct x86_msi_ops {
13116 void (*teardown_msi_irq)(unsigned int irq);
13117 void (*teardown_msi_irqs)(struct pci_dev *dev);
13118 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13119 -};
13120 +} __no_const;
13121
13122 extern struct x86_init_ops x86_init;
13123 extern struct x86_cpuinit_ops x86_cpuinit;
13124 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13125 index c6ce245..ffbdab7 100644
13126 --- a/arch/x86/include/asm/xsave.h
13127 +++ b/arch/x86/include/asm/xsave.h
13128 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13129 {
13130 int err;
13131
13132 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13133 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13134 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13135 +#endif
13136 +
13137 /*
13138 * Clear the xsave header first, so that reserved fields are
13139 * initialized to zero.
13140 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13141 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13142 {
13143 int err;
13144 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13145 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13146 u32 lmask = mask;
13147 u32 hmask = mask >> 32;
13148
13149 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13150 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13151 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13152 +#endif
13153 +
13154 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13155 "2:\n"
13156 ".section .fixup,\"ax\"\n"
13157 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13158 index 6a564ac..9b1340c 100644
13159 --- a/arch/x86/kernel/acpi/realmode/Makefile
13160 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13161 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13162 $(call cc-option, -fno-stack-protector) \
13163 $(call cc-option, -mpreferred-stack-boundary=2)
13164 KBUILD_CFLAGS += $(call cc-option, -m32)
13165 +ifdef CONSTIFY_PLUGIN
13166 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13167 +endif
13168 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13169 GCOV_PROFILE := n
13170
13171 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13172 index b4fd836..4358fe3 100644
13173 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13174 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13175 @@ -108,6 +108,9 @@ wakeup_code:
13176 /* Do any other stuff... */
13177
13178 #ifndef CONFIG_64BIT
13179 + /* Recheck NX bit overrides (64bit path does this in trampoline */
13180 + call verify_cpu
13181 +
13182 /* This could also be done in C code... */
13183 movl pmode_cr3, %eax
13184 movl %eax, %cr3
13185 @@ -131,6 +134,7 @@ wakeup_code:
13186 movl pmode_cr0, %eax
13187 movl %eax, %cr0
13188 jmp pmode_return
13189 +# include "../../verify_cpu.S"
13190 #else
13191 pushw $0
13192 pushw trampoline_segment
13193 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13194 index 103b6ab..2004d0a 100644
13195 --- a/arch/x86/kernel/acpi/sleep.c
13196 +++ b/arch/x86/kernel/acpi/sleep.c
13197 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
13198 header->trampoline_segment = trampoline_address() >> 4;
13199 #ifdef CONFIG_SMP
13200 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13201 +
13202 + pax_open_kernel();
13203 early_gdt_descr.address =
13204 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13205 + pax_close_kernel();
13206 +
13207 initial_gs = per_cpu_offset(smp_processor_id());
13208 #endif
13209 initial_code = (unsigned long)wakeup_long64;
13210 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13211 index 13ab720..95d5442 100644
13212 --- a/arch/x86/kernel/acpi/wakeup_32.S
13213 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13214 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13215 # and restore the stack ... but you need gdt for this to work
13216 movl saved_context_esp, %esp
13217
13218 - movl %cs:saved_magic, %eax
13219 - cmpl $0x12345678, %eax
13220 + cmpl $0x12345678, saved_magic
13221 jne bogus_magic
13222
13223 # jump to place where we left off
13224 - movl saved_eip, %eax
13225 - jmp *%eax
13226 + jmp *(saved_eip)
13227
13228 bogus_magic:
13229 jmp bogus_magic
13230 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13231 index 1f84794..e23f862 100644
13232 --- a/arch/x86/kernel/alternative.c
13233 +++ b/arch/x86/kernel/alternative.c
13234 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13235 */
13236 for (a = start; a < end; a++) {
13237 instr = (u8 *)&a->instr_offset + a->instr_offset;
13238 +
13239 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13240 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13241 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13242 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13243 +#endif
13244 +
13245 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13246 BUG_ON(a->replacementlen > a->instrlen);
13247 BUG_ON(a->instrlen > sizeof(insnbuf));
13248 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13249 for (poff = start; poff < end; poff++) {
13250 u8 *ptr = (u8 *)poff + *poff;
13251
13252 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13253 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13254 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13255 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13256 +#endif
13257 +
13258 if (!*poff || ptr < text || ptr >= text_end)
13259 continue;
13260 /* turn DS segment override prefix into lock prefix */
13261 - if (*ptr == 0x3e)
13262 + if (*ktla_ktva(ptr) == 0x3e)
13263 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13264 };
13265 mutex_unlock(&text_mutex);
13266 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13267 for (poff = start; poff < end; poff++) {
13268 u8 *ptr = (u8 *)poff + *poff;
13269
13270 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13271 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13272 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13273 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13274 +#endif
13275 +
13276 if (!*poff || ptr < text || ptr >= text_end)
13277 continue;
13278 /* turn lock prefix into DS segment override prefix */
13279 - if (*ptr == 0xf0)
13280 + if (*ktla_ktva(ptr) == 0xf0)
13281 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13282 };
13283 mutex_unlock(&text_mutex);
13284 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13285
13286 BUG_ON(p->len > MAX_PATCH_LEN);
13287 /* prep the buffer with the original instructions */
13288 - memcpy(insnbuf, p->instr, p->len);
13289 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13290 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13291 (unsigned long)p->instr, p->len);
13292
13293 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13294 if (smp_alt_once)
13295 free_init_pages("SMP alternatives",
13296 (unsigned long)__smp_locks,
13297 - (unsigned long)__smp_locks_end);
13298 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13299
13300 restart_nmi();
13301 }
13302 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13303 * instructions. And on the local CPU you need to be protected again NMI or MCE
13304 * handlers seeing an inconsistent instruction while you patch.
13305 */
13306 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
13307 +void *__kprobes text_poke_early(void *addr, const void *opcode,
13308 size_t len)
13309 {
13310 unsigned long flags;
13311 local_irq_save(flags);
13312 - memcpy(addr, opcode, len);
13313 +
13314 + pax_open_kernel();
13315 + memcpy(ktla_ktva(addr), opcode, len);
13316 sync_core();
13317 + pax_close_kernel();
13318 +
13319 local_irq_restore(flags);
13320 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13321 that causes hangs on some VIA CPUs. */
13322 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13323 */
13324 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13325 {
13326 - unsigned long flags;
13327 - char *vaddr;
13328 + unsigned char *vaddr = ktla_ktva(addr);
13329 struct page *pages[2];
13330 - int i;
13331 + size_t i;
13332
13333 if (!core_kernel_text((unsigned long)addr)) {
13334 - pages[0] = vmalloc_to_page(addr);
13335 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13336 + pages[0] = vmalloc_to_page(vaddr);
13337 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13338 } else {
13339 - pages[0] = virt_to_page(addr);
13340 + pages[0] = virt_to_page(vaddr);
13341 WARN_ON(!PageReserved(pages[0]));
13342 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13343 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13344 }
13345 BUG_ON(!pages[0]);
13346 - local_irq_save(flags);
13347 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13348 - if (pages[1])
13349 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13350 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13351 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13352 - clear_fixmap(FIX_TEXT_POKE0);
13353 - if (pages[1])
13354 - clear_fixmap(FIX_TEXT_POKE1);
13355 - local_flush_tlb();
13356 - sync_core();
13357 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13358 - that causes hangs on some VIA CPUs. */
13359 + text_poke_early(addr, opcode, len);
13360 for (i = 0; i < len; i++)
13361 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13362 - local_irq_restore(flags);
13363 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13364 return addr;
13365 }
13366
13367 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13368 index 2eec05b..fef012b 100644
13369 --- a/arch/x86/kernel/apic/apic.c
13370 +++ b/arch/x86/kernel/apic/apic.c
13371 @@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13372 /*
13373 * Debug level, exported for io_apic.c
13374 */
13375 -unsigned int apic_verbosity;
13376 +int apic_verbosity;
13377
13378 int pic_mode;
13379
13380 @@ -1908,7 +1908,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13381 apic_write(APIC_ESR, 0);
13382 v1 = apic_read(APIC_ESR);
13383 ack_APIC_irq();
13384 - atomic_inc(&irq_err_count);
13385 + atomic_inc_unchecked(&irq_err_count);
13386
13387 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13388 smp_processor_id(), v0 , v1);
13389 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13390 index fb07275..e06bb59 100644
13391 --- a/arch/x86/kernel/apic/io_apic.c
13392 +++ b/arch/x86/kernel/apic/io_apic.c
13393 @@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13394 }
13395 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13396
13397 -void lock_vector_lock(void)
13398 +void lock_vector_lock(void) __acquires(vector_lock)
13399 {
13400 /* Used to the online set of cpus does not change
13401 * during assign_irq_vector.
13402 @@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
13403 raw_spin_lock(&vector_lock);
13404 }
13405
13406 -void unlock_vector_lock(void)
13407 +void unlock_vector_lock(void) __releases(vector_lock)
13408 {
13409 raw_spin_unlock(&vector_lock);
13410 }
13411 @@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
13412 ack_APIC_irq();
13413 }
13414
13415 -atomic_t irq_mis_count;
13416 +atomic_unchecked_t irq_mis_count;
13417
13418 static void ack_apic_level(struct irq_data *data)
13419 {
13420 @@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
13421 * at the cpu.
13422 */
13423 if (!(v & (1 << (i & 0x1f)))) {
13424 - atomic_inc(&irq_mis_count);
13425 + atomic_inc_unchecked(&irq_mis_count);
13426
13427 eoi_ioapic_irq(irq, cfg);
13428 }
13429 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13430 index f76623c..aab694f 100644
13431 --- a/arch/x86/kernel/apm_32.c
13432 +++ b/arch/x86/kernel/apm_32.c
13433 @@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
13434 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13435 * even though they are called in protected mode.
13436 */
13437 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13438 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13439 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13440
13441 static const char driver_version[] = "1.16ac"; /* no spaces */
13442 @@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
13443 BUG_ON(cpu != 0);
13444 gdt = get_cpu_gdt_table(cpu);
13445 save_desc_40 = gdt[0x40 / 8];
13446 +
13447 + pax_open_kernel();
13448 gdt[0x40 / 8] = bad_bios_desc;
13449 + pax_close_kernel();
13450
13451 apm_irq_save(flags);
13452 APM_DO_SAVE_SEGS;
13453 @@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
13454 &call->esi);
13455 APM_DO_RESTORE_SEGS;
13456 apm_irq_restore(flags);
13457 +
13458 + pax_open_kernel();
13459 gdt[0x40 / 8] = save_desc_40;
13460 + pax_close_kernel();
13461 +
13462 put_cpu();
13463
13464 return call->eax & 0xff;
13465 @@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
13466 BUG_ON(cpu != 0);
13467 gdt = get_cpu_gdt_table(cpu);
13468 save_desc_40 = gdt[0x40 / 8];
13469 +
13470 + pax_open_kernel();
13471 gdt[0x40 / 8] = bad_bios_desc;
13472 + pax_close_kernel();
13473
13474 apm_irq_save(flags);
13475 APM_DO_SAVE_SEGS;
13476 @@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
13477 &call->eax);
13478 APM_DO_RESTORE_SEGS;
13479 apm_irq_restore(flags);
13480 +
13481 + pax_open_kernel();
13482 gdt[0x40 / 8] = save_desc_40;
13483 + pax_close_kernel();
13484 +
13485 put_cpu();
13486 return error;
13487 }
13488 @@ -2347,12 +2361,15 @@ static int __init apm_init(void)
13489 * code to that CPU.
13490 */
13491 gdt = get_cpu_gdt_table(0);
13492 +
13493 + pax_open_kernel();
13494 set_desc_base(&gdt[APM_CS >> 3],
13495 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13496 set_desc_base(&gdt[APM_CS_16 >> 3],
13497 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13498 set_desc_base(&gdt[APM_DS >> 3],
13499 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13500 + pax_close_kernel();
13501
13502 proc_create("apm", 0, NULL, &apm_file_ops);
13503
13504 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
13505 index 68de2dc..1f3c720 100644
13506 --- a/arch/x86/kernel/asm-offsets.c
13507 +++ b/arch/x86/kernel/asm-offsets.c
13508 @@ -33,6 +33,8 @@ void common(void) {
13509 OFFSET(TI_status, thread_info, status);
13510 OFFSET(TI_addr_limit, thread_info, addr_limit);
13511 OFFSET(TI_preempt_count, thread_info, preempt_count);
13512 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13513 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13514
13515 BLANK();
13516 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
13517 @@ -53,8 +55,26 @@ void common(void) {
13518 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13519 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13520 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13521 +
13522 +#ifdef CONFIG_PAX_KERNEXEC
13523 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13524 #endif
13525
13526 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13527 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13528 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13529 +#ifdef CONFIG_X86_64
13530 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13531 +#endif
13532 +#endif
13533 +
13534 +#endif
13535 +
13536 + BLANK();
13537 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13538 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13539 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13540 +
13541 #ifdef CONFIG_XEN
13542 BLANK();
13543 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13544 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13545 index 834e897..dacddc8 100644
13546 --- a/arch/x86/kernel/asm-offsets_64.c
13547 +++ b/arch/x86/kernel/asm-offsets_64.c
13548 @@ -70,6 +70,7 @@ int main(void)
13549 BLANK();
13550 #undef ENTRY
13551
13552 + DEFINE(TSS_size, sizeof(struct tss_struct));
13553 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
13554 BLANK();
13555
13556 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13557 index 25f24dc..4094a7f 100644
13558 --- a/arch/x86/kernel/cpu/Makefile
13559 +++ b/arch/x86/kernel/cpu/Makefile
13560 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
13561 CFLAGS_REMOVE_perf_event.o = -pg
13562 endif
13563
13564 -# Make sure load_percpu_segment has no stackprotector
13565 -nostackp := $(call cc-option, -fno-stack-protector)
13566 -CFLAGS_common.o := $(nostackp)
13567 -
13568 obj-y := intel_cacheinfo.o scattered.o topology.o
13569 obj-y += proc.o capflags.o powerflags.o common.o
13570 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
13571 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13572 index f4773f4..b3fb13c 100644
13573 --- a/arch/x86/kernel/cpu/amd.c
13574 +++ b/arch/x86/kernel/cpu/amd.c
13575 @@ -669,7 +669,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13576 unsigned int size)
13577 {
13578 /* AMD errata T13 (order #21922) */
13579 - if ((c->x86 == 6)) {
13580 + if (c->x86 == 6) {
13581 /* Duron Rev A0 */
13582 if (c->x86_model == 3 && c->x86_mask == 0)
13583 size = 64;
13584 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13585 index c0f7d68..aa418f9 100644
13586 --- a/arch/x86/kernel/cpu/common.c
13587 +++ b/arch/x86/kernel/cpu/common.c
13588 @@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13589
13590 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13591
13592 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13593 -#ifdef CONFIG_X86_64
13594 - /*
13595 - * We need valid kernel segments for data and code in long mode too
13596 - * IRET will check the segment types kkeil 2000/10/28
13597 - * Also sysret mandates a special GDT layout
13598 - *
13599 - * TLS descriptors are currently at a different place compared to i386.
13600 - * Hopefully nobody expects them at a fixed place (Wine?)
13601 - */
13602 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13603 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13604 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13605 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13606 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13607 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13608 -#else
13609 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13610 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13611 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13612 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13613 - /*
13614 - * Segments used for calling PnP BIOS have byte granularity.
13615 - * They code segments and data segments have fixed 64k limits,
13616 - * the transfer segment sizes are set at run time.
13617 - */
13618 - /* 32-bit code */
13619 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13620 - /* 16-bit code */
13621 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13622 - /* 16-bit data */
13623 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13624 - /* 16-bit data */
13625 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13626 - /* 16-bit data */
13627 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13628 - /*
13629 - * The APM segments have byte granularity and their bases
13630 - * are set at run time. All have 64k limits.
13631 - */
13632 - /* 32-bit code */
13633 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13634 - /* 16-bit code */
13635 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13636 - /* data */
13637 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13638 -
13639 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13640 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13641 - GDT_STACK_CANARY_INIT
13642 -#endif
13643 -} };
13644 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13645 -
13646 static int __init x86_xsave_setup(char *s)
13647 {
13648 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13649 @@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
13650 {
13651 struct desc_ptr gdt_descr;
13652
13653 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13654 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13655 gdt_descr.size = GDT_SIZE - 1;
13656 load_gdt(&gdt_descr);
13657 /* Reload the per-cpu base */
13658 @@ -839,6 +785,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13659 /* Filter out anything that depends on CPUID levels we don't have */
13660 filter_cpuid_features(c, true);
13661
13662 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13663 + setup_clear_cpu_cap(X86_FEATURE_SEP);
13664 +#endif
13665 +
13666 /* If the model name is still unset, do table lookup. */
13667 if (!c->x86_model_id[0]) {
13668 const char *p;
13669 @@ -1019,10 +969,12 @@ static __init int setup_disablecpuid(char *arg)
13670 }
13671 __setup("clearcpuid=", setup_disablecpuid);
13672
13673 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13674 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
13675 +
13676 #ifdef CONFIG_X86_64
13677 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13678 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
13679 - (unsigned long) nmi_idt_table };
13680 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
13681
13682 DEFINE_PER_CPU_FIRST(union irq_stack_union,
13683 irq_stack_union) __aligned(PAGE_SIZE);
13684 @@ -1036,7 +988,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13685 EXPORT_PER_CPU_SYMBOL(current_task);
13686
13687 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13688 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13689 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13690 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13691
13692 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13693 @@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13694 {
13695 memset(regs, 0, sizeof(struct pt_regs));
13696 regs->fs = __KERNEL_PERCPU;
13697 - regs->gs = __KERNEL_STACK_CANARY;
13698 + savesegment(gs, regs->gs);
13699
13700 return regs;
13701 }
13702 @@ -1190,7 +1142,7 @@ void __cpuinit cpu_init(void)
13703 int i;
13704
13705 cpu = stack_smp_processor_id();
13706 - t = &per_cpu(init_tss, cpu);
13707 + t = init_tss + cpu;
13708 oist = &per_cpu(orig_ist, cpu);
13709
13710 #ifdef CONFIG_NUMA
13711 @@ -1216,7 +1168,7 @@ void __cpuinit cpu_init(void)
13712 switch_to_new_gdt(cpu);
13713 loadsegment(fs, 0);
13714
13715 - load_idt((const struct desc_ptr *)&idt_descr);
13716 + load_idt(&idt_descr);
13717
13718 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13719 syscall_init();
13720 @@ -1225,7 +1177,6 @@ void __cpuinit cpu_init(void)
13721 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13722 barrier();
13723
13724 - x86_configure_nx();
13725 if (cpu != 0)
13726 enable_x2apic();
13727
13728 @@ -1281,7 +1232,7 @@ void __cpuinit cpu_init(void)
13729 {
13730 int cpu = smp_processor_id();
13731 struct task_struct *curr = current;
13732 - struct tss_struct *t = &per_cpu(init_tss, cpu);
13733 + struct tss_struct *t = init_tss + cpu;
13734 struct thread_struct *thread = &curr->thread;
13735
13736 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13737 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13738 index 3e6ff6c..54b4992 100644
13739 --- a/arch/x86/kernel/cpu/intel.c
13740 +++ b/arch/x86/kernel/cpu/intel.c
13741 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13742 * Update the IDT descriptor and reload the IDT so that
13743 * it uses the read-only mapped virtual address.
13744 */
13745 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13746 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13747 load_idt(&idt_descr);
13748 }
13749 #endif
13750 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13751 index fc4beb3..f20a5a7 100644
13752 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
13753 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13754 @@ -199,6 +199,8 @@ static void raise_mce(struct mce *m)
13755
13756 /* Error injection interface */
13757 static ssize_t mce_write(struct file *filp, const char __user *ubuf,
13758 + size_t usize, loff_t *off) __size_overflow(3);
13759 +static ssize_t mce_write(struct file *filp, const char __user *ubuf,
13760 size_t usize, loff_t *off)
13761 {
13762 struct mce m;
13763 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13764 index 5a11ae2..a1a1c8a 100644
13765 --- a/arch/x86/kernel/cpu/mcheck/mce.c
13766 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
13767 @@ -42,6 +42,7 @@
13768 #include <asm/processor.h>
13769 #include <asm/mce.h>
13770 #include <asm/msr.h>
13771 +#include <asm/local.h>
13772
13773 #include "mce-internal.h"
13774
13775 @@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
13776 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13777 m->cs, m->ip);
13778
13779 - if (m->cs == __KERNEL_CS)
13780 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13781 print_symbol("{%s}", m->ip);
13782 pr_cont("\n");
13783 }
13784 @@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
13785
13786 #define PANIC_TIMEOUT 5 /* 5 seconds */
13787
13788 -static atomic_t mce_paniced;
13789 +static atomic_unchecked_t mce_paniced;
13790
13791 static int fake_panic;
13792 -static atomic_t mce_fake_paniced;
13793 +static atomic_unchecked_t mce_fake_paniced;
13794
13795 /* Panic in progress. Enable interrupts and wait for final IPI */
13796 static void wait_for_panic(void)
13797 @@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13798 /*
13799 * Make sure only one CPU runs in machine check panic
13800 */
13801 - if (atomic_inc_return(&mce_paniced) > 1)
13802 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13803 wait_for_panic();
13804 barrier();
13805
13806 @@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13807 console_verbose();
13808 } else {
13809 /* Don't log too much for fake panic */
13810 - if (atomic_inc_return(&mce_fake_paniced) > 1)
13811 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13812 return;
13813 }
13814 /* First print corrected ones that are still unlogged */
13815 @@ -658,7 +659,7 @@ static int mce_timed_out(u64 *t)
13816 * might have been modified by someone else.
13817 */
13818 rmb();
13819 - if (atomic_read(&mce_paniced))
13820 + if (atomic_read_unchecked(&mce_paniced))
13821 wait_for_panic();
13822 if (!monarch_timeout)
13823 goto out;
13824 @@ -1446,7 +1447,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13825 }
13826
13827 /* Call the installed machine check handler for this CPU setup. */
13828 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
13829 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13830 unexpected_machine_check;
13831
13832 /*
13833 @@ -1469,7 +1470,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
13834 return;
13835 }
13836
13837 + pax_open_kernel();
13838 machine_check_vector = do_machine_check;
13839 + pax_close_kernel();
13840
13841 __mcheck_cpu_init_generic();
13842 __mcheck_cpu_init_vendor(c);
13843 @@ -1483,7 +1486,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
13844 */
13845
13846 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
13847 -static int mce_chrdev_open_count; /* #times opened */
13848 +static local_t mce_chrdev_open_count; /* #times opened */
13849 static int mce_chrdev_open_exclu; /* already open exclusive? */
13850
13851 static int mce_chrdev_open(struct inode *inode, struct file *file)
13852 @@ -1491,7 +1494,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
13853 spin_lock(&mce_chrdev_state_lock);
13854
13855 if (mce_chrdev_open_exclu ||
13856 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
13857 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
13858 spin_unlock(&mce_chrdev_state_lock);
13859
13860 return -EBUSY;
13861 @@ -1499,7 +1502,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
13862
13863 if (file->f_flags & O_EXCL)
13864 mce_chrdev_open_exclu = 1;
13865 - mce_chrdev_open_count++;
13866 + local_inc(&mce_chrdev_open_count);
13867
13868 spin_unlock(&mce_chrdev_state_lock);
13869
13870 @@ -1510,7 +1513,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
13871 {
13872 spin_lock(&mce_chrdev_state_lock);
13873
13874 - mce_chrdev_open_count--;
13875 + local_dec(&mce_chrdev_open_count);
13876 mce_chrdev_open_exclu = 0;
13877
13878 spin_unlock(&mce_chrdev_state_lock);
13879 @@ -2229,7 +2232,7 @@ struct dentry *mce_get_debugfs_dir(void)
13880 static void mce_reset(void)
13881 {
13882 cpu_missing = 0;
13883 - atomic_set(&mce_fake_paniced, 0);
13884 + atomic_set_unchecked(&mce_fake_paniced, 0);
13885 atomic_set(&mce_executing, 0);
13886 atomic_set(&mce_callin, 0);
13887 atomic_set(&global_nwo, 0);
13888 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13889 index 5c0e653..0882b0a 100644
13890 --- a/arch/x86/kernel/cpu/mcheck/p5.c
13891 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
13892 @@ -12,6 +12,7 @@
13893 #include <asm/system.h>
13894 #include <asm/mce.h>
13895 #include <asm/msr.h>
13896 +#include <asm/pgtable.h>
13897
13898 /* By default disabled */
13899 int mce_p5_enabled __read_mostly;
13900 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13901 if (!cpu_has(c, X86_FEATURE_MCE))
13902 return;
13903
13904 + pax_open_kernel();
13905 machine_check_vector = pentium_machine_check;
13906 + pax_close_kernel();
13907 /* Make sure the vector pointer is visible before we enable MCEs: */
13908 wmb();
13909
13910 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13911 index 54060f5..c1a7577 100644
13912 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
13913 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13914 @@ -11,6 +11,7 @@
13915 #include <asm/system.h>
13916 #include <asm/mce.h>
13917 #include <asm/msr.h>
13918 +#include <asm/pgtable.h>
13919
13920 /* Machine check handler for WinChip C6: */
13921 static void winchip_machine_check(struct pt_regs *regs, long error_code)
13922 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13923 {
13924 u32 lo, hi;
13925
13926 + pax_open_kernel();
13927 machine_check_vector = winchip_machine_check;
13928 + pax_close_kernel();
13929 /* Make sure the vector pointer is visible before we enable MCEs: */
13930 wmb();
13931
13932 diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
13933 index 7928963..1b16001 100644
13934 --- a/arch/x86/kernel/cpu/mtrr/if.c
13935 +++ b/arch/x86/kernel/cpu/mtrr/if.c
13936 @@ -91,6 +91,8 @@ mtrr_file_del(unsigned long base, unsigned long size,
13937 * "base=%Lx size=%Lx type=%s" or "disable=%d"
13938 */
13939 static ssize_t
13940 +mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) __size_overflow(3);
13941 +static ssize_t
13942 mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
13943 {
13944 int i, err;
13945 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
13946 index 6b96110..0da73eb 100644
13947 --- a/arch/x86/kernel/cpu/mtrr/main.c
13948 +++ b/arch/x86/kernel/cpu/mtrr/main.c
13949 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
13950 u64 size_or_mask, size_and_mask;
13951 static bool mtrr_aps_delayed_init;
13952
13953 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
13954 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
13955
13956 const struct mtrr_ops *mtrr_if;
13957
13958 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
13959 index df5e41f..816c719 100644
13960 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
13961 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
13962 @@ -25,7 +25,7 @@ struct mtrr_ops {
13963 int (*validate_add_page)(unsigned long base, unsigned long size,
13964 unsigned int type);
13965 int (*have_wrcomb)(void);
13966 -};
13967 +} __do_const;
13968
13969 extern int generic_get_free_region(unsigned long base, unsigned long size,
13970 int replace_reg);
13971 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
13972 index 5adce10..99284ec 100644
13973 --- a/arch/x86/kernel/cpu/perf_event.c
13974 +++ b/arch/x86/kernel/cpu/perf_event.c
13975 @@ -1665,7 +1665,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
13976 break;
13977
13978 perf_callchain_store(entry, frame.return_address);
13979 - fp = frame.next_frame;
13980 + fp = (const void __force_user *)frame.next_frame;
13981 }
13982 }
13983
13984 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
13985 index 13ad899..f642b9a 100644
13986 --- a/arch/x86/kernel/crash.c
13987 +++ b/arch/x86/kernel/crash.c
13988 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
13989 {
13990 #ifdef CONFIG_X86_32
13991 struct pt_regs fixed_regs;
13992 -#endif
13993
13994 -#ifdef CONFIG_X86_32
13995 - if (!user_mode_vm(regs)) {
13996 + if (!user_mode(regs)) {
13997 crash_fixup_ss_esp(&fixed_regs, regs);
13998 regs = &fixed_regs;
13999 }
14000 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14001 index 37250fe..bf2ec74 100644
14002 --- a/arch/x86/kernel/doublefault_32.c
14003 +++ b/arch/x86/kernel/doublefault_32.c
14004 @@ -11,7 +11,7 @@
14005
14006 #define DOUBLEFAULT_STACKSIZE (1024)
14007 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14008 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14009 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14010
14011 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14012
14013 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14014 unsigned long gdt, tss;
14015
14016 store_gdt(&gdt_desc);
14017 - gdt = gdt_desc.address;
14018 + gdt = (unsigned long)gdt_desc.address;
14019
14020 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14021
14022 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14023 /* 0x2 bit is always set */
14024 .flags = X86_EFLAGS_SF | 0x2,
14025 .sp = STACK_START,
14026 - .es = __USER_DS,
14027 + .es = __KERNEL_DS,
14028 .cs = __KERNEL_CS,
14029 .ss = __KERNEL_DS,
14030 - .ds = __USER_DS,
14031 + .ds = __KERNEL_DS,
14032 .fs = __KERNEL_PERCPU,
14033
14034 .__cr3 = __pa_nodebug(swapper_pg_dir),
14035 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14036 index 4025fe4..d8451c6 100644
14037 --- a/arch/x86/kernel/dumpstack.c
14038 +++ b/arch/x86/kernel/dumpstack.c
14039 @@ -2,6 +2,9 @@
14040 * Copyright (C) 1991, 1992 Linus Torvalds
14041 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14042 */
14043 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14044 +#define __INCLUDED_BY_HIDESYM 1
14045 +#endif
14046 #include <linux/kallsyms.h>
14047 #include <linux/kprobes.h>
14048 #include <linux/uaccess.h>
14049 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
14050 static void
14051 print_ftrace_graph_addr(unsigned long addr, void *data,
14052 const struct stacktrace_ops *ops,
14053 - struct thread_info *tinfo, int *graph)
14054 + struct task_struct *task, int *graph)
14055 {
14056 - struct task_struct *task = tinfo->task;
14057 unsigned long ret_addr;
14058 int index = task->curr_ret_stack;
14059
14060 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14061 static inline void
14062 print_ftrace_graph_addr(unsigned long addr, void *data,
14063 const struct stacktrace_ops *ops,
14064 - struct thread_info *tinfo, int *graph)
14065 + struct task_struct *task, int *graph)
14066 { }
14067 #endif
14068
14069 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14070 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14071 */
14072
14073 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14074 - void *p, unsigned int size, void *end)
14075 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14076 {
14077 - void *t = tinfo;
14078 if (end) {
14079 if (p < end && p >= (end-THREAD_SIZE))
14080 return 1;
14081 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14082 }
14083
14084 unsigned long
14085 -print_context_stack(struct thread_info *tinfo,
14086 +print_context_stack(struct task_struct *task, void *stack_start,
14087 unsigned long *stack, unsigned long bp,
14088 const struct stacktrace_ops *ops, void *data,
14089 unsigned long *end, int *graph)
14090 {
14091 struct stack_frame *frame = (struct stack_frame *)bp;
14092
14093 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14094 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14095 unsigned long addr;
14096
14097 addr = *stack;
14098 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
14099 } else {
14100 ops->address(data, addr, 0);
14101 }
14102 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14103 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14104 }
14105 stack++;
14106 }
14107 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
14108 EXPORT_SYMBOL_GPL(print_context_stack);
14109
14110 unsigned long
14111 -print_context_stack_bp(struct thread_info *tinfo,
14112 +print_context_stack_bp(struct task_struct *task, void *stack_start,
14113 unsigned long *stack, unsigned long bp,
14114 const struct stacktrace_ops *ops, void *data,
14115 unsigned long *end, int *graph)
14116 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14117 struct stack_frame *frame = (struct stack_frame *)bp;
14118 unsigned long *ret_addr = &frame->return_address;
14119
14120 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14121 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14122 unsigned long addr = *ret_addr;
14123
14124 if (!__kernel_text_address(addr))
14125 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14126 ops->address(data, addr, 1);
14127 frame = frame->next_frame;
14128 ret_addr = &frame->return_address;
14129 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14130 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14131 }
14132
14133 return (unsigned long)frame;
14134 @@ -186,7 +186,7 @@ void dump_stack(void)
14135
14136 bp = stack_frame(current, NULL);
14137 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14138 - current->pid, current->comm, print_tainted(),
14139 + task_pid_nr(current), current->comm, print_tainted(),
14140 init_utsname()->release,
14141 (int)strcspn(init_utsname()->version, " "),
14142 init_utsname()->version);
14143 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
14144 }
14145 EXPORT_SYMBOL_GPL(oops_begin);
14146
14147 +extern void gr_handle_kernel_exploit(void);
14148 +
14149 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14150 {
14151 if (regs && kexec_should_crash(current))
14152 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14153 panic("Fatal exception in interrupt");
14154 if (panic_on_oops)
14155 panic("Fatal exception");
14156 - do_exit(signr);
14157 +
14158 + gr_handle_kernel_exploit();
14159 +
14160 + do_group_exit(signr);
14161 }
14162
14163 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14164 @@ -270,7 +275,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14165
14166 show_registers(regs);
14167 #ifdef CONFIG_X86_32
14168 - if (user_mode_vm(regs)) {
14169 + if (user_mode(regs)) {
14170 sp = regs->sp;
14171 ss = regs->ss & 0xffff;
14172 } else {
14173 @@ -298,7 +303,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14174 unsigned long flags = oops_begin();
14175 int sig = SIGSEGV;
14176
14177 - if (!user_mode_vm(regs))
14178 + if (!user_mode(regs))
14179 report_bug(regs->ip, regs);
14180
14181 if (__die(str, regs, err))
14182 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14183 index c99f9ed..2a15d80 100644
14184 --- a/arch/x86/kernel/dumpstack_32.c
14185 +++ b/arch/x86/kernel/dumpstack_32.c
14186 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14187 bp = stack_frame(task, regs);
14188
14189 for (;;) {
14190 - struct thread_info *context;
14191 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14192
14193 - context = (struct thread_info *)
14194 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14195 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14196 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14197
14198 - stack = (unsigned long *)context->previous_esp;
14199 - if (!stack)
14200 + if (stack_start == task_stack_page(task))
14201 break;
14202 + stack = *(unsigned long **)stack_start;
14203 if (ops->stack(data, "IRQ") < 0)
14204 break;
14205 touch_nmi_watchdog();
14206 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14207 * When in-kernel, we also print out the stack and code at the
14208 * time of the fault..
14209 */
14210 - if (!user_mode_vm(regs)) {
14211 + if (!user_mode(regs)) {
14212 unsigned int code_prologue = code_bytes * 43 / 64;
14213 unsigned int code_len = code_bytes;
14214 unsigned char c;
14215 u8 *ip;
14216 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14217
14218 printk(KERN_EMERG "Stack:\n");
14219 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14220
14221 printk(KERN_EMERG "Code: ");
14222
14223 - ip = (u8 *)regs->ip - code_prologue;
14224 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14225 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14226 /* try starting at IP */
14227 - ip = (u8 *)regs->ip;
14228 + ip = (u8 *)regs->ip + cs_base;
14229 code_len = code_len - code_prologue + 1;
14230 }
14231 for (i = 0; i < code_len; i++, ip++) {
14232 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14233 printk(KERN_CONT " Bad EIP value.");
14234 break;
14235 }
14236 - if (ip == (u8 *)regs->ip)
14237 + if (ip == (u8 *)regs->ip + cs_base)
14238 printk(KERN_CONT "<%02x> ", c);
14239 else
14240 printk(KERN_CONT "%02x ", c);
14241 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14242 {
14243 unsigned short ud2;
14244
14245 + ip = ktla_ktva(ip);
14246 if (ip < PAGE_OFFSET)
14247 return 0;
14248 if (probe_kernel_address((unsigned short *)ip, ud2))
14249 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14250
14251 return ud2 == 0x0b0f;
14252 }
14253 +
14254 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14255 +void pax_check_alloca(unsigned long size)
14256 +{
14257 + unsigned long sp = (unsigned long)&sp, stack_left;
14258 +
14259 + /* all kernel stacks are of the same size */
14260 + stack_left = sp & (THREAD_SIZE - 1);
14261 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14262 +}
14263 +EXPORT_SYMBOL(pax_check_alloca);
14264 +#endif
14265 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14266 index 17107bd..b2deecf 100644
14267 --- a/arch/x86/kernel/dumpstack_64.c
14268 +++ b/arch/x86/kernel/dumpstack_64.c
14269 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14270 unsigned long *irq_stack_end =
14271 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14272 unsigned used = 0;
14273 - struct thread_info *tinfo;
14274 int graph = 0;
14275 unsigned long dummy;
14276 + void *stack_start;
14277
14278 if (!task)
14279 task = current;
14280 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14281 * current stack address. If the stacks consist of nested
14282 * exceptions
14283 */
14284 - tinfo = task_thread_info(task);
14285 for (;;) {
14286 char *id;
14287 unsigned long *estack_end;
14288 +
14289 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14290 &used, &id);
14291
14292 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14293 if (ops->stack(data, id) < 0)
14294 break;
14295
14296 - bp = ops->walk_stack(tinfo, stack, bp, ops,
14297 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14298 data, estack_end, &graph);
14299 ops->stack(data, "<EOE>");
14300 /*
14301 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14302 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14303 if (ops->stack(data, "IRQ") < 0)
14304 break;
14305 - bp = ops->walk_stack(tinfo, stack, bp,
14306 + bp = ops->walk_stack(task, irq_stack, stack, bp,
14307 ops, data, irq_stack_end, &graph);
14308 /*
14309 * We link to the next stack (which would be
14310 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14311 /*
14312 * This handles the process stack:
14313 */
14314 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14315 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14316 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14317 put_cpu();
14318 }
14319 EXPORT_SYMBOL(dump_trace);
14320 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
14321
14322 return ud2 == 0x0b0f;
14323 }
14324 +
14325 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14326 +void pax_check_alloca(unsigned long size)
14327 +{
14328 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14329 + unsigned cpu, used;
14330 + char *id;
14331 +
14332 + /* check the process stack first */
14333 + stack_start = (unsigned long)task_stack_page(current);
14334 + stack_end = stack_start + THREAD_SIZE;
14335 + if (likely(stack_start <= sp && sp < stack_end)) {
14336 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14337 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14338 + return;
14339 + }
14340 +
14341 + cpu = get_cpu();
14342 +
14343 + /* check the irq stacks */
14344 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14345 + stack_start = stack_end - IRQ_STACK_SIZE;
14346 + if (stack_start <= sp && sp < stack_end) {
14347 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14348 + put_cpu();
14349 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14350 + return;
14351 + }
14352 +
14353 + /* check the exception stacks */
14354 + used = 0;
14355 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14356 + stack_start = stack_end - EXCEPTION_STKSZ;
14357 + if (stack_end && stack_start <= sp && sp < stack_end) {
14358 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14359 + put_cpu();
14360 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14361 + return;
14362 + }
14363 +
14364 + put_cpu();
14365 +
14366 + /* unknown stack */
14367 + BUG();
14368 +}
14369 +EXPORT_SYMBOL(pax_check_alloca);
14370 +#endif
14371 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14372 index 9b9f18b..9fcaa04 100644
14373 --- a/arch/x86/kernel/early_printk.c
14374 +++ b/arch/x86/kernel/early_printk.c
14375 @@ -7,6 +7,7 @@
14376 #include <linux/pci_regs.h>
14377 #include <linux/pci_ids.h>
14378 #include <linux/errno.h>
14379 +#include <linux/sched.h>
14380 #include <asm/io.h>
14381 #include <asm/processor.h>
14382 #include <asm/fcntl.h>
14383 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14384 index 7b784f4..76aaad7 100644
14385 --- a/arch/x86/kernel/entry_32.S
14386 +++ b/arch/x86/kernel/entry_32.S
14387 @@ -179,13 +179,146 @@
14388 /*CFI_REL_OFFSET gs, PT_GS*/
14389 .endm
14390 .macro SET_KERNEL_GS reg
14391 +
14392 +#ifdef CONFIG_CC_STACKPROTECTOR
14393 movl $(__KERNEL_STACK_CANARY), \reg
14394 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14395 + movl $(__USER_DS), \reg
14396 +#else
14397 + xorl \reg, \reg
14398 +#endif
14399 +
14400 movl \reg, %gs
14401 .endm
14402
14403 #endif /* CONFIG_X86_32_LAZY_GS */
14404
14405 -.macro SAVE_ALL
14406 +.macro pax_enter_kernel
14407 +#ifdef CONFIG_PAX_KERNEXEC
14408 + call pax_enter_kernel
14409 +#endif
14410 +.endm
14411 +
14412 +.macro pax_exit_kernel
14413 +#ifdef CONFIG_PAX_KERNEXEC
14414 + call pax_exit_kernel
14415 +#endif
14416 +.endm
14417 +
14418 +#ifdef CONFIG_PAX_KERNEXEC
14419 +ENTRY(pax_enter_kernel)
14420 +#ifdef CONFIG_PARAVIRT
14421 + pushl %eax
14422 + pushl %ecx
14423 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14424 + mov %eax, %esi
14425 +#else
14426 + mov %cr0, %esi
14427 +#endif
14428 + bts $16, %esi
14429 + jnc 1f
14430 + mov %cs, %esi
14431 + cmp $__KERNEL_CS, %esi
14432 + jz 3f
14433 + ljmp $__KERNEL_CS, $3f
14434 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14435 +2:
14436 +#ifdef CONFIG_PARAVIRT
14437 + mov %esi, %eax
14438 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14439 +#else
14440 + mov %esi, %cr0
14441 +#endif
14442 +3:
14443 +#ifdef CONFIG_PARAVIRT
14444 + popl %ecx
14445 + popl %eax
14446 +#endif
14447 + ret
14448 +ENDPROC(pax_enter_kernel)
14449 +
14450 +ENTRY(pax_exit_kernel)
14451 +#ifdef CONFIG_PARAVIRT
14452 + pushl %eax
14453 + pushl %ecx
14454 +#endif
14455 + mov %cs, %esi
14456 + cmp $__KERNEXEC_KERNEL_CS, %esi
14457 + jnz 2f
14458 +#ifdef CONFIG_PARAVIRT
14459 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14460 + mov %eax, %esi
14461 +#else
14462 + mov %cr0, %esi
14463 +#endif
14464 + btr $16, %esi
14465 + ljmp $__KERNEL_CS, $1f
14466 +1:
14467 +#ifdef CONFIG_PARAVIRT
14468 + mov %esi, %eax
14469 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14470 +#else
14471 + mov %esi, %cr0
14472 +#endif
14473 +2:
14474 +#ifdef CONFIG_PARAVIRT
14475 + popl %ecx
14476 + popl %eax
14477 +#endif
14478 + ret
14479 +ENDPROC(pax_exit_kernel)
14480 +#endif
14481 +
14482 +.macro pax_erase_kstack
14483 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14484 + call pax_erase_kstack
14485 +#endif
14486 +.endm
14487 +
14488 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14489 +/*
14490 + * ebp: thread_info
14491 + * ecx, edx: can be clobbered
14492 + */
14493 +ENTRY(pax_erase_kstack)
14494 + pushl %edi
14495 + pushl %eax
14496 +
14497 + mov TI_lowest_stack(%ebp), %edi
14498 + mov $-0xBEEF, %eax
14499 + std
14500 +
14501 +1: mov %edi, %ecx
14502 + and $THREAD_SIZE_asm - 1, %ecx
14503 + shr $2, %ecx
14504 + repne scasl
14505 + jecxz 2f
14506 +
14507 + cmp $2*16, %ecx
14508 + jc 2f
14509 +
14510 + mov $2*16, %ecx
14511 + repe scasl
14512 + jecxz 2f
14513 + jne 1b
14514 +
14515 +2: cld
14516 + mov %esp, %ecx
14517 + sub %edi, %ecx
14518 + shr $2, %ecx
14519 + rep stosl
14520 +
14521 + mov TI_task_thread_sp0(%ebp), %edi
14522 + sub $128, %edi
14523 + mov %edi, TI_lowest_stack(%ebp)
14524 +
14525 + popl %eax
14526 + popl %edi
14527 + ret
14528 +ENDPROC(pax_erase_kstack)
14529 +#endif
14530 +
14531 +.macro __SAVE_ALL _DS
14532 cld
14533 PUSH_GS
14534 pushl_cfi %fs
14535 @@ -208,7 +341,7 @@
14536 CFI_REL_OFFSET ecx, 0
14537 pushl_cfi %ebx
14538 CFI_REL_OFFSET ebx, 0
14539 - movl $(__USER_DS), %edx
14540 + movl $\_DS, %edx
14541 movl %edx, %ds
14542 movl %edx, %es
14543 movl $(__KERNEL_PERCPU), %edx
14544 @@ -216,6 +349,15 @@
14545 SET_KERNEL_GS %edx
14546 .endm
14547
14548 +.macro SAVE_ALL
14549 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
14550 + __SAVE_ALL __KERNEL_DS
14551 + pax_enter_kernel
14552 +#else
14553 + __SAVE_ALL __USER_DS
14554 +#endif
14555 +.endm
14556 +
14557 .macro RESTORE_INT_REGS
14558 popl_cfi %ebx
14559 CFI_RESTORE ebx
14560 @@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
14561 popfl_cfi
14562 jmp syscall_exit
14563 CFI_ENDPROC
14564 -END(ret_from_fork)
14565 +ENDPROC(ret_from_fork)
14566
14567 /*
14568 * Interrupt exit functions should be protected against kprobes
14569 @@ -335,7 +477,15 @@ resume_userspace_sig:
14570 andl $SEGMENT_RPL_MASK, %eax
14571 #endif
14572 cmpl $USER_RPL, %eax
14573 +
14574 +#ifdef CONFIG_PAX_KERNEXEC
14575 + jae resume_userspace
14576 +
14577 + PAX_EXIT_KERNEL
14578 + jmp resume_kernel
14579 +#else
14580 jb resume_kernel # not returning to v8086 or userspace
14581 +#endif
14582
14583 ENTRY(resume_userspace)
14584 LOCKDEP_SYS_EXIT
14585 @@ -347,8 +497,8 @@ ENTRY(resume_userspace)
14586 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
14587 # int/exception return?
14588 jne work_pending
14589 - jmp restore_all
14590 -END(ret_from_exception)
14591 + jmp restore_all_pax
14592 +ENDPROC(ret_from_exception)
14593
14594 #ifdef CONFIG_PREEMPT
14595 ENTRY(resume_kernel)
14596 @@ -363,7 +513,7 @@ need_resched:
14597 jz restore_all
14598 call preempt_schedule_irq
14599 jmp need_resched
14600 -END(resume_kernel)
14601 +ENDPROC(resume_kernel)
14602 #endif
14603 CFI_ENDPROC
14604 /*
14605 @@ -397,23 +547,34 @@ sysenter_past_esp:
14606 /*CFI_REL_OFFSET cs, 0*/
14607 /*
14608 * Push current_thread_info()->sysenter_return to the stack.
14609 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
14610 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
14611 */
14612 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
14613 + pushl_cfi $0
14614 CFI_REL_OFFSET eip, 0
14615
14616 pushl_cfi %eax
14617 SAVE_ALL
14618 + GET_THREAD_INFO(%ebp)
14619 + movl TI_sysenter_return(%ebp),%ebp
14620 + movl %ebp,PT_EIP(%esp)
14621 ENABLE_INTERRUPTS(CLBR_NONE)
14622
14623 /*
14624 * Load the potential sixth argument from user stack.
14625 * Careful about security.
14626 */
14627 + movl PT_OLDESP(%esp),%ebp
14628 +
14629 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14630 + mov PT_OLDSS(%esp),%ds
14631 +1: movl %ds:(%ebp),%ebp
14632 + push %ss
14633 + pop %ds
14634 +#else
14635 cmpl $__PAGE_OFFSET-3,%ebp
14636 jae syscall_fault
14637 1: movl (%ebp),%ebp
14638 +#endif
14639 +
14640 movl %ebp,PT_EBP(%esp)
14641 .section __ex_table,"a"
14642 .align 4
14643 @@ -436,12 +597,24 @@ sysenter_do_call:
14644 testl $_TIF_ALLWORK_MASK, %ecx
14645 jne sysexit_audit
14646 sysenter_exit:
14647 +
14648 +#ifdef CONFIG_PAX_RANDKSTACK
14649 + pushl_cfi %eax
14650 + movl %esp, %eax
14651 + call pax_randomize_kstack
14652 + popl_cfi %eax
14653 +#endif
14654 +
14655 + pax_erase_kstack
14656 +
14657 /* if something modifies registers it must also disable sysexit */
14658 movl PT_EIP(%esp), %edx
14659 movl PT_OLDESP(%esp), %ecx
14660 xorl %ebp,%ebp
14661 TRACE_IRQS_ON
14662 1: mov PT_FS(%esp), %fs
14663 +2: mov PT_DS(%esp), %ds
14664 +3: mov PT_ES(%esp), %es
14665 PTGS_TO_GS
14666 ENABLE_INTERRUPTS_SYSEXIT
14667
14668 @@ -458,6 +631,9 @@ sysenter_audit:
14669 movl %eax,%edx /* 2nd arg: syscall number */
14670 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
14671 call __audit_syscall_entry
14672 +
14673 + pax_erase_kstack
14674 +
14675 pushl_cfi %ebx
14676 movl PT_EAX(%esp),%eax /* reload syscall number */
14677 jmp sysenter_do_call
14678 @@ -483,11 +659,17 @@ sysexit_audit:
14679
14680 CFI_ENDPROC
14681 .pushsection .fixup,"ax"
14682 -2: movl $0,PT_FS(%esp)
14683 +4: movl $0,PT_FS(%esp)
14684 + jmp 1b
14685 +5: movl $0,PT_DS(%esp)
14686 + jmp 1b
14687 +6: movl $0,PT_ES(%esp)
14688 jmp 1b
14689 .section __ex_table,"a"
14690 .align 4
14691 - .long 1b,2b
14692 + .long 1b,4b
14693 + .long 2b,5b
14694 + .long 3b,6b
14695 .popsection
14696 PTGS_TO_GS_EX
14697 ENDPROC(ia32_sysenter_target)
14698 @@ -520,6 +702,15 @@ syscall_exit:
14699 testl $_TIF_ALLWORK_MASK, %ecx # current->work
14700 jne syscall_exit_work
14701
14702 +restore_all_pax:
14703 +
14704 +#ifdef CONFIG_PAX_RANDKSTACK
14705 + movl %esp, %eax
14706 + call pax_randomize_kstack
14707 +#endif
14708 +
14709 + pax_erase_kstack
14710 +
14711 restore_all:
14712 TRACE_IRQS_IRET
14713 restore_all_notrace:
14714 @@ -579,14 +770,34 @@ ldt_ss:
14715 * compensating for the offset by changing to the ESPFIX segment with
14716 * a base address that matches for the difference.
14717 */
14718 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
14719 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
14720 mov %esp, %edx /* load kernel esp */
14721 mov PT_OLDESP(%esp), %eax /* load userspace esp */
14722 mov %dx, %ax /* eax: new kernel esp */
14723 sub %eax, %edx /* offset (low word is 0) */
14724 +#ifdef CONFIG_SMP
14725 + movl PER_CPU_VAR(cpu_number), %ebx
14726 + shll $PAGE_SHIFT_asm, %ebx
14727 + addl $cpu_gdt_table, %ebx
14728 +#else
14729 + movl $cpu_gdt_table, %ebx
14730 +#endif
14731 shr $16, %edx
14732 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
14733 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
14734 +
14735 +#ifdef CONFIG_PAX_KERNEXEC
14736 + mov %cr0, %esi
14737 + btr $16, %esi
14738 + mov %esi, %cr0
14739 +#endif
14740 +
14741 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
14742 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
14743 +
14744 +#ifdef CONFIG_PAX_KERNEXEC
14745 + bts $16, %esi
14746 + mov %esi, %cr0
14747 +#endif
14748 +
14749 pushl_cfi $__ESPFIX_SS
14750 pushl_cfi %eax /* new kernel esp */
14751 /* Disable interrupts, but do not irqtrace this section: we
14752 @@ -615,38 +826,30 @@ work_resched:
14753 movl TI_flags(%ebp), %ecx
14754 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
14755 # than syscall tracing?
14756 - jz restore_all
14757 + jz restore_all_pax
14758 testb $_TIF_NEED_RESCHED, %cl
14759 jnz work_resched
14760
14761 work_notifysig: # deal with pending signals and
14762 # notify-resume requests
14763 + movl %esp, %eax
14764 #ifdef CONFIG_VM86
14765 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
14766 - movl %esp, %eax
14767 - jne work_notifysig_v86 # returning to kernel-space or
14768 + jz 1f # returning to kernel-space or
14769 # vm86-space
14770 - TRACE_IRQS_ON
14771 - ENABLE_INTERRUPTS(CLBR_NONE)
14772 - xorl %edx, %edx
14773 - call do_notify_resume
14774 - jmp resume_userspace_sig
14775
14776 - ALIGN
14777 -work_notifysig_v86:
14778 pushl_cfi %ecx # save ti_flags for do_notify_resume
14779 call save_v86_state # %eax contains pt_regs pointer
14780 popl_cfi %ecx
14781 movl %eax, %esp
14782 -#else
14783 - movl %esp, %eax
14784 +1:
14785 #endif
14786 TRACE_IRQS_ON
14787 ENABLE_INTERRUPTS(CLBR_NONE)
14788 xorl %edx, %edx
14789 call do_notify_resume
14790 jmp resume_userspace_sig
14791 -END(work_pending)
14792 +ENDPROC(work_pending)
14793
14794 # perform syscall exit tracing
14795 ALIGN
14796 @@ -654,11 +857,14 @@ syscall_trace_entry:
14797 movl $-ENOSYS,PT_EAX(%esp)
14798 movl %esp, %eax
14799 call syscall_trace_enter
14800 +
14801 + pax_erase_kstack
14802 +
14803 /* What it returned is what we'll actually use. */
14804 cmpl $(NR_syscalls), %eax
14805 jnae syscall_call
14806 jmp syscall_exit
14807 -END(syscall_trace_entry)
14808 +ENDPROC(syscall_trace_entry)
14809
14810 # perform syscall exit tracing
14811 ALIGN
14812 @@ -671,20 +877,24 @@ syscall_exit_work:
14813 movl %esp, %eax
14814 call syscall_trace_leave
14815 jmp resume_userspace
14816 -END(syscall_exit_work)
14817 +ENDPROC(syscall_exit_work)
14818 CFI_ENDPROC
14819
14820 RING0_INT_FRAME # can't unwind into user space anyway
14821 syscall_fault:
14822 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14823 + push %ss
14824 + pop %ds
14825 +#endif
14826 GET_THREAD_INFO(%ebp)
14827 movl $-EFAULT,PT_EAX(%esp)
14828 jmp resume_userspace
14829 -END(syscall_fault)
14830 +ENDPROC(syscall_fault)
14831
14832 syscall_badsys:
14833 movl $-ENOSYS,PT_EAX(%esp)
14834 jmp resume_userspace
14835 -END(syscall_badsys)
14836 +ENDPROC(syscall_badsys)
14837 CFI_ENDPROC
14838 /*
14839 * End of kprobes section
14840 @@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
14841 CFI_ENDPROC
14842 ENDPROC(ptregs_clone)
14843
14844 + ALIGN;
14845 +ENTRY(kernel_execve)
14846 + CFI_STARTPROC
14847 + pushl_cfi %ebp
14848 + sub $PT_OLDSS+4,%esp
14849 + pushl_cfi %edi
14850 + pushl_cfi %ecx
14851 + pushl_cfi %eax
14852 + lea 3*4(%esp),%edi
14853 + mov $PT_OLDSS/4+1,%ecx
14854 + xorl %eax,%eax
14855 + rep stosl
14856 + popl_cfi %eax
14857 + popl_cfi %ecx
14858 + popl_cfi %edi
14859 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
14860 + pushl_cfi %esp
14861 + call sys_execve
14862 + add $4,%esp
14863 + CFI_ADJUST_CFA_OFFSET -4
14864 + GET_THREAD_INFO(%ebp)
14865 + test %eax,%eax
14866 + jz syscall_exit
14867 + add $PT_OLDSS+4,%esp
14868 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
14869 + popl_cfi %ebp
14870 + ret
14871 + CFI_ENDPROC
14872 +ENDPROC(kernel_execve)
14873 +
14874 .macro FIXUP_ESPFIX_STACK
14875 /*
14876 * Switch back for ESPFIX stack to the normal zerobased stack
14877 @@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
14878 * normal stack and adjusts ESP with the matching offset.
14879 */
14880 /* fixup the stack */
14881 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
14882 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
14883 +#ifdef CONFIG_SMP
14884 + movl PER_CPU_VAR(cpu_number), %ebx
14885 + shll $PAGE_SHIFT_asm, %ebx
14886 + addl $cpu_gdt_table, %ebx
14887 +#else
14888 + movl $cpu_gdt_table, %ebx
14889 +#endif
14890 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
14891 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
14892 shl $16, %eax
14893 addl %esp, %eax /* the adjusted stack pointer */
14894 pushl_cfi $__KERNEL_DS
14895 @@ -819,7 +1066,7 @@ vector=vector+1
14896 .endr
14897 2: jmp common_interrupt
14898 .endr
14899 -END(irq_entries_start)
14900 +ENDPROC(irq_entries_start)
14901
14902 .previous
14903 END(interrupt)
14904 @@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
14905 pushl_cfi $do_coprocessor_error
14906 jmp error_code
14907 CFI_ENDPROC
14908 -END(coprocessor_error)
14909 +ENDPROC(coprocessor_error)
14910
14911 ENTRY(simd_coprocessor_error)
14912 RING0_INT_FRAME
14913 @@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
14914 #endif
14915 jmp error_code
14916 CFI_ENDPROC
14917 -END(simd_coprocessor_error)
14918 +ENDPROC(simd_coprocessor_error)
14919
14920 ENTRY(device_not_available)
14921 RING0_INT_FRAME
14922 @@ -896,7 +1143,7 @@ ENTRY(device_not_available)
14923 pushl_cfi $do_device_not_available
14924 jmp error_code
14925 CFI_ENDPROC
14926 -END(device_not_available)
14927 +ENDPROC(device_not_available)
14928
14929 #ifdef CONFIG_PARAVIRT
14930 ENTRY(native_iret)
14931 @@ -905,12 +1152,12 @@ ENTRY(native_iret)
14932 .align 4
14933 .long native_iret, iret_exc
14934 .previous
14935 -END(native_iret)
14936 +ENDPROC(native_iret)
14937
14938 ENTRY(native_irq_enable_sysexit)
14939 sti
14940 sysexit
14941 -END(native_irq_enable_sysexit)
14942 +ENDPROC(native_irq_enable_sysexit)
14943 #endif
14944
14945 ENTRY(overflow)
14946 @@ -919,7 +1166,7 @@ ENTRY(overflow)
14947 pushl_cfi $do_overflow
14948 jmp error_code
14949 CFI_ENDPROC
14950 -END(overflow)
14951 +ENDPROC(overflow)
14952
14953 ENTRY(bounds)
14954 RING0_INT_FRAME
14955 @@ -927,7 +1174,7 @@ ENTRY(bounds)
14956 pushl_cfi $do_bounds
14957 jmp error_code
14958 CFI_ENDPROC
14959 -END(bounds)
14960 +ENDPROC(bounds)
14961
14962 ENTRY(invalid_op)
14963 RING0_INT_FRAME
14964 @@ -935,7 +1182,7 @@ ENTRY(invalid_op)
14965 pushl_cfi $do_invalid_op
14966 jmp error_code
14967 CFI_ENDPROC
14968 -END(invalid_op)
14969 +ENDPROC(invalid_op)
14970
14971 ENTRY(coprocessor_segment_overrun)
14972 RING0_INT_FRAME
14973 @@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
14974 pushl_cfi $do_coprocessor_segment_overrun
14975 jmp error_code
14976 CFI_ENDPROC
14977 -END(coprocessor_segment_overrun)
14978 +ENDPROC(coprocessor_segment_overrun)
14979
14980 ENTRY(invalid_TSS)
14981 RING0_EC_FRAME
14982 pushl_cfi $do_invalid_TSS
14983 jmp error_code
14984 CFI_ENDPROC
14985 -END(invalid_TSS)
14986 +ENDPROC(invalid_TSS)
14987
14988 ENTRY(segment_not_present)
14989 RING0_EC_FRAME
14990 pushl_cfi $do_segment_not_present
14991 jmp error_code
14992 CFI_ENDPROC
14993 -END(segment_not_present)
14994 +ENDPROC(segment_not_present)
14995
14996 ENTRY(stack_segment)
14997 RING0_EC_FRAME
14998 pushl_cfi $do_stack_segment
14999 jmp error_code
15000 CFI_ENDPROC
15001 -END(stack_segment)
15002 +ENDPROC(stack_segment)
15003
15004 ENTRY(alignment_check)
15005 RING0_EC_FRAME
15006 pushl_cfi $do_alignment_check
15007 jmp error_code
15008 CFI_ENDPROC
15009 -END(alignment_check)
15010 +ENDPROC(alignment_check)
15011
15012 ENTRY(divide_error)
15013 RING0_INT_FRAME
15014 @@ -979,7 +1226,7 @@ ENTRY(divide_error)
15015 pushl_cfi $do_divide_error
15016 jmp error_code
15017 CFI_ENDPROC
15018 -END(divide_error)
15019 +ENDPROC(divide_error)
15020
15021 #ifdef CONFIG_X86_MCE
15022 ENTRY(machine_check)
15023 @@ -988,7 +1235,7 @@ ENTRY(machine_check)
15024 pushl_cfi machine_check_vector
15025 jmp error_code
15026 CFI_ENDPROC
15027 -END(machine_check)
15028 +ENDPROC(machine_check)
15029 #endif
15030
15031 ENTRY(spurious_interrupt_bug)
15032 @@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
15033 pushl_cfi $do_spurious_interrupt_bug
15034 jmp error_code
15035 CFI_ENDPROC
15036 -END(spurious_interrupt_bug)
15037 +ENDPROC(spurious_interrupt_bug)
15038 /*
15039 * End of kprobes section
15040 */
15041 @@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15042
15043 ENTRY(mcount)
15044 ret
15045 -END(mcount)
15046 +ENDPROC(mcount)
15047
15048 ENTRY(ftrace_caller)
15049 cmpl $0, function_trace_stop
15050 @@ -1141,7 +1388,7 @@ ftrace_graph_call:
15051 .globl ftrace_stub
15052 ftrace_stub:
15053 ret
15054 -END(ftrace_caller)
15055 +ENDPROC(ftrace_caller)
15056
15057 #else /* ! CONFIG_DYNAMIC_FTRACE */
15058
15059 @@ -1177,7 +1424,7 @@ trace:
15060 popl %ecx
15061 popl %eax
15062 jmp ftrace_stub
15063 -END(mcount)
15064 +ENDPROC(mcount)
15065 #endif /* CONFIG_DYNAMIC_FTRACE */
15066 #endif /* CONFIG_FUNCTION_TRACER */
15067
15068 @@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15069 popl %ecx
15070 popl %eax
15071 ret
15072 -END(ftrace_graph_caller)
15073 +ENDPROC(ftrace_graph_caller)
15074
15075 .globl return_to_handler
15076 return_to_handler:
15077 @@ -1253,15 +1500,18 @@ error_code:
15078 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15079 REG_TO_PTGS %ecx
15080 SET_KERNEL_GS %ecx
15081 - movl $(__USER_DS), %ecx
15082 + movl $(__KERNEL_DS), %ecx
15083 movl %ecx, %ds
15084 movl %ecx, %es
15085 +
15086 + pax_enter_kernel
15087 +
15088 TRACE_IRQS_OFF
15089 movl %esp,%eax # pt_regs pointer
15090 call *%edi
15091 jmp ret_from_exception
15092 CFI_ENDPROC
15093 -END(page_fault)
15094 +ENDPROC(page_fault)
15095
15096 /*
15097 * Debug traps and NMI can happen at the one SYSENTER instruction
15098 @@ -1303,7 +1553,7 @@ debug_stack_correct:
15099 call do_debug
15100 jmp ret_from_exception
15101 CFI_ENDPROC
15102 -END(debug)
15103 +ENDPROC(debug)
15104
15105 /*
15106 * NMI is doubly nasty. It can happen _while_ we're handling
15107 @@ -1340,6 +1590,9 @@ nmi_stack_correct:
15108 xorl %edx,%edx # zero error code
15109 movl %esp,%eax # pt_regs pointer
15110 call do_nmi
15111 +
15112 + pax_exit_kernel
15113 +
15114 jmp restore_all_notrace
15115 CFI_ENDPROC
15116
15117 @@ -1376,12 +1629,15 @@ nmi_espfix_stack:
15118 FIXUP_ESPFIX_STACK # %eax == %esp
15119 xorl %edx,%edx # zero error code
15120 call do_nmi
15121 +
15122 + pax_exit_kernel
15123 +
15124 RESTORE_REGS
15125 lss 12+4(%esp), %esp # back to espfix stack
15126 CFI_ADJUST_CFA_OFFSET -24
15127 jmp irq_return
15128 CFI_ENDPROC
15129 -END(nmi)
15130 +ENDPROC(nmi)
15131
15132 ENTRY(int3)
15133 RING0_INT_FRAME
15134 @@ -1393,14 +1649,14 @@ ENTRY(int3)
15135 call do_int3
15136 jmp ret_from_exception
15137 CFI_ENDPROC
15138 -END(int3)
15139 +ENDPROC(int3)
15140
15141 ENTRY(general_protection)
15142 RING0_EC_FRAME
15143 pushl_cfi $do_general_protection
15144 jmp error_code
15145 CFI_ENDPROC
15146 -END(general_protection)
15147 +ENDPROC(general_protection)
15148
15149 #ifdef CONFIG_KVM_GUEST
15150 ENTRY(async_page_fault)
15151 @@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
15152 pushl_cfi $do_async_page_fault
15153 jmp error_code
15154 CFI_ENDPROC
15155 -END(async_page_fault)
15156 +ENDPROC(async_page_fault)
15157 #endif
15158
15159 /*
15160 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15161 index 1333d98..b340ca2 100644
15162 --- a/arch/x86/kernel/entry_64.S
15163 +++ b/arch/x86/kernel/entry_64.S
15164 @@ -56,6 +56,8 @@
15165 #include <asm/ftrace.h>
15166 #include <asm/percpu.h>
15167 #include <linux/err.h>
15168 +#include <asm/pgtable.h>
15169 +#include <asm/alternative-asm.h>
15170
15171 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15172 #include <linux/elf-em.h>
15173 @@ -69,8 +71,9 @@
15174 #ifdef CONFIG_FUNCTION_TRACER
15175 #ifdef CONFIG_DYNAMIC_FTRACE
15176 ENTRY(mcount)
15177 + pax_force_retaddr
15178 retq
15179 -END(mcount)
15180 +ENDPROC(mcount)
15181
15182 ENTRY(ftrace_caller)
15183 cmpl $0, function_trace_stop
15184 @@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15185 #endif
15186
15187 GLOBAL(ftrace_stub)
15188 + pax_force_retaddr
15189 retq
15190 -END(ftrace_caller)
15191 +ENDPROC(ftrace_caller)
15192
15193 #else /* ! CONFIG_DYNAMIC_FTRACE */
15194 ENTRY(mcount)
15195 @@ -113,6 +117,7 @@ ENTRY(mcount)
15196 #endif
15197
15198 GLOBAL(ftrace_stub)
15199 + pax_force_retaddr
15200 retq
15201
15202 trace:
15203 @@ -122,12 +127,13 @@ trace:
15204 movq 8(%rbp), %rsi
15205 subq $MCOUNT_INSN_SIZE, %rdi
15206
15207 + pax_force_fptr ftrace_trace_function
15208 call *ftrace_trace_function
15209
15210 MCOUNT_RESTORE_FRAME
15211
15212 jmp ftrace_stub
15213 -END(mcount)
15214 +ENDPROC(mcount)
15215 #endif /* CONFIG_DYNAMIC_FTRACE */
15216 #endif /* CONFIG_FUNCTION_TRACER */
15217
15218 @@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15219
15220 MCOUNT_RESTORE_FRAME
15221
15222 + pax_force_retaddr
15223 retq
15224 -END(ftrace_graph_caller)
15225 +ENDPROC(ftrace_graph_caller)
15226
15227 GLOBAL(return_to_handler)
15228 subq $24, %rsp
15229 @@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15230 movq 8(%rsp), %rdx
15231 movq (%rsp), %rax
15232 addq $24, %rsp
15233 + pax_force_fptr %rdi
15234 jmp *%rdi
15235 #endif
15236
15237 @@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15238 ENDPROC(native_usergs_sysret64)
15239 #endif /* CONFIG_PARAVIRT */
15240
15241 + .macro ljmpq sel, off
15242 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15243 + .byte 0x48; ljmp *1234f(%rip)
15244 + .pushsection .rodata
15245 + .align 16
15246 + 1234: .quad \off; .word \sel
15247 + .popsection
15248 +#else
15249 + pushq $\sel
15250 + pushq $\off
15251 + lretq
15252 +#endif
15253 + .endm
15254 +
15255 + .macro pax_enter_kernel
15256 + pax_set_fptr_mask
15257 +#ifdef CONFIG_PAX_KERNEXEC
15258 + call pax_enter_kernel
15259 +#endif
15260 + .endm
15261 +
15262 + .macro pax_exit_kernel
15263 +#ifdef CONFIG_PAX_KERNEXEC
15264 + call pax_exit_kernel
15265 +#endif
15266 + .endm
15267 +
15268 +#ifdef CONFIG_PAX_KERNEXEC
15269 +ENTRY(pax_enter_kernel)
15270 + pushq %rdi
15271 +
15272 +#ifdef CONFIG_PARAVIRT
15273 + PV_SAVE_REGS(CLBR_RDI)
15274 +#endif
15275 +
15276 + GET_CR0_INTO_RDI
15277 + bts $16,%rdi
15278 + jnc 3f
15279 + mov %cs,%edi
15280 + cmp $__KERNEL_CS,%edi
15281 + jnz 2f
15282 +1:
15283 +
15284 +#ifdef CONFIG_PARAVIRT
15285 + PV_RESTORE_REGS(CLBR_RDI)
15286 +#endif
15287 +
15288 + popq %rdi
15289 + pax_force_retaddr
15290 + retq
15291 +
15292 +2: ljmpq __KERNEL_CS,1f
15293 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15294 +4: SET_RDI_INTO_CR0
15295 + jmp 1b
15296 +ENDPROC(pax_enter_kernel)
15297 +
15298 +ENTRY(pax_exit_kernel)
15299 + pushq %rdi
15300 +
15301 +#ifdef CONFIG_PARAVIRT
15302 + PV_SAVE_REGS(CLBR_RDI)
15303 +#endif
15304 +
15305 + mov %cs,%rdi
15306 + cmp $__KERNEXEC_KERNEL_CS,%edi
15307 + jz 2f
15308 +1:
15309 +
15310 +#ifdef CONFIG_PARAVIRT
15311 + PV_RESTORE_REGS(CLBR_RDI);
15312 +#endif
15313 +
15314 + popq %rdi
15315 + pax_force_retaddr
15316 + retq
15317 +
15318 +2: GET_CR0_INTO_RDI
15319 + btr $16,%rdi
15320 + ljmpq __KERNEL_CS,3f
15321 +3: SET_RDI_INTO_CR0
15322 + jmp 1b
15323 +#ifdef CONFIG_PARAVIRT
15324 + PV_RESTORE_REGS(CLBR_RDI);
15325 +#endif
15326 +
15327 + popq %rdi
15328 + pax_force_retaddr
15329 + retq
15330 +ENDPROC(pax_exit_kernel)
15331 +#endif
15332 +
15333 + .macro pax_enter_kernel_user
15334 + pax_set_fptr_mask
15335 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15336 + call pax_enter_kernel_user
15337 +#endif
15338 + .endm
15339 +
15340 + .macro pax_exit_kernel_user
15341 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15342 + call pax_exit_kernel_user
15343 +#endif
15344 +#ifdef CONFIG_PAX_RANDKSTACK
15345 + pushq %rax
15346 + call pax_randomize_kstack
15347 + popq %rax
15348 +#endif
15349 + .endm
15350 +
15351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15352 +ENTRY(pax_enter_kernel_user)
15353 + pushq %rdi
15354 + pushq %rbx
15355 +
15356 +#ifdef CONFIG_PARAVIRT
15357 + PV_SAVE_REGS(CLBR_RDI)
15358 +#endif
15359 +
15360 + GET_CR3_INTO_RDI
15361 + mov %rdi,%rbx
15362 + add $__START_KERNEL_map,%rbx
15363 + sub phys_base(%rip),%rbx
15364 +
15365 +#ifdef CONFIG_PARAVIRT
15366 + pushq %rdi
15367 + cmpl $0, pv_info+PARAVIRT_enabled
15368 + jz 1f
15369 + i = 0
15370 + .rept USER_PGD_PTRS
15371 + mov i*8(%rbx),%rsi
15372 + mov $0,%sil
15373 + lea i*8(%rbx),%rdi
15374 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15375 + i = i + 1
15376 + .endr
15377 + jmp 2f
15378 +1:
15379 +#endif
15380 +
15381 + i = 0
15382 + .rept USER_PGD_PTRS
15383 + movb $0,i*8(%rbx)
15384 + i = i + 1
15385 + .endr
15386 +
15387 +#ifdef CONFIG_PARAVIRT
15388 +2: popq %rdi
15389 +#endif
15390 + SET_RDI_INTO_CR3
15391 +
15392 +#ifdef CONFIG_PAX_KERNEXEC
15393 + GET_CR0_INTO_RDI
15394 + bts $16,%rdi
15395 + SET_RDI_INTO_CR0
15396 +#endif
15397 +
15398 +#ifdef CONFIG_PARAVIRT
15399 + PV_RESTORE_REGS(CLBR_RDI)
15400 +#endif
15401 +
15402 + popq %rbx
15403 + popq %rdi
15404 + pax_force_retaddr
15405 + retq
15406 +ENDPROC(pax_enter_kernel_user)
15407 +
15408 +ENTRY(pax_exit_kernel_user)
15409 + push %rdi
15410 +
15411 +#ifdef CONFIG_PARAVIRT
15412 + pushq %rbx
15413 + PV_SAVE_REGS(CLBR_RDI)
15414 +#endif
15415 +
15416 +#ifdef CONFIG_PAX_KERNEXEC
15417 + GET_CR0_INTO_RDI
15418 + btr $16,%rdi
15419 + SET_RDI_INTO_CR0
15420 +#endif
15421 +
15422 + GET_CR3_INTO_RDI
15423 + add $__START_KERNEL_map,%rdi
15424 + sub phys_base(%rip),%rdi
15425 +
15426 +#ifdef CONFIG_PARAVIRT
15427 + cmpl $0, pv_info+PARAVIRT_enabled
15428 + jz 1f
15429 + mov %rdi,%rbx
15430 + i = 0
15431 + .rept USER_PGD_PTRS
15432 + mov i*8(%rbx),%rsi
15433 + mov $0x67,%sil
15434 + lea i*8(%rbx),%rdi
15435 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15436 + i = i + 1
15437 + .endr
15438 + jmp 2f
15439 +1:
15440 +#endif
15441 +
15442 + i = 0
15443 + .rept USER_PGD_PTRS
15444 + movb $0x67,i*8(%rdi)
15445 + i = i + 1
15446 + .endr
15447 +
15448 +#ifdef CONFIG_PARAVIRT
15449 +2: PV_RESTORE_REGS(CLBR_RDI)
15450 + popq %rbx
15451 +#endif
15452 +
15453 + popq %rdi
15454 + pax_force_retaddr
15455 + retq
15456 +ENDPROC(pax_exit_kernel_user)
15457 +#endif
15458 +
15459 +.macro pax_erase_kstack
15460 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15461 + call pax_erase_kstack
15462 +#endif
15463 +.endm
15464 +
15465 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15466 +/*
15467 + * r11: thread_info
15468 + * rcx, rdx: can be clobbered
15469 + */
15470 +ENTRY(pax_erase_kstack)
15471 + pushq %rdi
15472 + pushq %rax
15473 + pushq %r11
15474 +
15475 + GET_THREAD_INFO(%r11)
15476 + mov TI_lowest_stack(%r11), %rdi
15477 + mov $-0xBEEF, %rax
15478 + std
15479 +
15480 +1: mov %edi, %ecx
15481 + and $THREAD_SIZE_asm - 1, %ecx
15482 + shr $3, %ecx
15483 + repne scasq
15484 + jecxz 2f
15485 +
15486 + cmp $2*8, %ecx
15487 + jc 2f
15488 +
15489 + mov $2*8, %ecx
15490 + repe scasq
15491 + jecxz 2f
15492 + jne 1b
15493 +
15494 +2: cld
15495 + mov %esp, %ecx
15496 + sub %edi, %ecx
15497 +
15498 + cmp $THREAD_SIZE_asm, %rcx
15499 + jb 3f
15500 + ud2
15501 +3:
15502 +
15503 + shr $3, %ecx
15504 + rep stosq
15505 +
15506 + mov TI_task_thread_sp0(%r11), %rdi
15507 + sub $256, %rdi
15508 + mov %rdi, TI_lowest_stack(%r11)
15509 +
15510 + popq %r11
15511 + popq %rax
15512 + popq %rdi
15513 + pax_force_retaddr
15514 + ret
15515 +ENDPROC(pax_erase_kstack)
15516 +#endif
15517
15518 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15519 #ifdef CONFIG_TRACE_IRQFLAGS
15520 @@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
15521 .endm
15522
15523 .macro UNFAKE_STACK_FRAME
15524 - addq $8*6, %rsp
15525 - CFI_ADJUST_CFA_OFFSET -(6*8)
15526 + addq $8*6 + ARG_SKIP, %rsp
15527 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
15528 .endm
15529
15530 /*
15531 @@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
15532 movq %rsp, %rsi
15533
15534 leaq -RBP(%rsp),%rdi /* arg1 for handler */
15535 - testl $3, CS(%rdi)
15536 + testb $3, CS(%rdi)
15537 je 1f
15538 SWAPGS
15539 /*
15540 @@ -356,9 +640,10 @@ ENTRY(save_rest)
15541 movq_cfi r15, R15+16
15542 movq %r11, 8(%rsp) /* return address */
15543 FIXUP_TOP_OF_STACK %r11, 16
15544 + pax_force_retaddr
15545 ret
15546 CFI_ENDPROC
15547 -END(save_rest)
15548 +ENDPROC(save_rest)
15549
15550 /* save complete stack frame */
15551 .pushsection .kprobes.text, "ax"
15552 @@ -387,9 +672,10 @@ ENTRY(save_paranoid)
15553 js 1f /* negative -> in kernel */
15554 SWAPGS
15555 xorl %ebx,%ebx
15556 -1: ret
15557 +1: pax_force_retaddr_bts
15558 + ret
15559 CFI_ENDPROC
15560 -END(save_paranoid)
15561 +ENDPROC(save_paranoid)
15562 .popsection
15563
15564 /*
15565 @@ -411,7 +697,7 @@ ENTRY(ret_from_fork)
15566
15567 RESTORE_REST
15568
15569 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15570 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15571 jz retint_restore_args
15572
15573 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
15574 @@ -421,7 +707,7 @@ ENTRY(ret_from_fork)
15575 jmp ret_from_sys_call # go to the SYSRET fastpath
15576
15577 CFI_ENDPROC
15578 -END(ret_from_fork)
15579 +ENDPROC(ret_from_fork)
15580
15581 /*
15582 * System call entry. Up to 6 arguments in registers are supported.
15583 @@ -457,7 +743,7 @@ END(ret_from_fork)
15584 ENTRY(system_call)
15585 CFI_STARTPROC simple
15586 CFI_SIGNAL_FRAME
15587 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15588 + CFI_DEF_CFA rsp,0
15589 CFI_REGISTER rip,rcx
15590 /*CFI_REGISTER rflags,r11*/
15591 SWAPGS_UNSAFE_STACK
15592 @@ -470,21 +756,23 @@ GLOBAL(system_call_after_swapgs)
15593
15594 movq %rsp,PER_CPU_VAR(old_rsp)
15595 movq PER_CPU_VAR(kernel_stack),%rsp
15596 + SAVE_ARGS 8*6,0
15597 + pax_enter_kernel_user
15598 /*
15599 * No need to follow this irqs off/on section - it's straight
15600 * and short:
15601 */
15602 ENABLE_INTERRUPTS(CLBR_NONE)
15603 - SAVE_ARGS 8,0
15604 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15605 movq %rcx,RIP-ARGOFFSET(%rsp)
15606 CFI_REL_OFFSET rip,RIP-ARGOFFSET
15607 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15608 + GET_THREAD_INFO(%rcx)
15609 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
15610 jnz tracesys
15611 system_call_fastpath:
15612 cmpq $__NR_syscall_max,%rax
15613 ja badsys
15614 - movq %r10,%rcx
15615 + movq R10-ARGOFFSET(%rsp),%rcx
15616 call *sys_call_table(,%rax,8) # XXX: rip relative
15617 movq %rax,RAX-ARGOFFSET(%rsp)
15618 /*
15619 @@ -498,10 +786,13 @@ sysret_check:
15620 LOCKDEP_SYS_EXIT
15621 DISABLE_INTERRUPTS(CLBR_NONE)
15622 TRACE_IRQS_OFF
15623 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
15624 + GET_THREAD_INFO(%rcx)
15625 + movl TI_flags(%rcx),%edx
15626 andl %edi,%edx
15627 jnz sysret_careful
15628 CFI_REMEMBER_STATE
15629 + pax_exit_kernel_user
15630 + pax_erase_kstack
15631 /*
15632 * sysretq will re-enable interrupts:
15633 */
15634 @@ -553,14 +844,18 @@ badsys:
15635 * jump back to the normal fast path.
15636 */
15637 auditsys:
15638 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
15639 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
15640 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
15641 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
15642 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
15643 movq %rax,%rsi /* 2nd arg: syscall number */
15644 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
15645 call __audit_syscall_entry
15646 +
15647 + pax_erase_kstack
15648 +
15649 LOAD_ARGS 0 /* reload call-clobbered registers */
15650 + pax_set_fptr_mask
15651 jmp system_call_fastpath
15652
15653 /*
15654 @@ -581,7 +876,7 @@ sysret_audit:
15655 /* Do syscall tracing */
15656 tracesys:
15657 #ifdef CONFIG_AUDITSYSCALL
15658 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15659 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
15660 jz auditsys
15661 #endif
15662 SAVE_REST
15663 @@ -589,16 +884,20 @@ tracesys:
15664 FIXUP_TOP_OF_STACK %rdi
15665 movq %rsp,%rdi
15666 call syscall_trace_enter
15667 +
15668 + pax_erase_kstack
15669 +
15670 /*
15671 * Reload arg registers from stack in case ptrace changed them.
15672 * We don't reload %rax because syscall_trace_enter() returned
15673 * the value it wants us to use in the table lookup.
15674 */
15675 LOAD_ARGS ARGOFFSET, 1
15676 + pax_set_fptr_mask
15677 RESTORE_REST
15678 cmpq $__NR_syscall_max,%rax
15679 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
15680 - movq %r10,%rcx /* fixup for C */
15681 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
15682 call *sys_call_table(,%rax,8)
15683 movq %rax,RAX-ARGOFFSET(%rsp)
15684 /* Use IRET because user could have changed frame */
15685 @@ -619,6 +918,7 @@ GLOBAL(int_with_check)
15686 andl %edi,%edx
15687 jnz int_careful
15688 andl $~TS_COMPAT,TI_status(%rcx)
15689 + pax_erase_kstack
15690 jmp retint_swapgs
15691
15692 /* Either reschedule or signal or syscall exit tracking needed. */
15693 @@ -665,7 +965,7 @@ int_restore_rest:
15694 TRACE_IRQS_OFF
15695 jmp int_with_check
15696 CFI_ENDPROC
15697 -END(system_call)
15698 +ENDPROC(system_call)
15699
15700 /*
15701 * Certain special system calls that need to save a complete full stack frame.
15702 @@ -681,7 +981,7 @@ ENTRY(\label)
15703 call \func
15704 jmp ptregscall_common
15705 CFI_ENDPROC
15706 -END(\label)
15707 +ENDPROC(\label)
15708 .endm
15709
15710 PTREGSCALL stub_clone, sys_clone, %r8
15711 @@ -699,9 +999,10 @@ ENTRY(ptregscall_common)
15712 movq_cfi_restore R12+8, r12
15713 movq_cfi_restore RBP+8, rbp
15714 movq_cfi_restore RBX+8, rbx
15715 + pax_force_retaddr
15716 ret $REST_SKIP /* pop extended registers */
15717 CFI_ENDPROC
15718 -END(ptregscall_common)
15719 +ENDPROC(ptregscall_common)
15720
15721 ENTRY(stub_execve)
15722 CFI_STARTPROC
15723 @@ -716,7 +1017,7 @@ ENTRY(stub_execve)
15724 RESTORE_REST
15725 jmp int_ret_from_sys_call
15726 CFI_ENDPROC
15727 -END(stub_execve)
15728 +ENDPROC(stub_execve)
15729
15730 /*
15731 * sigreturn is special because it needs to restore all registers on return.
15732 @@ -734,7 +1035,7 @@ ENTRY(stub_rt_sigreturn)
15733 RESTORE_REST
15734 jmp int_ret_from_sys_call
15735 CFI_ENDPROC
15736 -END(stub_rt_sigreturn)
15737 +ENDPROC(stub_rt_sigreturn)
15738
15739 /*
15740 * Build the entry stubs and pointer table with some assembler magic.
15741 @@ -769,7 +1070,7 @@ vector=vector+1
15742 2: jmp common_interrupt
15743 .endr
15744 CFI_ENDPROC
15745 -END(irq_entries_start)
15746 +ENDPROC(irq_entries_start)
15747
15748 .previous
15749 END(interrupt)
15750 @@ -789,6 +1090,16 @@ END(interrupt)
15751 subq $ORIG_RAX-RBP, %rsp
15752 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
15753 SAVE_ARGS_IRQ
15754 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15755 + testb $3, CS(%rdi)
15756 + jnz 1f
15757 + pax_enter_kernel
15758 + jmp 2f
15759 +1: pax_enter_kernel_user
15760 +2:
15761 +#else
15762 + pax_enter_kernel
15763 +#endif
15764 call \func
15765 .endm
15766
15767 @@ -820,7 +1131,7 @@ ret_from_intr:
15768
15769 exit_intr:
15770 GET_THREAD_INFO(%rcx)
15771 - testl $3,CS-ARGOFFSET(%rsp)
15772 + testb $3,CS-ARGOFFSET(%rsp)
15773 je retint_kernel
15774
15775 /* Interrupt came from user space */
15776 @@ -842,12 +1153,15 @@ retint_swapgs: /* return to user-space */
15777 * The iretq could re-enable interrupts:
15778 */
15779 DISABLE_INTERRUPTS(CLBR_ANY)
15780 + pax_exit_kernel_user
15781 TRACE_IRQS_IRETQ
15782 SWAPGS
15783 jmp restore_args
15784
15785 retint_restore_args: /* return to kernel space */
15786 DISABLE_INTERRUPTS(CLBR_ANY)
15787 + pax_exit_kernel
15788 + pax_force_retaddr RIP-ARGOFFSET
15789 /*
15790 * The iretq could re-enable interrupts:
15791 */
15792 @@ -936,7 +1250,7 @@ ENTRY(retint_kernel)
15793 #endif
15794
15795 CFI_ENDPROC
15796 -END(common_interrupt)
15797 +ENDPROC(common_interrupt)
15798 /*
15799 * End of kprobes section
15800 */
15801 @@ -953,7 +1267,7 @@ ENTRY(\sym)
15802 interrupt \do_sym
15803 jmp ret_from_intr
15804 CFI_ENDPROC
15805 -END(\sym)
15806 +ENDPROC(\sym)
15807 .endm
15808
15809 #ifdef CONFIG_SMP
15810 @@ -1026,12 +1340,22 @@ ENTRY(\sym)
15811 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15812 call error_entry
15813 DEFAULT_FRAME 0
15814 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15815 + testb $3, CS(%rsp)
15816 + jnz 1f
15817 + pax_enter_kernel
15818 + jmp 2f
15819 +1: pax_enter_kernel_user
15820 +2:
15821 +#else
15822 + pax_enter_kernel
15823 +#endif
15824 movq %rsp,%rdi /* pt_regs pointer */
15825 xorl %esi,%esi /* no error code */
15826 call \do_sym
15827 jmp error_exit /* %ebx: no swapgs flag */
15828 CFI_ENDPROC
15829 -END(\sym)
15830 +ENDPROC(\sym)
15831 .endm
15832
15833 .macro paranoidzeroentry sym do_sym
15834 @@ -1043,15 +1367,25 @@ ENTRY(\sym)
15835 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15836 call save_paranoid
15837 TRACE_IRQS_OFF
15838 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15839 + testb $3, CS(%rsp)
15840 + jnz 1f
15841 + pax_enter_kernel
15842 + jmp 2f
15843 +1: pax_enter_kernel_user
15844 +2:
15845 +#else
15846 + pax_enter_kernel
15847 +#endif
15848 movq %rsp,%rdi /* pt_regs pointer */
15849 xorl %esi,%esi /* no error code */
15850 call \do_sym
15851 jmp paranoid_exit /* %ebx: no swapgs flag */
15852 CFI_ENDPROC
15853 -END(\sym)
15854 +ENDPROC(\sym)
15855 .endm
15856
15857 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
15858 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
15859 .macro paranoidzeroentry_ist sym do_sym ist
15860 ENTRY(\sym)
15861 INTR_FRAME
15862 @@ -1061,14 +1395,30 @@ ENTRY(\sym)
15863 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15864 call save_paranoid
15865 TRACE_IRQS_OFF
15866 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15867 + testb $3, CS(%rsp)
15868 + jnz 1f
15869 + pax_enter_kernel
15870 + jmp 2f
15871 +1: pax_enter_kernel_user
15872 +2:
15873 +#else
15874 + pax_enter_kernel
15875 +#endif
15876 movq %rsp,%rdi /* pt_regs pointer */
15877 xorl %esi,%esi /* no error code */
15878 +#ifdef CONFIG_SMP
15879 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
15880 + lea init_tss(%r12), %r12
15881 +#else
15882 + lea init_tss(%rip), %r12
15883 +#endif
15884 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
15885 call \do_sym
15886 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
15887 jmp paranoid_exit /* %ebx: no swapgs flag */
15888 CFI_ENDPROC
15889 -END(\sym)
15890 +ENDPROC(\sym)
15891 .endm
15892
15893 .macro errorentry sym do_sym
15894 @@ -1079,13 +1429,23 @@ ENTRY(\sym)
15895 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15896 call error_entry
15897 DEFAULT_FRAME 0
15898 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15899 + testb $3, CS(%rsp)
15900 + jnz 1f
15901 + pax_enter_kernel
15902 + jmp 2f
15903 +1: pax_enter_kernel_user
15904 +2:
15905 +#else
15906 + pax_enter_kernel
15907 +#endif
15908 movq %rsp,%rdi /* pt_regs pointer */
15909 movq ORIG_RAX(%rsp),%rsi /* get error code */
15910 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
15911 call \do_sym
15912 jmp error_exit /* %ebx: no swapgs flag */
15913 CFI_ENDPROC
15914 -END(\sym)
15915 +ENDPROC(\sym)
15916 .endm
15917
15918 /* error code is on the stack already */
15919 @@ -1098,13 +1458,23 @@ ENTRY(\sym)
15920 call save_paranoid
15921 DEFAULT_FRAME 0
15922 TRACE_IRQS_OFF
15923 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15924 + testb $3, CS(%rsp)
15925 + jnz 1f
15926 + pax_enter_kernel
15927 + jmp 2f
15928 +1: pax_enter_kernel_user
15929 +2:
15930 +#else
15931 + pax_enter_kernel
15932 +#endif
15933 movq %rsp,%rdi /* pt_regs pointer */
15934 movq ORIG_RAX(%rsp),%rsi /* get error code */
15935 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
15936 call \do_sym
15937 jmp paranoid_exit /* %ebx: no swapgs flag */
15938 CFI_ENDPROC
15939 -END(\sym)
15940 +ENDPROC(\sym)
15941 .endm
15942
15943 zeroentry divide_error do_divide_error
15944 @@ -1134,9 +1504,10 @@ gs_change:
15945 2: mfence /* workaround */
15946 SWAPGS
15947 popfq_cfi
15948 + pax_force_retaddr
15949 ret
15950 CFI_ENDPROC
15951 -END(native_load_gs_index)
15952 +ENDPROC(native_load_gs_index)
15953
15954 .section __ex_table,"a"
15955 .align 8
15956 @@ -1158,13 +1529,14 @@ ENTRY(kernel_thread_helper)
15957 * Here we are in the child and the registers are set as they were
15958 * at kernel_thread() invocation in the parent.
15959 */
15960 + pax_force_fptr %rsi
15961 call *%rsi
15962 # exit
15963 mov %eax, %edi
15964 call do_exit
15965 ud2 # padding for call trace
15966 CFI_ENDPROC
15967 -END(kernel_thread_helper)
15968 +ENDPROC(kernel_thread_helper)
15969
15970 /*
15971 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
15972 @@ -1191,11 +1563,11 @@ ENTRY(kernel_execve)
15973 RESTORE_REST
15974 testq %rax,%rax
15975 je int_ret_from_sys_call
15976 - RESTORE_ARGS
15977 UNFAKE_STACK_FRAME
15978 + pax_force_retaddr
15979 ret
15980 CFI_ENDPROC
15981 -END(kernel_execve)
15982 +ENDPROC(kernel_execve)
15983
15984 /* Call softirq on interrupt stack. Interrupts are off. */
15985 ENTRY(call_softirq)
15986 @@ -1213,9 +1585,10 @@ ENTRY(call_softirq)
15987 CFI_DEF_CFA_REGISTER rsp
15988 CFI_ADJUST_CFA_OFFSET -8
15989 decl PER_CPU_VAR(irq_count)
15990 + pax_force_retaddr
15991 ret
15992 CFI_ENDPROC
15993 -END(call_softirq)
15994 +ENDPROC(call_softirq)
15995
15996 #ifdef CONFIG_XEN
15997 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
15998 @@ -1253,7 +1626,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
15999 decl PER_CPU_VAR(irq_count)
16000 jmp error_exit
16001 CFI_ENDPROC
16002 -END(xen_do_hypervisor_callback)
16003 +ENDPROC(xen_do_hypervisor_callback)
16004
16005 /*
16006 * Hypervisor uses this for application faults while it executes.
16007 @@ -1312,7 +1685,7 @@ ENTRY(xen_failsafe_callback)
16008 SAVE_ALL
16009 jmp error_exit
16010 CFI_ENDPROC
16011 -END(xen_failsafe_callback)
16012 +ENDPROC(xen_failsafe_callback)
16013
16014 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16015 xen_hvm_callback_vector xen_evtchn_do_upcall
16016 @@ -1361,16 +1734,31 @@ ENTRY(paranoid_exit)
16017 TRACE_IRQS_OFF
16018 testl %ebx,%ebx /* swapgs needed? */
16019 jnz paranoid_restore
16020 - testl $3,CS(%rsp)
16021 + testb $3,CS(%rsp)
16022 jnz paranoid_userspace
16023 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16024 + pax_exit_kernel
16025 + TRACE_IRQS_IRETQ 0
16026 + SWAPGS_UNSAFE_STACK
16027 + RESTORE_ALL 8
16028 + pax_force_retaddr_bts
16029 + jmp irq_return
16030 +#endif
16031 paranoid_swapgs:
16032 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16033 + pax_exit_kernel_user
16034 +#else
16035 + pax_exit_kernel
16036 +#endif
16037 TRACE_IRQS_IRETQ 0
16038 SWAPGS_UNSAFE_STACK
16039 RESTORE_ALL 8
16040 jmp irq_return
16041 paranoid_restore:
16042 + pax_exit_kernel
16043 TRACE_IRQS_IRETQ 0
16044 RESTORE_ALL 8
16045 + pax_force_retaddr_bts
16046 jmp irq_return
16047 paranoid_userspace:
16048 GET_THREAD_INFO(%rcx)
16049 @@ -1399,7 +1787,7 @@ paranoid_schedule:
16050 TRACE_IRQS_OFF
16051 jmp paranoid_userspace
16052 CFI_ENDPROC
16053 -END(paranoid_exit)
16054 +ENDPROC(paranoid_exit)
16055
16056 /*
16057 * Exception entry point. This expects an error code/orig_rax on the stack.
16058 @@ -1426,12 +1814,13 @@ ENTRY(error_entry)
16059 movq_cfi r14, R14+8
16060 movq_cfi r15, R15+8
16061 xorl %ebx,%ebx
16062 - testl $3,CS+8(%rsp)
16063 + testb $3,CS+8(%rsp)
16064 je error_kernelspace
16065 error_swapgs:
16066 SWAPGS
16067 error_sti:
16068 TRACE_IRQS_OFF
16069 + pax_force_retaddr_bts
16070 ret
16071
16072 /*
16073 @@ -1458,7 +1847,7 @@ bstep_iret:
16074 movq %rcx,RIP+8(%rsp)
16075 jmp error_swapgs
16076 CFI_ENDPROC
16077 -END(error_entry)
16078 +ENDPROC(error_entry)
16079
16080
16081 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16082 @@ -1478,7 +1867,7 @@ ENTRY(error_exit)
16083 jnz retint_careful
16084 jmp retint_swapgs
16085 CFI_ENDPROC
16086 -END(error_exit)
16087 +ENDPROC(error_exit)
16088
16089 /*
16090 * Test if a given stack is an NMI stack or not.
16091 @@ -1535,9 +1924,11 @@ ENTRY(nmi)
16092 * If %cs was not the kernel segment, then the NMI triggered in user
16093 * space, which means it is definitely not nested.
16094 */
16095 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16096 + je 1f
16097 cmpl $__KERNEL_CS, 16(%rsp)
16098 jne first_nmi
16099 -
16100 +1:
16101 /*
16102 * Check the special variable on the stack to see if NMIs are
16103 * executing.
16104 @@ -1659,6 +2050,16 @@ restart_nmi:
16105 */
16106 call save_paranoid
16107 DEFAULT_FRAME 0
16108 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16109 + testb $3, CS(%rsp)
16110 + jnz 1f
16111 + pax_enter_kernel
16112 + jmp 2f
16113 +1: pax_enter_kernel_user
16114 +2:
16115 +#else
16116 + pax_enter_kernel
16117 +#endif
16118 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16119 movq %rsp,%rdi
16120 movq $-1,%rsi
16121 @@ -1666,14 +2067,25 @@ restart_nmi:
16122 testl %ebx,%ebx /* swapgs needed? */
16123 jnz nmi_restore
16124 nmi_swapgs:
16125 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16126 + pax_exit_kernel_user
16127 +#else
16128 + pax_exit_kernel
16129 +#endif
16130 SWAPGS_UNSAFE_STACK
16131 + RESTORE_ALL 8
16132 + /* Clear the NMI executing stack variable */
16133 + movq $0, 10*8(%rsp)
16134 + jmp irq_return
16135 nmi_restore:
16136 + pax_exit_kernel
16137 RESTORE_ALL 8
16138 + pax_force_retaddr_bts
16139 /* Clear the NMI executing stack variable */
16140 movq $0, 10*8(%rsp)
16141 jmp irq_return
16142 CFI_ENDPROC
16143 -END(nmi)
16144 +ENDPROC(nmi)
16145
16146 /*
16147 * If an NMI hit an iret because of an exception or breakpoint,
16148 @@ -1700,7 +2112,7 @@ ENTRY(ignore_sysret)
16149 mov $-ENOSYS,%eax
16150 sysret
16151 CFI_ENDPROC
16152 -END(ignore_sysret)
16153 +ENDPROC(ignore_sysret)
16154
16155 /*
16156 * End of kprobes section
16157 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16158 index c9a281f..ce2f317 100644
16159 --- a/arch/x86/kernel/ftrace.c
16160 +++ b/arch/x86/kernel/ftrace.c
16161 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16162 static const void *mod_code_newcode; /* holds the text to write to the IP */
16163
16164 static unsigned nmi_wait_count;
16165 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16166 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16167
16168 int ftrace_arch_read_dyn_info(char *buf, int size)
16169 {
16170 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16171
16172 r = snprintf(buf, size, "%u %u",
16173 nmi_wait_count,
16174 - atomic_read(&nmi_update_count));
16175 + atomic_read_unchecked(&nmi_update_count));
16176 return r;
16177 }
16178
16179 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16180
16181 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16182 smp_rmb();
16183 + pax_open_kernel();
16184 ftrace_mod_code();
16185 - atomic_inc(&nmi_update_count);
16186 + pax_close_kernel();
16187 + atomic_inc_unchecked(&nmi_update_count);
16188 }
16189 /* Must have previous changes seen before executions */
16190 smp_mb();
16191 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16192 {
16193 unsigned char replaced[MCOUNT_INSN_SIZE];
16194
16195 + ip = ktla_ktva(ip);
16196 +
16197 /*
16198 * Note: Due to modules and __init, code can
16199 * disappear and change, we need to protect against faulting
16200 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16201 unsigned char old[MCOUNT_INSN_SIZE], *new;
16202 int ret;
16203
16204 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16205 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16206 new = ftrace_call_replace(ip, (unsigned long)func);
16207 ret = ftrace_modify_code(ip, old, new);
16208
16209 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16210 {
16211 unsigned char code[MCOUNT_INSN_SIZE];
16212
16213 + ip = ktla_ktva(ip);
16214 +
16215 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16216 return -EFAULT;
16217
16218 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16219 index 51ff186..9e77418 100644
16220 --- a/arch/x86/kernel/head32.c
16221 +++ b/arch/x86/kernel/head32.c
16222 @@ -19,6 +19,7 @@
16223 #include <asm/io_apic.h>
16224 #include <asm/bios_ebda.h>
16225 #include <asm/tlbflush.h>
16226 +#include <asm/boot.h>
16227
16228 static void __init i386_default_early_setup(void)
16229 {
16230 @@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16231
16232 void __init i386_start_kernel(void)
16233 {
16234 - memblock_reserve(__pa_symbol(&_text),
16235 - __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16236 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16237
16238 #ifdef CONFIG_BLK_DEV_INITRD
16239 /* Reserve INITRD */
16240 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16241 index ce0be7c..c41476e 100644
16242 --- a/arch/x86/kernel/head_32.S
16243 +++ b/arch/x86/kernel/head_32.S
16244 @@ -25,6 +25,12 @@
16245 /* Physical address */
16246 #define pa(X) ((X) - __PAGE_OFFSET)
16247
16248 +#ifdef CONFIG_PAX_KERNEXEC
16249 +#define ta(X) (X)
16250 +#else
16251 +#define ta(X) ((X) - __PAGE_OFFSET)
16252 +#endif
16253 +
16254 /*
16255 * References to members of the new_cpu_data structure.
16256 */
16257 @@ -54,11 +60,7 @@
16258 * and small than max_low_pfn, otherwise will waste some page table entries
16259 */
16260
16261 -#if PTRS_PER_PMD > 1
16262 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16263 -#else
16264 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16265 -#endif
16266 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16267
16268 /* Number of possible pages in the lowmem region */
16269 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16270 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16271 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16272
16273 /*
16274 + * Real beginning of normal "text" segment
16275 + */
16276 +ENTRY(stext)
16277 +ENTRY(_stext)
16278 +
16279 +/*
16280 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16281 * %esi points to the real-mode code as a 32-bit pointer.
16282 * CS and DS must be 4 GB flat segments, but we don't depend on
16283 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16284 * can.
16285 */
16286 __HEAD
16287 +
16288 +#ifdef CONFIG_PAX_KERNEXEC
16289 + jmp startup_32
16290 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16291 +.fill PAGE_SIZE-5,1,0xcc
16292 +#endif
16293 +
16294 ENTRY(startup_32)
16295 movl pa(stack_start),%ecx
16296
16297 @@ -105,6 +120,57 @@ ENTRY(startup_32)
16298 2:
16299 leal -__PAGE_OFFSET(%ecx),%esp
16300
16301 +#ifdef CONFIG_SMP
16302 + movl $pa(cpu_gdt_table),%edi
16303 + movl $__per_cpu_load,%eax
16304 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16305 + rorl $16,%eax
16306 + movb %al,__KERNEL_PERCPU + 4(%edi)
16307 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16308 + movl $__per_cpu_end - 1,%eax
16309 + subl $__per_cpu_start,%eax
16310 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16311 +#endif
16312 +
16313 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16314 + movl $NR_CPUS,%ecx
16315 + movl $pa(cpu_gdt_table),%edi
16316 +1:
16317 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16318 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16319 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16320 + addl $PAGE_SIZE_asm,%edi
16321 + loop 1b
16322 +#endif
16323 +
16324 +#ifdef CONFIG_PAX_KERNEXEC
16325 + movl $pa(boot_gdt),%edi
16326 + movl $__LOAD_PHYSICAL_ADDR,%eax
16327 + movw %ax,__BOOT_CS + 2(%edi)
16328 + rorl $16,%eax
16329 + movb %al,__BOOT_CS + 4(%edi)
16330 + movb %ah,__BOOT_CS + 7(%edi)
16331 + rorl $16,%eax
16332 +
16333 + ljmp $(__BOOT_CS),$1f
16334 +1:
16335 +
16336 + movl $NR_CPUS,%ecx
16337 + movl $pa(cpu_gdt_table),%edi
16338 + addl $__PAGE_OFFSET,%eax
16339 +1:
16340 + movw %ax,__KERNEL_CS + 2(%edi)
16341 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16342 + rorl $16,%eax
16343 + movb %al,__KERNEL_CS + 4(%edi)
16344 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16345 + movb %ah,__KERNEL_CS + 7(%edi)
16346 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16347 + rorl $16,%eax
16348 + addl $PAGE_SIZE_asm,%edi
16349 + loop 1b
16350 +#endif
16351 +
16352 /*
16353 * Clear BSS first so that there are no surprises...
16354 */
16355 @@ -195,8 +261,11 @@ ENTRY(startup_32)
16356 movl %eax, pa(max_pfn_mapped)
16357
16358 /* Do early initialization of the fixmap area */
16359 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16360 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16361 +#ifdef CONFIG_COMPAT_VDSO
16362 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16363 +#else
16364 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16365 +#endif
16366 #else /* Not PAE */
16367
16368 page_pde_offset = (__PAGE_OFFSET >> 20);
16369 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16370 movl %eax, pa(max_pfn_mapped)
16371
16372 /* Do early initialization of the fixmap area */
16373 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16374 - movl %eax,pa(initial_page_table+0xffc)
16375 +#ifdef CONFIG_COMPAT_VDSO
16376 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16377 +#else
16378 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16379 +#endif
16380 #endif
16381
16382 #ifdef CONFIG_PARAVIRT
16383 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16384 cmpl $num_subarch_entries, %eax
16385 jae bad_subarch
16386
16387 - movl pa(subarch_entries)(,%eax,4), %eax
16388 - subl $__PAGE_OFFSET, %eax
16389 - jmp *%eax
16390 + jmp *pa(subarch_entries)(,%eax,4)
16391
16392 bad_subarch:
16393 WEAK(lguest_entry)
16394 @@ -255,10 +325,10 @@ WEAK(xen_entry)
16395 __INITDATA
16396
16397 subarch_entries:
16398 - .long default_entry /* normal x86/PC */
16399 - .long lguest_entry /* lguest hypervisor */
16400 - .long xen_entry /* Xen hypervisor */
16401 - .long default_entry /* Moorestown MID */
16402 + .long ta(default_entry) /* normal x86/PC */
16403 + .long ta(lguest_entry) /* lguest hypervisor */
16404 + .long ta(xen_entry) /* Xen hypervisor */
16405 + .long ta(default_entry) /* Moorestown MID */
16406 num_subarch_entries = (. - subarch_entries) / 4
16407 .previous
16408 #else
16409 @@ -312,6 +382,7 @@ default_entry:
16410 orl %edx,%eax
16411 movl %eax,%cr4
16412
16413 +#ifdef CONFIG_X86_PAE
16414 testb $X86_CR4_PAE, %al # check if PAE is enabled
16415 jz 6f
16416
16417 @@ -340,6 +411,9 @@ default_entry:
16418 /* Make changes effective */
16419 wrmsr
16420
16421 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16422 +#endif
16423 +
16424 6:
16425
16426 /*
16427 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16428 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16429 movl %eax,%ss # after changing gdt.
16430
16431 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16432 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16433 movl %eax,%ds
16434 movl %eax,%es
16435
16436 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16437 */
16438 cmpb $0,ready
16439 jne 1f
16440 - movl $gdt_page,%eax
16441 + movl $cpu_gdt_table,%eax
16442 movl $stack_canary,%ecx
16443 +#ifdef CONFIG_SMP
16444 + addl $__per_cpu_load,%ecx
16445 +#endif
16446 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16447 shrl $16, %ecx
16448 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16449 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16450 1:
16451 -#endif
16452 movl $(__KERNEL_STACK_CANARY),%eax
16453 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16454 + movl $(__USER_DS),%eax
16455 +#else
16456 + xorl %eax,%eax
16457 +#endif
16458 movl %eax,%gs
16459
16460 xorl %eax,%eax # Clear LDT
16461 @@ -558,22 +639,22 @@ early_page_fault:
16462 jmp early_fault
16463
16464 early_fault:
16465 - cld
16466 #ifdef CONFIG_PRINTK
16467 + cmpl $1,%ss:early_recursion_flag
16468 + je hlt_loop
16469 + incl %ss:early_recursion_flag
16470 + cld
16471 pusha
16472 movl $(__KERNEL_DS),%eax
16473 movl %eax,%ds
16474 movl %eax,%es
16475 - cmpl $2,early_recursion_flag
16476 - je hlt_loop
16477 - incl early_recursion_flag
16478 movl %cr2,%eax
16479 pushl %eax
16480 pushl %edx /* trapno */
16481 pushl $fault_msg
16482 call printk
16483 +; call dump_stack
16484 #endif
16485 - call dump_stack
16486 hlt_loop:
16487 hlt
16488 jmp hlt_loop
16489 @@ -581,8 +662,11 @@ hlt_loop:
16490 /* This is the default interrupt "handler" :-) */
16491 ALIGN
16492 ignore_int:
16493 - cld
16494 #ifdef CONFIG_PRINTK
16495 + cmpl $2,%ss:early_recursion_flag
16496 + je hlt_loop
16497 + incl %ss:early_recursion_flag
16498 + cld
16499 pushl %eax
16500 pushl %ecx
16501 pushl %edx
16502 @@ -591,9 +675,6 @@ ignore_int:
16503 movl $(__KERNEL_DS),%eax
16504 movl %eax,%ds
16505 movl %eax,%es
16506 - cmpl $2,early_recursion_flag
16507 - je hlt_loop
16508 - incl early_recursion_flag
16509 pushl 16(%esp)
16510 pushl 24(%esp)
16511 pushl 32(%esp)
16512 @@ -622,29 +703,43 @@ ENTRY(initial_code)
16513 /*
16514 * BSS section
16515 */
16516 -__PAGE_ALIGNED_BSS
16517 - .align PAGE_SIZE
16518 #ifdef CONFIG_X86_PAE
16519 +.section .initial_pg_pmd,"a",@progbits
16520 initial_pg_pmd:
16521 .fill 1024*KPMDS,4,0
16522 #else
16523 +.section .initial_page_table,"a",@progbits
16524 ENTRY(initial_page_table)
16525 .fill 1024,4,0
16526 #endif
16527 +.section .initial_pg_fixmap,"a",@progbits
16528 initial_pg_fixmap:
16529 .fill 1024,4,0
16530 +.section .empty_zero_page,"a",@progbits
16531 ENTRY(empty_zero_page)
16532 .fill 4096,1,0
16533 +.section .swapper_pg_dir,"a",@progbits
16534 ENTRY(swapper_pg_dir)
16535 +#ifdef CONFIG_X86_PAE
16536 + .fill 4,8,0
16537 +#else
16538 .fill 1024,4,0
16539 +#endif
16540 +
16541 +/*
16542 + * The IDT has to be page-aligned to simplify the Pentium
16543 + * F0 0F bug workaround.. We have a special link segment
16544 + * for this.
16545 + */
16546 +.section .idt,"a",@progbits
16547 +ENTRY(idt_table)
16548 + .fill 256,8,0
16549
16550 /*
16551 * This starts the data section.
16552 */
16553 #ifdef CONFIG_X86_PAE
16554 -__PAGE_ALIGNED_DATA
16555 - /* Page-aligned for the benefit of paravirt? */
16556 - .align PAGE_SIZE
16557 +.section .initial_page_table,"a",@progbits
16558 ENTRY(initial_page_table)
16559 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
16560 # if KPMDS == 3
16561 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
16562 # error "Kernel PMDs should be 1, 2 or 3"
16563 # endif
16564 .align PAGE_SIZE /* needs to be page-sized too */
16565 +
16566 +#ifdef CONFIG_PAX_PER_CPU_PGD
16567 +ENTRY(cpu_pgd)
16568 + .rept NR_CPUS
16569 + .fill 4,8,0
16570 + .endr
16571 +#endif
16572 +
16573 #endif
16574
16575 .data
16576 .balign 4
16577 ENTRY(stack_start)
16578 - .long init_thread_union+THREAD_SIZE
16579 + .long init_thread_union+THREAD_SIZE-8
16580
16581 +ready: .byte 0
16582 +
16583 +.section .rodata,"a",@progbits
16584 early_recursion_flag:
16585 .long 0
16586
16587 -ready: .byte 0
16588 -
16589 int_msg:
16590 .asciz "Unknown interrupt or fault at: %p %p %p\n"
16591
16592 @@ -707,7 +811,7 @@ fault_msg:
16593 .word 0 # 32 bit align gdt_desc.address
16594 boot_gdt_descr:
16595 .word __BOOT_DS+7
16596 - .long boot_gdt - __PAGE_OFFSET
16597 + .long pa(boot_gdt)
16598
16599 .word 0 # 32-bit align idt_desc.address
16600 idt_descr:
16601 @@ -718,7 +822,7 @@ idt_descr:
16602 .word 0 # 32 bit align gdt_desc.address
16603 ENTRY(early_gdt_descr)
16604 .word GDT_ENTRIES*8-1
16605 - .long gdt_page /* Overwritten for secondary CPUs */
16606 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
16607
16608 /*
16609 * The boot_gdt must mirror the equivalent in setup.S and is
16610 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
16611 .align L1_CACHE_BYTES
16612 ENTRY(boot_gdt)
16613 .fill GDT_ENTRY_BOOT_CS,8,0
16614 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
16615 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
16616 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
16617 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
16618 +
16619 + .align PAGE_SIZE_asm
16620 +ENTRY(cpu_gdt_table)
16621 + .rept NR_CPUS
16622 + .quad 0x0000000000000000 /* NULL descriptor */
16623 + .quad 0x0000000000000000 /* 0x0b reserved */
16624 + .quad 0x0000000000000000 /* 0x13 reserved */
16625 + .quad 0x0000000000000000 /* 0x1b reserved */
16626 +
16627 +#ifdef CONFIG_PAX_KERNEXEC
16628 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
16629 +#else
16630 + .quad 0x0000000000000000 /* 0x20 unused */
16631 +#endif
16632 +
16633 + .quad 0x0000000000000000 /* 0x28 unused */
16634 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
16635 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
16636 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
16637 + .quad 0x0000000000000000 /* 0x4b reserved */
16638 + .quad 0x0000000000000000 /* 0x53 reserved */
16639 + .quad 0x0000000000000000 /* 0x5b reserved */
16640 +
16641 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
16642 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
16643 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
16644 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
16645 +
16646 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
16647 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
16648 +
16649 + /*
16650 + * Segments used for calling PnP BIOS have byte granularity.
16651 + * The code segments and data segments have fixed 64k limits,
16652 + * the transfer segment sizes are set at run time.
16653 + */
16654 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
16655 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
16656 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
16657 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
16658 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
16659 +
16660 + /*
16661 + * The APM segments have byte granularity and their bases
16662 + * are set at run time. All have 64k limits.
16663 + */
16664 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
16665 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
16666 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
16667 +
16668 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
16669 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
16670 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
16671 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
16672 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
16673 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
16674 +
16675 + /* Be sure this is zeroed to avoid false validations in Xen */
16676 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
16677 + .endr
16678 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
16679 index 40f4eb3..6d24d9d 100644
16680 --- a/arch/x86/kernel/head_64.S
16681 +++ b/arch/x86/kernel/head_64.S
16682 @@ -19,6 +19,8 @@
16683 #include <asm/cache.h>
16684 #include <asm/processor-flags.h>
16685 #include <asm/percpu.h>
16686 +#include <asm/cpufeature.h>
16687 +#include <asm/alternative-asm.h>
16688
16689 #ifdef CONFIG_PARAVIRT
16690 #include <asm/asm-offsets.h>
16691 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
16692 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
16693 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
16694 L3_START_KERNEL = pud_index(__START_KERNEL_map)
16695 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
16696 +L3_VMALLOC_START = pud_index(VMALLOC_START)
16697 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
16698 +L3_VMALLOC_END = pud_index(VMALLOC_END)
16699 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
16700 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
16701
16702 .text
16703 __HEAD
16704 @@ -85,35 +93,23 @@ startup_64:
16705 */
16706 addq %rbp, init_level4_pgt + 0(%rip)
16707 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
16708 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
16709 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
16710 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
16711 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
16712
16713 addq %rbp, level3_ident_pgt + 0(%rip)
16714 +#ifndef CONFIG_XEN
16715 + addq %rbp, level3_ident_pgt + 8(%rip)
16716 +#endif
16717
16718 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
16719 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
16720 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
16721 +
16722 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
16723 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
16724
16725 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
16726 -
16727 - /* Add an Identity mapping if I am above 1G */
16728 - leaq _text(%rip), %rdi
16729 - andq $PMD_PAGE_MASK, %rdi
16730 -
16731 - movq %rdi, %rax
16732 - shrq $PUD_SHIFT, %rax
16733 - andq $(PTRS_PER_PUD - 1), %rax
16734 - jz ident_complete
16735 -
16736 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
16737 - leaq level3_ident_pgt(%rip), %rbx
16738 - movq %rdx, 0(%rbx, %rax, 8)
16739 -
16740 - movq %rdi, %rax
16741 - shrq $PMD_SHIFT, %rax
16742 - andq $(PTRS_PER_PMD - 1), %rax
16743 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
16744 - leaq level2_spare_pgt(%rip), %rbx
16745 - movq %rdx, 0(%rbx, %rax, 8)
16746 -ident_complete:
16747 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
16748
16749 /*
16750 * Fixup the kernel text+data virtual addresses. Note that
16751 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
16752 * after the boot processor executes this code.
16753 */
16754
16755 - /* Enable PAE mode and PGE */
16756 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
16757 + /* Enable PAE mode and PSE/PGE */
16758 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16759 movq %rax, %cr4
16760
16761 /* Setup early boot stage 4 level pagetables. */
16762 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
16763 movl $MSR_EFER, %ecx
16764 rdmsr
16765 btsl $_EFER_SCE, %eax /* Enable System Call */
16766 - btl $20,%edi /* No Execute supported? */
16767 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
16768 jnc 1f
16769 btsl $_EFER_NX, %eax
16770 + leaq init_level4_pgt(%rip), %rdi
16771 +#ifndef CONFIG_EFI
16772 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
16773 +#endif
16774 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
16775 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
16776 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
16777 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
16778 1: wrmsr /* Make changes effective */
16779
16780 /* Setup cr0 */
16781 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
16782 * jump. In addition we need to ensure %cs is set so we make this
16783 * a far return.
16784 */
16785 + pax_set_fptr_mask
16786 movq initial_code(%rip),%rax
16787 pushq $0 # fake return address to stop unwinder
16788 pushq $__KERNEL_CS # set correct cs
16789 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
16790 bad_address:
16791 jmp bad_address
16792
16793 - .section ".init.text","ax"
16794 + __INIT
16795 #ifdef CONFIG_EARLY_PRINTK
16796 .globl early_idt_handlers
16797 early_idt_handlers:
16798 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
16799 #endif /* EARLY_PRINTK */
16800 1: hlt
16801 jmp 1b
16802 + .previous
16803
16804 #ifdef CONFIG_EARLY_PRINTK
16805 + __INITDATA
16806 early_recursion_flag:
16807 .long 0
16808 + .previous
16809
16810 + .section .rodata,"a",@progbits
16811 early_idt_msg:
16812 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
16813 early_idt_ripmsg:
16814 .asciz "RIP %s\n"
16815 + .previous
16816 #endif /* CONFIG_EARLY_PRINTK */
16817 - .previous
16818
16819 + .section .rodata,"a",@progbits
16820 #define NEXT_PAGE(name) \
16821 .balign PAGE_SIZE; \
16822 ENTRY(name)
16823 @@ -338,7 +348,6 @@ ENTRY(name)
16824 i = i + 1 ; \
16825 .endr
16826
16827 - .data
16828 /*
16829 * This default setting generates an ident mapping at address 0x100000
16830 * and a mapping for the kernel that precisely maps virtual address
16831 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
16832 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16833 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
16834 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16835 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
16836 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
16837 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
16838 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
16839 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
16840 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
16841 .org init_level4_pgt + L4_START_KERNEL*8, 0
16842 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
16843 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
16844
16845 +#ifdef CONFIG_PAX_PER_CPU_PGD
16846 +NEXT_PAGE(cpu_pgd)
16847 + .rept NR_CPUS
16848 + .fill 512,8,0
16849 + .endr
16850 +#endif
16851 +
16852 NEXT_PAGE(level3_ident_pgt)
16853 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16854 +#ifdef CONFIG_XEN
16855 .fill 511,8,0
16856 +#else
16857 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
16858 + .fill 510,8,0
16859 +#endif
16860 +
16861 +NEXT_PAGE(level3_vmalloc_start_pgt)
16862 + .fill 512,8,0
16863 +
16864 +NEXT_PAGE(level3_vmalloc_end_pgt)
16865 + .fill 512,8,0
16866 +
16867 +NEXT_PAGE(level3_vmemmap_pgt)
16868 + .fill L3_VMEMMAP_START,8,0
16869 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
16870
16871 NEXT_PAGE(level3_kernel_pgt)
16872 .fill L3_START_KERNEL,8,0
16873 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
16874 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
16875 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
16876
16877 +NEXT_PAGE(level2_vmemmap_pgt)
16878 + .fill 512,8,0
16879 +
16880 NEXT_PAGE(level2_fixmap_pgt)
16881 - .fill 506,8,0
16882 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
16883 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
16884 - .fill 5,8,0
16885 + .fill 507,8,0
16886 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
16887 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
16888 + .fill 4,8,0
16889
16890 -NEXT_PAGE(level1_fixmap_pgt)
16891 +NEXT_PAGE(level1_vsyscall_pgt)
16892 .fill 512,8,0
16893
16894 -NEXT_PAGE(level2_ident_pgt)
16895 - /* Since I easily can, map the first 1G.
16896 + /* Since I easily can, map the first 2G.
16897 * Don't set NX because code runs from these pages.
16898 */
16899 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
16900 +NEXT_PAGE(level2_ident_pgt)
16901 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
16902
16903 NEXT_PAGE(level2_kernel_pgt)
16904 /*
16905 @@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
16906 * If you want to increase this then increase MODULES_VADDR
16907 * too.)
16908 */
16909 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
16910 - KERNEL_IMAGE_SIZE/PMD_SIZE)
16911 -
16912 -NEXT_PAGE(level2_spare_pgt)
16913 - .fill 512, 8, 0
16914 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
16915
16916 #undef PMDS
16917 #undef NEXT_PAGE
16918
16919 - .data
16920 + .align PAGE_SIZE
16921 +ENTRY(cpu_gdt_table)
16922 + .rept NR_CPUS
16923 + .quad 0x0000000000000000 /* NULL descriptor */
16924 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
16925 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
16926 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
16927 + .quad 0x00cffb000000ffff /* __USER32_CS */
16928 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
16929 + .quad 0x00affb000000ffff /* __USER_CS */
16930 +
16931 +#ifdef CONFIG_PAX_KERNEXEC
16932 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
16933 +#else
16934 + .quad 0x0 /* unused */
16935 +#endif
16936 +
16937 + .quad 0,0 /* TSS */
16938 + .quad 0,0 /* LDT */
16939 + .quad 0,0,0 /* three TLS descriptors */
16940 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
16941 + /* asm/segment.h:GDT_ENTRIES must match this */
16942 +
16943 + /* zero the remaining page */
16944 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
16945 + .endr
16946 +
16947 .align 16
16948 .globl early_gdt_descr
16949 early_gdt_descr:
16950 .word GDT_ENTRIES*8-1
16951 early_gdt_descr_base:
16952 - .quad INIT_PER_CPU_VAR(gdt_page)
16953 + .quad cpu_gdt_table
16954
16955 ENTRY(phys_base)
16956 /* This must match the first entry in level2_kernel_pgt */
16957 .quad 0x0000000000000000
16958
16959 #include "../../x86/xen/xen-head.S"
16960 -
16961 - .section .bss, "aw", @nobits
16962 +
16963 + .section .rodata,"a",@progbits
16964 .align L1_CACHE_BYTES
16965 ENTRY(idt_table)
16966 - .skip IDT_ENTRIES * 16
16967 + .fill 512,8,0
16968
16969 .align L1_CACHE_BYTES
16970 ENTRY(nmi_idt_table)
16971 - .skip IDT_ENTRIES * 16
16972 + .fill 512,8,0
16973
16974 __PAGE_ALIGNED_BSS
16975 .align PAGE_SIZE
16976 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
16977 index 9c3bd4a..e1d9b35 100644
16978 --- a/arch/x86/kernel/i386_ksyms_32.c
16979 +++ b/arch/x86/kernel/i386_ksyms_32.c
16980 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
16981 EXPORT_SYMBOL(cmpxchg8b_emu);
16982 #endif
16983
16984 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
16985 +
16986 /* Networking helper routines. */
16987 EXPORT_SYMBOL(csum_partial_copy_generic);
16988 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
16989 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
16990
16991 EXPORT_SYMBOL(__get_user_1);
16992 EXPORT_SYMBOL(__get_user_2);
16993 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
16994
16995 EXPORT_SYMBOL(csum_partial);
16996 EXPORT_SYMBOL(empty_zero_page);
16997 +
16998 +#ifdef CONFIG_PAX_KERNEXEC
16999 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17000 +#endif
17001 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17002 index 739d859..d1d6be7 100644
17003 --- a/arch/x86/kernel/i387.c
17004 +++ b/arch/x86/kernel/i387.c
17005 @@ -188,6 +188,9 @@ int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
17006
17007 int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17008 unsigned int pos, unsigned int count,
17009 + void *kbuf, void __user *ubuf) __size_overflow(4);
17010 +int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17011 + unsigned int pos, unsigned int count,
17012 void *kbuf, void __user *ubuf)
17013 {
17014 int ret;
17015 @@ -207,6 +210,9 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17016
17017 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17018 unsigned int pos, unsigned int count,
17019 + const void *kbuf, const void __user *ubuf) __size_overflow(4);
17020 +int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17021 + unsigned int pos, unsigned int count,
17022 const void *kbuf, const void __user *ubuf)
17023 {
17024 int ret;
17025 @@ -240,6 +246,9 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17026
17027 int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17028 unsigned int pos, unsigned int count,
17029 + void *kbuf, void __user *ubuf) __size_overflow(4);
17030 +int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17031 + unsigned int pos, unsigned int count,
17032 void *kbuf, void __user *ubuf)
17033 {
17034 int ret;
17035 @@ -269,6 +278,9 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17036
17037 int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
17038 unsigned int pos, unsigned int count,
17039 + const void *kbuf, const void __user *ubuf) __size_overflow(4);
17040 +int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
17041 + unsigned int pos, unsigned int count,
17042 const void *kbuf, const void __user *ubuf)
17043 {
17044 int ret;
17045 @@ -439,6 +451,9 @@ static void convert_to_fxsr(struct task_struct *tsk,
17046
17047 int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17048 unsigned int pos, unsigned int count,
17049 + void *kbuf, void __user *ubuf) __size_overflow(3,4);
17050 +int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17051 + unsigned int pos, unsigned int count,
17052 void *kbuf, void __user *ubuf)
17053 {
17054 struct user_i387_ia32_struct env;
17055 @@ -471,6 +486,9 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17056
17057 int fpregs_set(struct task_struct *target, const struct user_regset *regset,
17058 unsigned int pos, unsigned int count,
17059 + const void *kbuf, const void __user *ubuf) __size_overflow(3,4);
17060 +int fpregs_set(struct task_struct *target, const struct user_regset *regset,
17061 + unsigned int pos, unsigned int count,
17062 const void *kbuf, const void __user *ubuf)
17063 {
17064 struct user_i387_ia32_struct env;
17065 @@ -619,6 +637,8 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
17066 }
17067
17068 static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
17069 + unsigned int size) __size_overflow(2);
17070 +static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
17071 unsigned int size)
17072 {
17073 struct task_struct *tsk = current;
17074 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17075 index 6104852..6114160 100644
17076 --- a/arch/x86/kernel/i8259.c
17077 +++ b/arch/x86/kernel/i8259.c
17078 @@ -210,7 +210,7 @@ spurious_8259A_irq:
17079 "spurious 8259A interrupt: IRQ%d.\n", irq);
17080 spurious_irq_mask |= irqmask;
17081 }
17082 - atomic_inc(&irq_err_count);
17083 + atomic_inc_unchecked(&irq_err_count);
17084 /*
17085 * Theoretically we do not have to handle this IRQ,
17086 * but in Linux this does not cause problems and is
17087 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17088 index 43e9ccf..44ccf6f 100644
17089 --- a/arch/x86/kernel/init_task.c
17090 +++ b/arch/x86/kernel/init_task.c
17091 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17092 * way process stacks are handled. This is done by having a special
17093 * "init_task" linker map entry..
17094 */
17095 -union thread_union init_thread_union __init_task_data =
17096 - { INIT_THREAD_INFO(init_task) };
17097 +union thread_union init_thread_union __init_task_data;
17098
17099 /*
17100 * Initial task structure.
17101 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17102 * section. Since TSS's are completely CPU-local, we want them
17103 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17104 */
17105 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17106 -
17107 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17108 +EXPORT_SYMBOL(init_tss);
17109 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17110 index 8c96897..be66bfa 100644
17111 --- a/arch/x86/kernel/ioport.c
17112 +++ b/arch/x86/kernel/ioport.c
17113 @@ -6,6 +6,7 @@
17114 #include <linux/sched.h>
17115 #include <linux/kernel.h>
17116 #include <linux/capability.h>
17117 +#include <linux/security.h>
17118 #include <linux/errno.h>
17119 #include <linux/types.h>
17120 #include <linux/ioport.h>
17121 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17122
17123 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17124 return -EINVAL;
17125 +#ifdef CONFIG_GRKERNSEC_IO
17126 + if (turn_on && grsec_disable_privio) {
17127 + gr_handle_ioperm();
17128 + return -EPERM;
17129 + }
17130 +#endif
17131 if (turn_on && !capable(CAP_SYS_RAWIO))
17132 return -EPERM;
17133
17134 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17135 * because the ->io_bitmap_max value must match the bitmap
17136 * contents:
17137 */
17138 - tss = &per_cpu(init_tss, get_cpu());
17139 + tss = init_tss + get_cpu();
17140
17141 if (turn_on)
17142 bitmap_clear(t->io_bitmap_ptr, from, num);
17143 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17144 return -EINVAL;
17145 /* Trying to gain more privileges? */
17146 if (level > old) {
17147 +#ifdef CONFIG_GRKERNSEC_IO
17148 + if (grsec_disable_privio) {
17149 + gr_handle_iopl();
17150 + return -EPERM;
17151 + }
17152 +#endif
17153 if (!capable(CAP_SYS_RAWIO))
17154 return -EPERM;
17155 }
17156 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17157 index 7943e0c..dd32c5c 100644
17158 --- a/arch/x86/kernel/irq.c
17159 +++ b/arch/x86/kernel/irq.c
17160 @@ -18,7 +18,7 @@
17161 #include <asm/mce.h>
17162 #include <asm/hw_irq.h>
17163
17164 -atomic_t irq_err_count;
17165 +atomic_unchecked_t irq_err_count;
17166
17167 /* Function pointer for generic interrupt vector handling */
17168 void (*x86_platform_ipi_callback)(void) = NULL;
17169 @@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17170 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17171 seq_printf(p, " Machine check polls\n");
17172 #endif
17173 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17174 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17175 #if defined(CONFIG_X86_IO_APIC)
17176 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17177 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17178 #endif
17179 return 0;
17180 }
17181 @@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17182
17183 u64 arch_irq_stat(void)
17184 {
17185 - u64 sum = atomic_read(&irq_err_count);
17186 + u64 sum = atomic_read_unchecked(&irq_err_count);
17187
17188 #ifdef CONFIG_X86_IO_APIC
17189 - sum += atomic_read(&irq_mis_count);
17190 + sum += atomic_read_unchecked(&irq_mis_count);
17191 #endif
17192 return sum;
17193 }
17194 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17195 index 40fc861..9b8739b 100644
17196 --- a/arch/x86/kernel/irq_32.c
17197 +++ b/arch/x86/kernel/irq_32.c
17198 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17199 __asm__ __volatile__("andl %%esp,%0" :
17200 "=r" (sp) : "0" (THREAD_SIZE - 1));
17201
17202 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17203 + return sp < STACK_WARN;
17204 }
17205
17206 static void print_stack_overflow(void)
17207 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17208 * per-CPU IRQ handling contexts (thread information and stack)
17209 */
17210 union irq_ctx {
17211 - struct thread_info tinfo;
17212 - u32 stack[THREAD_SIZE/sizeof(u32)];
17213 + unsigned long previous_esp;
17214 + u32 stack[THREAD_SIZE/sizeof(u32)];
17215 } __attribute__((aligned(THREAD_SIZE)));
17216
17217 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17218 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17219 static inline int
17220 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17221 {
17222 - union irq_ctx *curctx, *irqctx;
17223 + union irq_ctx *irqctx;
17224 u32 *isp, arg1, arg2;
17225
17226 - curctx = (union irq_ctx *) current_thread_info();
17227 irqctx = __this_cpu_read(hardirq_ctx);
17228
17229 /*
17230 @@ -92,21 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17231 * handler) we can't do that and just have to keep using the
17232 * current stack (which is the irq stack already after all)
17233 */
17234 - if (unlikely(curctx == irqctx))
17235 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17236 return 0;
17237
17238 /* build the stack frame on the IRQ stack */
17239 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17240 - irqctx->tinfo.task = curctx->tinfo.task;
17241 - irqctx->tinfo.previous_esp = current_stack_pointer;
17242 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17243 + irqctx->previous_esp = current_stack_pointer;
17244
17245 - /*
17246 - * Copy the softirq bits in preempt_count so that the
17247 - * softirq checks work in the hardirq context.
17248 - */
17249 - irqctx->tinfo.preempt_count =
17250 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17251 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17252 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17253 + __set_fs(MAKE_MM_SEG(0));
17254 +#endif
17255
17256 if (unlikely(overflow))
17257 call_on_stack(print_stack_overflow, isp);
17258 @@ -118,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17259 : "0" (irq), "1" (desc), "2" (isp),
17260 "D" (desc->handle_irq)
17261 : "memory", "cc", "ecx");
17262 +
17263 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17264 + __set_fs(current_thread_info()->addr_limit);
17265 +#endif
17266 +
17267 return 1;
17268 }
17269
17270 @@ -126,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17271 */
17272 void __cpuinit irq_ctx_init(int cpu)
17273 {
17274 - union irq_ctx *irqctx;
17275 -
17276 if (per_cpu(hardirq_ctx, cpu))
17277 return;
17278
17279 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17280 - THREAD_FLAGS,
17281 - THREAD_ORDER));
17282 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17283 - irqctx->tinfo.cpu = cpu;
17284 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17285 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17286 -
17287 - per_cpu(hardirq_ctx, cpu) = irqctx;
17288 -
17289 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17290 - THREAD_FLAGS,
17291 - THREAD_ORDER));
17292 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17293 - irqctx->tinfo.cpu = cpu;
17294 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17295 -
17296 - per_cpu(softirq_ctx, cpu) = irqctx;
17297 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17298 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17299
17300 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17301 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17302 @@ -157,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17303 asmlinkage void do_softirq(void)
17304 {
17305 unsigned long flags;
17306 - struct thread_info *curctx;
17307 union irq_ctx *irqctx;
17308 u32 *isp;
17309
17310 @@ -167,15 +147,22 @@ asmlinkage void do_softirq(void)
17311 local_irq_save(flags);
17312
17313 if (local_softirq_pending()) {
17314 - curctx = current_thread_info();
17315 irqctx = __this_cpu_read(softirq_ctx);
17316 - irqctx->tinfo.task = curctx->task;
17317 - irqctx->tinfo.previous_esp = current_stack_pointer;
17318 + irqctx->previous_esp = current_stack_pointer;
17319
17320 /* build the stack frame on the softirq stack */
17321 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17322 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17323 +
17324 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17325 + __set_fs(MAKE_MM_SEG(0));
17326 +#endif
17327
17328 call_on_stack(__do_softirq, isp);
17329 +
17330 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17331 + __set_fs(current_thread_info()->addr_limit);
17332 +#endif
17333 +
17334 /*
17335 * Shouldn't happen, we returned above if in_interrupt():
17336 */
17337 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17338 index d04d3ec..ea4b374 100644
17339 --- a/arch/x86/kernel/irq_64.c
17340 +++ b/arch/x86/kernel/irq_64.c
17341 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17342 u64 estack_top, estack_bottom;
17343 u64 curbase = (u64)task_stack_page(current);
17344
17345 - if (user_mode_vm(regs))
17346 + if (user_mode(regs))
17347 return;
17348
17349 if (regs->sp >= curbase + sizeof(struct thread_info) +
17350 diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17351 index 90fcf62..e682cdd 100644
17352 --- a/arch/x86/kernel/kdebugfs.c
17353 +++ b/arch/x86/kernel/kdebugfs.c
17354 @@ -28,6 +28,8 @@ struct setup_data_node {
17355 };
17356
17357 static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17358 + size_t count, loff_t *ppos) __size_overflow(3);
17359 +static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17360 size_t count, loff_t *ppos)
17361 {
17362 struct setup_data_node *node = file->private_data;
17363 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17364 index 2f45c4c..d95504f 100644
17365 --- a/arch/x86/kernel/kgdb.c
17366 +++ b/arch/x86/kernel/kgdb.c
17367 @@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17368 #ifdef CONFIG_X86_32
17369 switch (regno) {
17370 case GDB_SS:
17371 - if (!user_mode_vm(regs))
17372 + if (!user_mode(regs))
17373 *(unsigned long *)mem = __KERNEL_DS;
17374 break;
17375 case GDB_SP:
17376 - if (!user_mode_vm(regs))
17377 + if (!user_mode(regs))
17378 *(unsigned long *)mem = kernel_stack_pointer(regs);
17379 break;
17380 case GDB_GS:
17381 @@ -475,12 +475,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17382 case 'k':
17383 /* clear the trace bit */
17384 linux_regs->flags &= ~X86_EFLAGS_TF;
17385 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17386 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17387
17388 /* set the trace bit if we're stepping */
17389 if (remcomInBuffer[0] == 's') {
17390 linux_regs->flags |= X86_EFLAGS_TF;
17391 - atomic_set(&kgdb_cpu_doing_single_step,
17392 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17393 raw_smp_processor_id());
17394 }
17395
17396 @@ -545,7 +545,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17397
17398 switch (cmd) {
17399 case DIE_DEBUG:
17400 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17401 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17402 if (user_mode(regs))
17403 return single_step_cont(regs, args);
17404 break;
17405 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17406 index 7da647d..56fe348 100644
17407 --- a/arch/x86/kernel/kprobes.c
17408 +++ b/arch/x86/kernel/kprobes.c
17409 @@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17410 } __attribute__((packed)) *insn;
17411
17412 insn = (struct __arch_relative_insn *)from;
17413 +
17414 + pax_open_kernel();
17415 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17416 insn->op = op;
17417 + pax_close_kernel();
17418 }
17419
17420 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17421 @@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17422 kprobe_opcode_t opcode;
17423 kprobe_opcode_t *orig_opcodes = opcodes;
17424
17425 - if (search_exception_tables((unsigned long)opcodes))
17426 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17427 return 0; /* Page fault may occur on this address. */
17428
17429 retry:
17430 @@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17431 }
17432 }
17433 insn_get_length(&insn);
17434 + pax_open_kernel();
17435 memcpy(dest, insn.kaddr, insn.length);
17436 + pax_close_kernel();
17437
17438 #ifdef CONFIG_X86_64
17439 if (insn_rip_relative(&insn)) {
17440 @@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17441 (u8 *) dest;
17442 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17443 disp = (u8 *) dest + insn_offset_displacement(&insn);
17444 + pax_open_kernel();
17445 *(s32 *) disp = (s32) newdisp;
17446 + pax_close_kernel();
17447 }
17448 #endif
17449 return insn.length;
17450 @@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
17451 */
17452 __copy_instruction(p->ainsn.insn, p->addr, 0);
17453
17454 - if (can_boost(p->addr))
17455 + if (can_boost(ktla_ktva(p->addr)))
17456 p->ainsn.boostable = 0;
17457 else
17458 p->ainsn.boostable = -1;
17459
17460 - p->opcode = *p->addr;
17461 + p->opcode = *(ktla_ktva(p->addr));
17462 }
17463
17464 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17465 @@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17466 * nor set current_kprobe, because it doesn't use single
17467 * stepping.
17468 */
17469 - regs->ip = (unsigned long)p->ainsn.insn;
17470 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17471 preempt_enable_no_resched();
17472 return;
17473 }
17474 @@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17475 if (p->opcode == BREAKPOINT_INSTRUCTION)
17476 regs->ip = (unsigned long)p->addr;
17477 else
17478 - regs->ip = (unsigned long)p->ainsn.insn;
17479 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17480 }
17481
17482 /*
17483 @@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17484 setup_singlestep(p, regs, kcb, 0);
17485 return 1;
17486 }
17487 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
17488 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17489 /*
17490 * The breakpoint instruction was removed right
17491 * after we hit it. Another cpu has removed
17492 @@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17493 " movq %rax, 152(%rsp)\n"
17494 RESTORE_REGS_STRING
17495 " popfq\n"
17496 +#ifdef KERNEXEC_PLUGIN
17497 + " btsq $63,(%rsp)\n"
17498 +#endif
17499 #else
17500 " pushf\n"
17501 SAVE_REGS_STRING
17502 @@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17503 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17504 {
17505 unsigned long *tos = stack_addr(regs);
17506 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17507 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17508 unsigned long orig_ip = (unsigned long)p->addr;
17509 kprobe_opcode_t *insn = p->ainsn.insn;
17510
17511 @@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
17512 struct die_args *args = data;
17513 int ret = NOTIFY_DONE;
17514
17515 - if (args->regs && user_mode_vm(args->regs))
17516 + if (args->regs && user_mode(args->regs))
17517 return ret;
17518
17519 switch (val) {
17520 @@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17521 * Verify if the address gap is in 2GB range, because this uses
17522 * a relative jump.
17523 */
17524 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17525 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17526 if (abs(rel) > 0x7fffffff)
17527 return -ERANGE;
17528
17529 @@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17530 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17531
17532 /* Set probe function call */
17533 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17534 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17535
17536 /* Set returning jmp instruction at the tail of out-of-line buffer */
17537 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17538 - (u8 *)op->kp.addr + op->optinsn.size);
17539 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17540
17541 flush_icache_range((unsigned long) buf,
17542 (unsigned long) buf + TMPL_END_IDX +
17543 @@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17544 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17545
17546 /* Backup instructions which will be replaced by jump address */
17547 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17548 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17549 RELATIVE_ADDR_SIZE);
17550
17551 insn_buf[0] = RELATIVEJUMP_OPCODE;
17552 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17553 index ea69726..a305f16 100644
17554 --- a/arch/x86/kernel/ldt.c
17555 +++ b/arch/x86/kernel/ldt.c
17556 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17557 if (reload) {
17558 #ifdef CONFIG_SMP
17559 preempt_disable();
17560 - load_LDT(pc);
17561 + load_LDT_nolock(pc);
17562 if (!cpumask_equal(mm_cpumask(current->mm),
17563 cpumask_of(smp_processor_id())))
17564 smp_call_function(flush_ldt, current->mm, 1);
17565 preempt_enable();
17566 #else
17567 - load_LDT(pc);
17568 + load_LDT_nolock(pc);
17569 #endif
17570 }
17571 if (oldsize) {
17572 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
17573 return err;
17574
17575 for (i = 0; i < old->size; i++)
17576 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
17577 + write_ldt_entry(new->ldt, i, old->ldt + i);
17578 return 0;
17579 }
17580
17581 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
17582 retval = copy_ldt(&mm->context, &old_mm->context);
17583 mutex_unlock(&old_mm->context.lock);
17584 }
17585 +
17586 + if (tsk == current) {
17587 + mm->context.vdso = 0;
17588 +
17589 +#ifdef CONFIG_X86_32
17590 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17591 + mm->context.user_cs_base = 0UL;
17592 + mm->context.user_cs_limit = ~0UL;
17593 +
17594 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17595 + cpus_clear(mm->context.cpu_user_cs_mask);
17596 +#endif
17597 +
17598 +#endif
17599 +#endif
17600 +
17601 + }
17602 +
17603 return retval;
17604 }
17605
17606 @@ -141,6 +159,7 @@ void destroy_context(struct mm_struct *mm)
17607 }
17608 }
17609
17610 +static int read_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2);
17611 static int read_ldt(void __user *ptr, unsigned long bytecount)
17612 {
17613 int err;
17614 @@ -175,6 +194,7 @@ error_return:
17615 return err;
17616 }
17617
17618 +static int read_default_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2);
17619 static int read_default_ldt(void __user *ptr, unsigned long bytecount)
17620 {
17621 /* CHECKME: Can we use _one_ random number ? */
17622 @@ -230,6 +250,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
17623 }
17624 }
17625
17626 +#ifdef CONFIG_PAX_SEGMEXEC
17627 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
17628 + error = -EINVAL;
17629 + goto out_unlock;
17630 + }
17631 +#endif
17632 +
17633 fill_ldt(&ldt, &ldt_info);
17634 if (oldmode)
17635 ldt.avl = 0;
17636 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
17637 index a3fa43b..8966f4c 100644
17638 --- a/arch/x86/kernel/machine_kexec_32.c
17639 +++ b/arch/x86/kernel/machine_kexec_32.c
17640 @@ -27,7 +27,7 @@
17641 #include <asm/cacheflush.h>
17642 #include <asm/debugreg.h>
17643
17644 -static void set_idt(void *newidt, __u16 limit)
17645 +static void set_idt(struct desc_struct *newidt, __u16 limit)
17646 {
17647 struct desc_ptr curidt;
17648
17649 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
17650 }
17651
17652
17653 -static void set_gdt(void *newgdt, __u16 limit)
17654 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
17655 {
17656 struct desc_ptr curgdt;
17657
17658 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
17659 }
17660
17661 control_page = page_address(image->control_code_page);
17662 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
17663 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
17664
17665 relocate_kernel_ptr = control_page;
17666 page_list[PA_CONTROL_PAGE] = __pa(control_page);
17667 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
17668 index 3ca42d0..79d24cd 100644
17669 --- a/arch/x86/kernel/microcode_intel.c
17670 +++ b/arch/x86/kernel/microcode_intel.c
17671 @@ -434,15 +434,16 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
17672 return ret;
17673 }
17674
17675 +static int get_ucode_user(void *to, const void *from, size_t n) __size_overflow(3);
17676 static int get_ucode_user(void *to, const void *from, size_t n)
17677 {
17678 - return copy_from_user(to, from, n);
17679 + return copy_from_user(to, (const void __force_user *)from, n);
17680 }
17681
17682 static enum ucode_state
17683 request_microcode_user(int cpu, const void __user *buf, size_t size)
17684 {
17685 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
17686 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
17687 }
17688
17689 static void microcode_fini_cpu(int cpu)
17690 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
17691 index 925179f..1f0d561 100644
17692 --- a/arch/x86/kernel/module.c
17693 +++ b/arch/x86/kernel/module.c
17694 @@ -36,15 +36,61 @@
17695 #define DEBUGP(fmt...)
17696 #endif
17697
17698 -void *module_alloc(unsigned long size)
17699 +static inline void *__module_alloc(unsigned long size, pgprot_t prot) __size_overflow(1);
17700 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
17701 {
17702 - if (PAGE_ALIGN(size) > MODULES_LEN)
17703 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
17704 return NULL;
17705 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
17706 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
17707 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
17708 -1, __builtin_return_address(0));
17709 }
17710
17711 +void *module_alloc(unsigned long size)
17712 +{
17713 +
17714 +#ifdef CONFIG_PAX_KERNEXEC
17715 + return __module_alloc(size, PAGE_KERNEL);
17716 +#else
17717 + return __module_alloc(size, PAGE_KERNEL_EXEC);
17718 +#endif
17719 +
17720 +}
17721 +
17722 +#ifdef CONFIG_PAX_KERNEXEC
17723 +#ifdef CONFIG_X86_32
17724 +void *module_alloc_exec(unsigned long size)
17725 +{
17726 + struct vm_struct *area;
17727 +
17728 + if (size == 0)
17729 + return NULL;
17730 +
17731 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
17732 + return area ? area->addr : NULL;
17733 +}
17734 +EXPORT_SYMBOL(module_alloc_exec);
17735 +
17736 +void module_free_exec(struct module *mod, void *module_region)
17737 +{
17738 + vunmap(module_region);
17739 +}
17740 +EXPORT_SYMBOL(module_free_exec);
17741 +#else
17742 +void module_free_exec(struct module *mod, void *module_region)
17743 +{
17744 + module_free(mod, module_region);
17745 +}
17746 +EXPORT_SYMBOL(module_free_exec);
17747 +
17748 +void *module_alloc_exec(unsigned long size)
17749 +{
17750 + return __module_alloc(size, PAGE_KERNEL_RX);
17751 +}
17752 +EXPORT_SYMBOL(module_alloc_exec);
17753 +#endif
17754 +#endif
17755 +
17756 #ifdef CONFIG_X86_32
17757 int apply_relocate(Elf32_Shdr *sechdrs,
17758 const char *strtab,
17759 @@ -55,14 +101,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
17760 unsigned int i;
17761 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
17762 Elf32_Sym *sym;
17763 - uint32_t *location;
17764 + uint32_t *plocation, location;
17765
17766 DEBUGP("Applying relocate section %u to %u\n", relsec,
17767 sechdrs[relsec].sh_info);
17768 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
17769 /* This is where to make the change */
17770 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
17771 - + rel[i].r_offset;
17772 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
17773 + location = (uint32_t)plocation;
17774 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
17775 + plocation = ktla_ktva((void *)plocation);
17776 /* This is the symbol it is referring to. Note that all
17777 undefined symbols have been resolved. */
17778 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
17779 @@ -71,11 +119,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
17780 switch (ELF32_R_TYPE(rel[i].r_info)) {
17781 case R_386_32:
17782 /* We add the value into the location given */
17783 - *location += sym->st_value;
17784 + pax_open_kernel();
17785 + *plocation += sym->st_value;
17786 + pax_close_kernel();
17787 break;
17788 case R_386_PC32:
17789 /* Add the value, subtract its postition */
17790 - *location += sym->st_value - (uint32_t)location;
17791 + pax_open_kernel();
17792 + *plocation += sym->st_value - location;
17793 + pax_close_kernel();
17794 break;
17795 default:
17796 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
17797 @@ -120,21 +172,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
17798 case R_X86_64_NONE:
17799 break;
17800 case R_X86_64_64:
17801 + pax_open_kernel();
17802 *(u64 *)loc = val;
17803 + pax_close_kernel();
17804 break;
17805 case R_X86_64_32:
17806 + pax_open_kernel();
17807 *(u32 *)loc = val;
17808 + pax_close_kernel();
17809 if (val != *(u32 *)loc)
17810 goto overflow;
17811 break;
17812 case R_X86_64_32S:
17813 + pax_open_kernel();
17814 *(s32 *)loc = val;
17815 + pax_close_kernel();
17816 if ((s64)val != *(s32 *)loc)
17817 goto overflow;
17818 break;
17819 case R_X86_64_PC32:
17820 val -= (u64)loc;
17821 + pax_open_kernel();
17822 *(u32 *)loc = val;
17823 + pax_close_kernel();
17824 +
17825 #if 0
17826 if ((s64)val != *(s32 *)loc)
17827 goto overflow;
17828 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
17829 index 47acaf3..ec48ab6 100644
17830 --- a/arch/x86/kernel/nmi.c
17831 +++ b/arch/x86/kernel/nmi.c
17832 @@ -505,6 +505,17 @@ static inline void nmi_nesting_postprocess(void)
17833 dotraplinkage notrace __kprobes void
17834 do_nmi(struct pt_regs *regs, long error_code)
17835 {
17836 +
17837 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17838 + if (!user_mode(regs)) {
17839 + unsigned long cs = regs->cs & 0xFFFF;
17840 + unsigned long ip = ktva_ktla(regs->ip);
17841 +
17842 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17843 + regs->ip = ip;
17844 + }
17845 +#endif
17846 +
17847 nmi_nesting_preprocess(regs);
17848
17849 nmi_enter();
17850 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
17851 index 676b8c7..870ba04 100644
17852 --- a/arch/x86/kernel/paravirt-spinlocks.c
17853 +++ b/arch/x86/kernel/paravirt-spinlocks.c
17854 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
17855 arch_spin_lock(lock);
17856 }
17857
17858 -struct pv_lock_ops pv_lock_ops = {
17859 +struct pv_lock_ops pv_lock_ops __read_only = {
17860 #ifdef CONFIG_SMP
17861 .spin_is_locked = __ticket_spin_is_locked,
17862 .spin_is_contended = __ticket_spin_is_contended,
17863 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
17864 index d90272e..6bb013b 100644
17865 --- a/arch/x86/kernel/paravirt.c
17866 +++ b/arch/x86/kernel/paravirt.c
17867 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
17868 {
17869 return x;
17870 }
17871 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
17872 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
17873 +#endif
17874
17875 void __init default_banner(void)
17876 {
17877 @@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
17878 if (opfunc == NULL)
17879 /* If there's no function, patch it with a ud2a (BUG) */
17880 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
17881 - else if (opfunc == _paravirt_nop)
17882 + else if (opfunc == (void *)_paravirt_nop)
17883 /* If the operation is a nop, then nop the callsite */
17884 ret = paravirt_patch_nop();
17885
17886 /* identity functions just return their single argument */
17887 - else if (opfunc == _paravirt_ident_32)
17888 + else if (opfunc == (void *)_paravirt_ident_32)
17889 ret = paravirt_patch_ident_32(insnbuf, len);
17890 - else if (opfunc == _paravirt_ident_64)
17891 + else if (opfunc == (void *)_paravirt_ident_64)
17892 ret = paravirt_patch_ident_64(insnbuf, len);
17893 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
17894 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
17895 + ret = paravirt_patch_ident_64(insnbuf, len);
17896 +#endif
17897
17898 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
17899 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
17900 @@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
17901 if (insn_len > len || start == NULL)
17902 insn_len = len;
17903 else
17904 - memcpy(insnbuf, start, insn_len);
17905 + memcpy(insnbuf, ktla_ktva(start), insn_len);
17906
17907 return insn_len;
17908 }
17909 @@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
17910 preempt_enable();
17911 }
17912
17913 -struct pv_info pv_info = {
17914 +struct pv_info pv_info __read_only = {
17915 .name = "bare hardware",
17916 .paravirt_enabled = 0,
17917 .kernel_rpl = 0,
17918 @@ -313,16 +320,16 @@ struct pv_info pv_info = {
17919 #endif
17920 };
17921
17922 -struct pv_init_ops pv_init_ops = {
17923 +struct pv_init_ops pv_init_ops __read_only = {
17924 .patch = native_patch,
17925 };
17926
17927 -struct pv_time_ops pv_time_ops = {
17928 +struct pv_time_ops pv_time_ops __read_only = {
17929 .sched_clock = native_sched_clock,
17930 .steal_clock = native_steal_clock,
17931 };
17932
17933 -struct pv_irq_ops pv_irq_ops = {
17934 +struct pv_irq_ops pv_irq_ops __read_only = {
17935 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
17936 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
17937 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
17938 @@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
17939 #endif
17940 };
17941
17942 -struct pv_cpu_ops pv_cpu_ops = {
17943 +struct pv_cpu_ops pv_cpu_ops __read_only = {
17944 .cpuid = native_cpuid,
17945 .get_debugreg = native_get_debugreg,
17946 .set_debugreg = native_set_debugreg,
17947 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
17948 .end_context_switch = paravirt_nop,
17949 };
17950
17951 -struct pv_apic_ops pv_apic_ops = {
17952 +struct pv_apic_ops pv_apic_ops __read_only = {
17953 #ifdef CONFIG_X86_LOCAL_APIC
17954 .startup_ipi_hook = paravirt_nop,
17955 #endif
17956 };
17957
17958 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
17959 +#ifdef CONFIG_X86_32
17960 +#ifdef CONFIG_X86_PAE
17961 +/* 64-bit pagetable entries */
17962 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
17963 +#else
17964 /* 32-bit pagetable entries */
17965 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
17966 +#endif
17967 #else
17968 /* 64-bit pagetable entries */
17969 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
17970 #endif
17971
17972 -struct pv_mmu_ops pv_mmu_ops = {
17973 +struct pv_mmu_ops pv_mmu_ops __read_only = {
17974
17975 .read_cr2 = native_read_cr2,
17976 .write_cr2 = native_write_cr2,
17977 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
17978 .make_pud = PTE_IDENT,
17979
17980 .set_pgd = native_set_pgd,
17981 + .set_pgd_batched = native_set_pgd_batched,
17982 #endif
17983 #endif /* PAGETABLE_LEVELS >= 3 */
17984
17985 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
17986 },
17987
17988 .set_fixmap = native_set_fixmap,
17989 +
17990 +#ifdef CONFIG_PAX_KERNEXEC
17991 + .pax_open_kernel = native_pax_open_kernel,
17992 + .pax_close_kernel = native_pax_close_kernel,
17993 +#endif
17994 +
17995 };
17996
17997 EXPORT_SYMBOL_GPL(pv_time_ops);
17998 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
17999 index 35ccf75..7a15747 100644
18000 --- a/arch/x86/kernel/pci-iommu_table.c
18001 +++ b/arch/x86/kernel/pci-iommu_table.c
18002 @@ -2,7 +2,7 @@
18003 #include <asm/iommu_table.h>
18004 #include <linux/string.h>
18005 #include <linux/kallsyms.h>
18006 -
18007 +#include <linux/sched.h>
18008
18009 #define DEBUG 1
18010
18011 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18012 index 15763af..da59ada 100644
18013 --- a/arch/x86/kernel/process.c
18014 +++ b/arch/x86/kernel/process.c
18015 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
18016
18017 void free_thread_info(struct thread_info *ti)
18018 {
18019 - free_thread_xstate(ti->task);
18020 free_pages((unsigned long)ti, THREAD_ORDER);
18021 }
18022
18023 +static struct kmem_cache *task_struct_cachep;
18024 +
18025 void arch_task_cache_init(void)
18026 {
18027 - task_xstate_cachep =
18028 - kmem_cache_create("task_xstate", xstate_size,
18029 + /* create a slab on which task_structs can be allocated */
18030 + task_struct_cachep =
18031 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18032 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18033 +
18034 + task_xstate_cachep =
18035 + kmem_cache_create("task_xstate", xstate_size,
18036 __alignof__(union thread_xstate),
18037 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18038 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18039 +}
18040 +
18041 +struct task_struct *alloc_task_struct_node(int node)
18042 +{
18043 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18044 +}
18045 +
18046 +void free_task_struct(struct task_struct *task)
18047 +{
18048 + free_thread_xstate(task);
18049 + kmem_cache_free(task_struct_cachep, task);
18050 }
18051
18052 /*
18053 @@ -70,7 +87,7 @@ void exit_thread(void)
18054 unsigned long *bp = t->io_bitmap_ptr;
18055
18056 if (bp) {
18057 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18058 + struct tss_struct *tss = init_tss + get_cpu();
18059
18060 t->io_bitmap_ptr = NULL;
18061 clear_thread_flag(TIF_IO_BITMAP);
18062 @@ -106,7 +123,7 @@ void show_regs_common(void)
18063
18064 printk(KERN_CONT "\n");
18065 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18066 - current->pid, current->comm, print_tainted(),
18067 + task_pid_nr(current), current->comm, print_tainted(),
18068 init_utsname()->release,
18069 (int)strcspn(init_utsname()->version, " "),
18070 init_utsname()->version);
18071 @@ -120,6 +137,9 @@ void flush_thread(void)
18072 {
18073 struct task_struct *tsk = current;
18074
18075 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18076 + loadsegment(gs, 0);
18077 +#endif
18078 flush_ptrace_hw_breakpoint(tsk);
18079 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18080 /*
18081 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18082 regs.di = (unsigned long) arg;
18083
18084 #ifdef CONFIG_X86_32
18085 - regs.ds = __USER_DS;
18086 - regs.es = __USER_DS;
18087 + regs.ds = __KERNEL_DS;
18088 + regs.es = __KERNEL_DS;
18089 regs.fs = __KERNEL_PERCPU;
18090 - regs.gs = __KERNEL_STACK_CANARY;
18091 + savesegment(gs, regs.gs);
18092 #else
18093 regs.ss = __KERNEL_DS;
18094 #endif
18095 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
18096
18097 return ret;
18098 }
18099 -void stop_this_cpu(void *dummy)
18100 +__noreturn void stop_this_cpu(void *dummy)
18101 {
18102 local_irq_disable();
18103 /*
18104 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
18105 }
18106 early_param("idle", idle_setup);
18107
18108 -unsigned long arch_align_stack(unsigned long sp)
18109 +#ifdef CONFIG_PAX_RANDKSTACK
18110 +void pax_randomize_kstack(struct pt_regs *regs)
18111 {
18112 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18113 - sp -= get_random_int() % 8192;
18114 - return sp & ~0xf;
18115 -}
18116 + struct thread_struct *thread = &current->thread;
18117 + unsigned long time;
18118
18119 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18120 -{
18121 - unsigned long range_end = mm->brk + 0x02000000;
18122 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18123 -}
18124 + if (!randomize_va_space)
18125 + return;
18126 +
18127 + if (v8086_mode(regs))
18128 + return;
18129
18130 + rdtscl(time);
18131 +
18132 + /* P4 seems to return a 0 LSB, ignore it */
18133 +#ifdef CONFIG_MPENTIUM4
18134 + time &= 0x3EUL;
18135 + time <<= 2;
18136 +#elif defined(CONFIG_X86_64)
18137 + time &= 0xFUL;
18138 + time <<= 4;
18139 +#else
18140 + time &= 0x1FUL;
18141 + time <<= 3;
18142 +#endif
18143 +
18144 + thread->sp0 ^= time;
18145 + load_sp0(init_tss + smp_processor_id(), thread);
18146 +
18147 +#ifdef CONFIG_X86_64
18148 + percpu_write(kernel_stack, thread->sp0);
18149 +#endif
18150 +}
18151 +#endif
18152 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18153 index c08d1ff..6ae1c81 100644
18154 --- a/arch/x86/kernel/process_32.c
18155 +++ b/arch/x86/kernel/process_32.c
18156 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18157 unsigned long thread_saved_pc(struct task_struct *tsk)
18158 {
18159 return ((unsigned long *)tsk->thread.sp)[3];
18160 +//XXX return tsk->thread.eip;
18161 }
18162
18163 #ifndef CONFIG_SMP
18164 @@ -132,15 +133,14 @@ void __show_regs(struct pt_regs *regs, int all)
18165 unsigned long sp;
18166 unsigned short ss, gs;
18167
18168 - if (user_mode_vm(regs)) {
18169 + if (user_mode(regs)) {
18170 sp = regs->sp;
18171 ss = regs->ss & 0xffff;
18172 - gs = get_user_gs(regs);
18173 } else {
18174 sp = kernel_stack_pointer(regs);
18175 savesegment(ss, ss);
18176 - savesegment(gs, gs);
18177 }
18178 + gs = get_user_gs(regs);
18179
18180 show_regs_common();
18181
18182 @@ -202,13 +202,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18183 struct task_struct *tsk;
18184 int err;
18185
18186 - childregs = task_pt_regs(p);
18187 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18188 *childregs = *regs;
18189 childregs->ax = 0;
18190 childregs->sp = sp;
18191
18192 p->thread.sp = (unsigned long) childregs;
18193 p->thread.sp0 = (unsigned long) (childregs+1);
18194 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18195
18196 p->thread.ip = (unsigned long) ret_from_fork;
18197
18198 @@ -299,7 +300,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18199 struct thread_struct *prev = &prev_p->thread,
18200 *next = &next_p->thread;
18201 int cpu = smp_processor_id();
18202 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18203 + struct tss_struct *tss = init_tss + cpu;
18204 fpu_switch_t fpu;
18205
18206 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18207 @@ -323,6 +324,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18208 */
18209 lazy_save_gs(prev->gs);
18210
18211 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18212 + __set_fs(task_thread_info(next_p)->addr_limit);
18213 +#endif
18214 +
18215 /*
18216 * Load the per-thread Thread-Local Storage descriptor.
18217 */
18218 @@ -353,6 +358,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18219 */
18220 arch_end_context_switch(next_p);
18221
18222 + percpu_write(current_task, next_p);
18223 + percpu_write(current_tinfo, &next_p->tinfo);
18224 +
18225 /*
18226 * Restore %gs if needed (which is common)
18227 */
18228 @@ -361,8 +369,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18229
18230 switch_fpu_finish(next_p, fpu);
18231
18232 - percpu_write(current_task, next_p);
18233 -
18234 return prev_p;
18235 }
18236
18237 @@ -392,4 +398,3 @@ unsigned long get_wchan(struct task_struct *p)
18238 } while (count++ < 16);
18239 return 0;
18240 }
18241 -
18242 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18243 index cfa5c90..4facd28 100644
18244 --- a/arch/x86/kernel/process_64.c
18245 +++ b/arch/x86/kernel/process_64.c
18246 @@ -89,7 +89,7 @@ static void __exit_idle(void)
18247 void exit_idle(void)
18248 {
18249 /* idle loop has pid 0 */
18250 - if (current->pid)
18251 + if (task_pid_nr(current))
18252 return;
18253 __exit_idle();
18254 }
18255 @@ -270,8 +270,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18256 struct pt_regs *childregs;
18257 struct task_struct *me = current;
18258
18259 - childregs = ((struct pt_regs *)
18260 - (THREAD_SIZE + task_stack_page(p))) - 1;
18261 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18262 *childregs = *regs;
18263
18264 childregs->ax = 0;
18265 @@ -283,6 +282,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18266 p->thread.sp = (unsigned long) childregs;
18267 p->thread.sp0 = (unsigned long) (childregs+1);
18268 p->thread.usersp = me->thread.usersp;
18269 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18270
18271 set_tsk_thread_flag(p, TIF_FORK);
18272
18273 @@ -385,7 +385,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18274 struct thread_struct *prev = &prev_p->thread;
18275 struct thread_struct *next = &next_p->thread;
18276 int cpu = smp_processor_id();
18277 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18278 + struct tss_struct *tss = init_tss + cpu;
18279 unsigned fsindex, gsindex;
18280 fpu_switch_t fpu;
18281
18282 @@ -467,10 +467,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18283 prev->usersp = percpu_read(old_rsp);
18284 percpu_write(old_rsp, next->usersp);
18285 percpu_write(current_task, next_p);
18286 + percpu_write(current_tinfo, &next_p->tinfo);
18287
18288 - percpu_write(kernel_stack,
18289 - (unsigned long)task_stack_page(next_p) +
18290 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18291 + percpu_write(kernel_stack, next->sp0);
18292
18293 /*
18294 * Now maybe reload the debug registers and handle I/O bitmaps
18295 @@ -525,12 +524,11 @@ unsigned long get_wchan(struct task_struct *p)
18296 if (!p || p == current || p->state == TASK_RUNNING)
18297 return 0;
18298 stack = (unsigned long)task_stack_page(p);
18299 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18300 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18301 return 0;
18302 fp = *(u64 *)(p->thread.sp);
18303 do {
18304 - if (fp < (unsigned long)stack ||
18305 - fp >= (unsigned long)stack+THREAD_SIZE)
18306 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18307 return 0;
18308 ip = *(u64 *)(fp+8);
18309 if (!in_sched_functions(ip))
18310 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18311 index 5026738..e1b5aa8 100644
18312 --- a/arch/x86/kernel/ptrace.c
18313 +++ b/arch/x86/kernel/ptrace.c
18314 @@ -792,6 +792,10 @@ static int ioperm_active(struct task_struct *target,
18315 static int ioperm_get(struct task_struct *target,
18316 const struct user_regset *regset,
18317 unsigned int pos, unsigned int count,
18318 + void *kbuf, void __user *ubuf) __size_overflow(3,4);
18319 +static int ioperm_get(struct task_struct *target,
18320 + const struct user_regset *regset,
18321 + unsigned int pos, unsigned int count,
18322 void *kbuf, void __user *ubuf)
18323 {
18324 if (!target->thread.io_bitmap_ptr)
18325 @@ -823,7 +827,7 @@ long arch_ptrace(struct task_struct *child, long request,
18326 unsigned long addr, unsigned long data)
18327 {
18328 int ret;
18329 - unsigned long __user *datap = (unsigned long __user *)data;
18330 + unsigned long __user *datap = (__force unsigned long __user *)data;
18331
18332 switch (request) {
18333 /* read the word at location addr in the USER area. */
18334 @@ -908,14 +912,14 @@ long arch_ptrace(struct task_struct *child, long request,
18335 if ((int) addr < 0)
18336 return -EIO;
18337 ret = do_get_thread_area(child, addr,
18338 - (struct user_desc __user *)data);
18339 + (__force struct user_desc __user *) data);
18340 break;
18341
18342 case PTRACE_SET_THREAD_AREA:
18343 if ((int) addr < 0)
18344 return -EIO;
18345 ret = do_set_thread_area(child, addr,
18346 - (struct user_desc __user *)data, 0);
18347 + (__force struct user_desc __user *) data, 0);
18348 break;
18349 #endif
18350
18351 @@ -1332,7 +1336,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18352 memset(info, 0, sizeof(*info));
18353 info->si_signo = SIGTRAP;
18354 info->si_code = si_code;
18355 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18356 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18357 }
18358
18359 void user_single_step_siginfo(struct task_struct *tsk,
18360 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18361 index 42eb330..139955c 100644
18362 --- a/arch/x86/kernel/pvclock.c
18363 +++ b/arch/x86/kernel/pvclock.c
18364 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18365 return pv_tsc_khz;
18366 }
18367
18368 -static atomic64_t last_value = ATOMIC64_INIT(0);
18369 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18370
18371 void pvclock_resume(void)
18372 {
18373 - atomic64_set(&last_value, 0);
18374 + atomic64_set_unchecked(&last_value, 0);
18375 }
18376
18377 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18378 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18379 * updating at the same time, and one of them could be slightly behind,
18380 * making the assumption that last_value always go forward fail to hold.
18381 */
18382 - last = atomic64_read(&last_value);
18383 + last = atomic64_read_unchecked(&last_value);
18384 do {
18385 if (ret < last)
18386 return last;
18387 - last = atomic64_cmpxchg(&last_value, last, ret);
18388 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18389 } while (unlikely(last != ret));
18390
18391 return ret;
18392 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18393 index d840e69..98e9581 100644
18394 --- a/arch/x86/kernel/reboot.c
18395 +++ b/arch/x86/kernel/reboot.c
18396 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18397 EXPORT_SYMBOL(pm_power_off);
18398
18399 static const struct desc_ptr no_idt = {};
18400 -static int reboot_mode;
18401 +static unsigned short reboot_mode;
18402 enum reboot_type reboot_type = BOOT_ACPI;
18403 int reboot_force;
18404
18405 @@ -335,13 +335,17 @@ core_initcall(reboot_init);
18406 extern const unsigned char machine_real_restart_asm[];
18407 extern const u64 machine_real_restart_gdt[3];
18408
18409 -void machine_real_restart(unsigned int type)
18410 +__noreturn void machine_real_restart(unsigned int type)
18411 {
18412 void *restart_va;
18413 unsigned long restart_pa;
18414 - void (*restart_lowmem)(unsigned int);
18415 + void (* __noreturn restart_lowmem)(unsigned int);
18416 u64 *lowmem_gdt;
18417
18418 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18419 + struct desc_struct *gdt;
18420 +#endif
18421 +
18422 local_irq_disable();
18423
18424 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18425 @@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18426 boot)". This seems like a fairly standard thing that gets set by
18427 REBOOT.COM programs, and the previous reset routine did this
18428 too. */
18429 - *((unsigned short *)0x472) = reboot_mode;
18430 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18431
18432 /* Patch the GDT in the low memory trampoline */
18433 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18434
18435 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18436 restart_pa = virt_to_phys(restart_va);
18437 - restart_lowmem = (void (*)(unsigned int))restart_pa;
18438 + restart_lowmem = (void *)restart_pa;
18439
18440 /* GDT[0]: GDT self-pointer */
18441 lowmem_gdt[0] =
18442 @@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18443 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18444
18445 /* Jump to the identity-mapped low memory code */
18446 +
18447 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18448 + gdt = get_cpu_gdt_table(smp_processor_id());
18449 + pax_open_kernel();
18450 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18451 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18452 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18453 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18454 +#endif
18455 +#ifdef CONFIG_PAX_KERNEXEC
18456 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18457 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18458 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18459 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18460 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18461 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18462 +#endif
18463 + pax_close_kernel();
18464 +#endif
18465 +
18466 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18467 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18468 + unreachable();
18469 +#else
18470 restart_lowmem(type);
18471 +#endif
18472 +
18473 }
18474 #ifdef CONFIG_APM_MODULE
18475 EXPORT_SYMBOL(machine_real_restart);
18476 @@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18477 * try to force a triple fault and then cycle between hitting the keyboard
18478 * controller and doing that
18479 */
18480 -static void native_machine_emergency_restart(void)
18481 +__noreturn static void native_machine_emergency_restart(void)
18482 {
18483 int i;
18484 int attempt = 0;
18485 @@ -680,13 +710,13 @@ void native_machine_shutdown(void)
18486 #endif
18487 }
18488
18489 -static void __machine_emergency_restart(int emergency)
18490 +static __noreturn void __machine_emergency_restart(int emergency)
18491 {
18492 reboot_emergency = emergency;
18493 machine_ops.emergency_restart();
18494 }
18495
18496 -static void native_machine_restart(char *__unused)
18497 +static __noreturn void native_machine_restart(char *__unused)
18498 {
18499 printk("machine restart\n");
18500
18501 @@ -695,7 +725,7 @@ static void native_machine_restart(char *__unused)
18502 __machine_emergency_restart(0);
18503 }
18504
18505 -static void native_machine_halt(void)
18506 +static __noreturn void native_machine_halt(void)
18507 {
18508 /* stop other cpus and apics */
18509 machine_shutdown();
18510 @@ -706,7 +736,7 @@ static void native_machine_halt(void)
18511 stop_this_cpu(NULL);
18512 }
18513
18514 -static void native_machine_power_off(void)
18515 +__noreturn static void native_machine_power_off(void)
18516 {
18517 if (pm_power_off) {
18518 if (!reboot_force)
18519 @@ -715,6 +745,7 @@ static void native_machine_power_off(void)
18520 }
18521 /* a fallback in case there is no PM info available */
18522 tboot_shutdown(TB_SHUTDOWN_HALT);
18523 + unreachable();
18524 }
18525
18526 struct machine_ops machine_ops = {
18527 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18528 index 7a6f3b3..bed145d7 100644
18529 --- a/arch/x86/kernel/relocate_kernel_64.S
18530 +++ b/arch/x86/kernel/relocate_kernel_64.S
18531 @@ -11,6 +11,7 @@
18532 #include <asm/kexec.h>
18533 #include <asm/processor-flags.h>
18534 #include <asm/pgtable_types.h>
18535 +#include <asm/alternative-asm.h>
18536
18537 /*
18538 * Must be relocatable PIC code callable as a C function
18539 @@ -160,13 +161,14 @@ identity_mapped:
18540 xorq %rbp, %rbp
18541 xorq %r8, %r8
18542 xorq %r9, %r9
18543 - xorq %r10, %r9
18544 + xorq %r10, %r10
18545 xorq %r11, %r11
18546 xorq %r12, %r12
18547 xorq %r13, %r13
18548 xorq %r14, %r14
18549 xorq %r15, %r15
18550
18551 + pax_force_retaddr 0, 1
18552 ret
18553
18554 1:
18555 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18556 index d7d5099..28555d0 100644
18557 --- a/arch/x86/kernel/setup.c
18558 +++ b/arch/x86/kernel/setup.c
18559 @@ -448,7 +448,7 @@ static void __init parse_setup_data(void)
18560
18561 switch (data->type) {
18562 case SETUP_E820_EXT:
18563 - parse_e820_ext(data);
18564 + parse_e820_ext((struct setup_data __force_kernel *)data);
18565 break;
18566 case SETUP_DTB:
18567 add_dtb(pa_data);
18568 @@ -649,7 +649,7 @@ static void __init trim_bios_range(void)
18569 * area (640->1Mb) as ram even though it is not.
18570 * take them out.
18571 */
18572 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
18573 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
18574 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
18575 }
18576
18577 @@ -767,14 +767,14 @@ void __init setup_arch(char **cmdline_p)
18578
18579 if (!boot_params.hdr.root_flags)
18580 root_mountflags &= ~MS_RDONLY;
18581 - init_mm.start_code = (unsigned long) _text;
18582 - init_mm.end_code = (unsigned long) _etext;
18583 + init_mm.start_code = ktla_ktva((unsigned long) _text);
18584 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
18585 init_mm.end_data = (unsigned long) _edata;
18586 init_mm.brk = _brk_end;
18587
18588 - code_resource.start = virt_to_phys(_text);
18589 - code_resource.end = virt_to_phys(_etext)-1;
18590 - data_resource.start = virt_to_phys(_etext);
18591 + code_resource.start = virt_to_phys(ktla_ktva(_text));
18592 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
18593 + data_resource.start = virt_to_phys(_sdata);
18594 data_resource.end = virt_to_phys(_edata)-1;
18595 bss_resource.start = virt_to_phys(&__bss_start);
18596 bss_resource.end = virt_to_phys(&__bss_stop)-1;
18597 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
18598 index 71f4727..217419b 100644
18599 --- a/arch/x86/kernel/setup_percpu.c
18600 +++ b/arch/x86/kernel/setup_percpu.c
18601 @@ -21,19 +21,17 @@
18602 #include <asm/cpu.h>
18603 #include <asm/stackprotector.h>
18604
18605 -DEFINE_PER_CPU(int, cpu_number);
18606 +#ifdef CONFIG_SMP
18607 +DEFINE_PER_CPU(unsigned int, cpu_number);
18608 EXPORT_PER_CPU_SYMBOL(cpu_number);
18609 +#endif
18610
18611 -#ifdef CONFIG_X86_64
18612 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
18613 -#else
18614 -#define BOOT_PERCPU_OFFSET 0
18615 -#endif
18616
18617 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
18618 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
18619
18620 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
18621 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
18622 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
18623 };
18624 EXPORT_SYMBOL(__per_cpu_offset);
18625 @@ -96,6 +94,8 @@ static bool __init pcpu_need_numa(void)
18626 * Pointer to the allocated area on success, NULL on failure.
18627 */
18628 static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
18629 + unsigned long align) __size_overflow(2);
18630 +static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
18631 unsigned long align)
18632 {
18633 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
18634 @@ -124,6 +124,8 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
18635 /*
18636 * Helpers for first chunk memory allocation
18637 */
18638 +static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) __size_overflow(2);
18639 +
18640 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
18641 {
18642 return pcpu_alloc_bootmem(cpu, size, align);
18643 @@ -155,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
18644 {
18645 #ifdef CONFIG_X86_32
18646 struct desc_struct gdt;
18647 + unsigned long base = per_cpu_offset(cpu);
18648
18649 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
18650 - 0x2 | DESCTYPE_S, 0x8);
18651 - gdt.s = 1;
18652 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
18653 + 0x83 | DESCTYPE_S, 0xC);
18654 write_gdt_entry(get_cpu_gdt_table(cpu),
18655 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
18656 #endif
18657 @@ -207,6 +209,11 @@ void __init setup_per_cpu_areas(void)
18658 /* alrighty, percpu areas up and running */
18659 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
18660 for_each_possible_cpu(cpu) {
18661 +#ifdef CONFIG_CC_STACKPROTECTOR
18662 +#ifdef CONFIG_X86_32
18663 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
18664 +#endif
18665 +#endif
18666 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
18667 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
18668 per_cpu(cpu_number, cpu) = cpu;
18669 @@ -247,6 +254,12 @@ void __init setup_per_cpu_areas(void)
18670 */
18671 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
18672 #endif
18673 +#ifdef CONFIG_CC_STACKPROTECTOR
18674 +#ifdef CONFIG_X86_32
18675 + if (!cpu)
18676 + per_cpu(stack_canary.canary, cpu) = canary;
18677 +#endif
18678 +#endif
18679 /*
18680 * Up to this point, the boot CPU has been using .init.data
18681 * area. Reload any changed state for the boot CPU.
18682 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
18683 index 46a01bd..2e88e6d 100644
18684 --- a/arch/x86/kernel/signal.c
18685 +++ b/arch/x86/kernel/signal.c
18686 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
18687 * Align the stack pointer according to the i386 ABI,
18688 * i.e. so that on function entry ((sp + 4) & 15) == 0.
18689 */
18690 - sp = ((sp + 4) & -16ul) - 4;
18691 + sp = ((sp - 12) & -16ul) - 4;
18692 #else /* !CONFIG_X86_32 */
18693 sp = round_down(sp, 16) - 8;
18694 #endif
18695 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
18696 * Return an always-bogus address instead so we will die with SIGSEGV.
18697 */
18698 if (onsigstack && !likely(on_sig_stack(sp)))
18699 - return (void __user *)-1L;
18700 + return (__force void __user *)-1L;
18701
18702 /* save i387 state */
18703 if (used_math() && save_i387_xstate(*fpstate) < 0)
18704 - return (void __user *)-1L;
18705 + return (__force void __user *)-1L;
18706
18707 return (void __user *)sp;
18708 }
18709 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
18710 }
18711
18712 if (current->mm->context.vdso)
18713 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
18714 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
18715 else
18716 - restorer = &frame->retcode;
18717 + restorer = (void __user *)&frame->retcode;
18718 if (ka->sa.sa_flags & SA_RESTORER)
18719 restorer = ka->sa.sa_restorer;
18720
18721 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
18722 * reasons and because gdb uses it as a signature to notice
18723 * signal handler stack frames.
18724 */
18725 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
18726 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
18727
18728 if (err)
18729 return -EFAULT;
18730 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
18731 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
18732
18733 /* Set up to return from userspace. */
18734 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
18735 + if (current->mm->context.vdso)
18736 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
18737 + else
18738 + restorer = (void __user *)&frame->retcode;
18739 if (ka->sa.sa_flags & SA_RESTORER)
18740 restorer = ka->sa.sa_restorer;
18741 put_user_ex(restorer, &frame->pretcode);
18742 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
18743 * reasons and because gdb uses it as a signature to notice
18744 * signal handler stack frames.
18745 */
18746 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
18747 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
18748 } put_user_catch(err);
18749
18750 if (err)
18751 @@ -765,7 +768,7 @@ static void do_signal(struct pt_regs *regs)
18752 * X86_32: vm86 regs switched out by assembly code before reaching
18753 * here, so testing against kernel CS suffices.
18754 */
18755 - if (!user_mode(regs))
18756 + if (!user_mode_novm(regs))
18757 return;
18758
18759 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
18760 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
18761 index 66d250c..f1b10bd 100644
18762 --- a/arch/x86/kernel/smpboot.c
18763 +++ b/arch/x86/kernel/smpboot.c
18764 @@ -715,17 +715,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
18765 set_idle_for_cpu(cpu, c_idle.idle);
18766 do_rest:
18767 per_cpu(current_task, cpu) = c_idle.idle;
18768 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
18769 #ifdef CONFIG_X86_32
18770 /* Stack for startup_32 can be just as for start_secondary onwards */
18771 irq_ctx_init(cpu);
18772 #else
18773 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
18774 initial_gs = per_cpu_offset(cpu);
18775 - per_cpu(kernel_stack, cpu) =
18776 - (unsigned long)task_stack_page(c_idle.idle) -
18777 - KERNEL_STACK_OFFSET + THREAD_SIZE;
18778 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
18779 #endif
18780 +
18781 + pax_open_kernel();
18782 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
18783 + pax_close_kernel();
18784 +
18785 initial_code = (unsigned long)start_secondary;
18786 stack_start = c_idle.idle->thread.sp;
18787
18788 @@ -868,6 +871,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
18789
18790 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
18791
18792 +#ifdef CONFIG_PAX_PER_CPU_PGD
18793 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
18794 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18795 + KERNEL_PGD_PTRS);
18796 +#endif
18797 +
18798 err = do_boot_cpu(apicid, cpu);
18799 if (err) {
18800 pr_debug("do_boot_cpu failed %d\n", err);
18801 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
18802 index c346d11..d43b163 100644
18803 --- a/arch/x86/kernel/step.c
18804 +++ b/arch/x86/kernel/step.c
18805 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
18806 struct desc_struct *desc;
18807 unsigned long base;
18808
18809 - seg &= ~7UL;
18810 + seg >>= 3;
18811
18812 mutex_lock(&child->mm->context.lock);
18813 - if (unlikely((seg >> 3) >= child->mm->context.size))
18814 + if (unlikely(seg >= child->mm->context.size))
18815 addr = -1L; /* bogus selector, access would fault */
18816 else {
18817 desc = child->mm->context.ldt + seg;
18818 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
18819 addr += base;
18820 }
18821 mutex_unlock(&child->mm->context.lock);
18822 - }
18823 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
18824 + addr = ktla_ktva(addr);
18825
18826 return addr;
18827 }
18828 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
18829 unsigned char opcode[15];
18830 unsigned long addr = convert_ip_to_linear(child, regs);
18831
18832 + if (addr == -EINVAL)
18833 + return 0;
18834 +
18835 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
18836 for (i = 0; i < copied; i++) {
18837 switch (opcode[i]) {
18838 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
18839 index 0b0cb5f..db6b9ed 100644
18840 --- a/arch/x86/kernel/sys_i386_32.c
18841 +++ b/arch/x86/kernel/sys_i386_32.c
18842 @@ -24,17 +24,224 @@
18843
18844 #include <asm/syscalls.h>
18845
18846 -/*
18847 - * Do a system call from kernel instead of calling sys_execve so we
18848 - * end up with proper pt_regs.
18849 - */
18850 -int kernel_execve(const char *filename,
18851 - const char *const argv[],
18852 - const char *const envp[])
18853 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
18854 {
18855 - long __res;
18856 - asm volatile ("int $0x80"
18857 - : "=a" (__res)
18858 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
18859 - return __res;
18860 + unsigned long pax_task_size = TASK_SIZE;
18861 +
18862 +#ifdef CONFIG_PAX_SEGMEXEC
18863 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
18864 + pax_task_size = SEGMEXEC_TASK_SIZE;
18865 +#endif
18866 +
18867 + if (len > pax_task_size || addr > pax_task_size - len)
18868 + return -EINVAL;
18869 +
18870 + return 0;
18871 +}
18872 +
18873 +unsigned long
18874 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
18875 + unsigned long len, unsigned long pgoff, unsigned long flags)
18876 +{
18877 + struct mm_struct *mm = current->mm;
18878 + struct vm_area_struct *vma;
18879 + unsigned long start_addr, pax_task_size = TASK_SIZE;
18880 +
18881 +#ifdef CONFIG_PAX_SEGMEXEC
18882 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18883 + pax_task_size = SEGMEXEC_TASK_SIZE;
18884 +#endif
18885 +
18886 + pax_task_size -= PAGE_SIZE;
18887 +
18888 + if (len > pax_task_size)
18889 + return -ENOMEM;
18890 +
18891 + if (flags & MAP_FIXED)
18892 + return addr;
18893 +
18894 +#ifdef CONFIG_PAX_RANDMMAP
18895 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18896 +#endif
18897 +
18898 + if (addr) {
18899 + addr = PAGE_ALIGN(addr);
18900 + if (pax_task_size - len >= addr) {
18901 + vma = find_vma(mm, addr);
18902 + if (check_heap_stack_gap(vma, addr, len))
18903 + return addr;
18904 + }
18905 + }
18906 + if (len > mm->cached_hole_size) {
18907 + start_addr = addr = mm->free_area_cache;
18908 + } else {
18909 + start_addr = addr = mm->mmap_base;
18910 + mm->cached_hole_size = 0;
18911 + }
18912 +
18913 +#ifdef CONFIG_PAX_PAGEEXEC
18914 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
18915 + start_addr = 0x00110000UL;
18916 +
18917 +#ifdef CONFIG_PAX_RANDMMAP
18918 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18919 + start_addr += mm->delta_mmap & 0x03FFF000UL;
18920 +#endif
18921 +
18922 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
18923 + start_addr = addr = mm->mmap_base;
18924 + else
18925 + addr = start_addr;
18926 + }
18927 +#endif
18928 +
18929 +full_search:
18930 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
18931 + /* At this point: (!vma || addr < vma->vm_end). */
18932 + if (pax_task_size - len < addr) {
18933 + /*
18934 + * Start a new search - just in case we missed
18935 + * some holes.
18936 + */
18937 + if (start_addr != mm->mmap_base) {
18938 + start_addr = addr = mm->mmap_base;
18939 + mm->cached_hole_size = 0;
18940 + goto full_search;
18941 + }
18942 + return -ENOMEM;
18943 + }
18944 + if (check_heap_stack_gap(vma, addr, len))
18945 + break;
18946 + if (addr + mm->cached_hole_size < vma->vm_start)
18947 + mm->cached_hole_size = vma->vm_start - addr;
18948 + addr = vma->vm_end;
18949 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
18950 + start_addr = addr = mm->mmap_base;
18951 + mm->cached_hole_size = 0;
18952 + goto full_search;
18953 + }
18954 + }
18955 +
18956 + /*
18957 + * Remember the place where we stopped the search:
18958 + */
18959 + mm->free_area_cache = addr + len;
18960 + return addr;
18961 +}
18962 +
18963 +unsigned long
18964 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18965 + const unsigned long len, const unsigned long pgoff,
18966 + const unsigned long flags)
18967 +{
18968 + struct vm_area_struct *vma;
18969 + struct mm_struct *mm = current->mm;
18970 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
18971 +
18972 +#ifdef CONFIG_PAX_SEGMEXEC
18973 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18974 + pax_task_size = SEGMEXEC_TASK_SIZE;
18975 +#endif
18976 +
18977 + pax_task_size -= PAGE_SIZE;
18978 +
18979 + /* requested length too big for entire address space */
18980 + if (len > pax_task_size)
18981 + return -ENOMEM;
18982 +
18983 + if (flags & MAP_FIXED)
18984 + return addr;
18985 +
18986 +#ifdef CONFIG_PAX_PAGEEXEC
18987 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
18988 + goto bottomup;
18989 +#endif
18990 +
18991 +#ifdef CONFIG_PAX_RANDMMAP
18992 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18993 +#endif
18994 +
18995 + /* requesting a specific address */
18996 + if (addr) {
18997 + addr = PAGE_ALIGN(addr);
18998 + if (pax_task_size - len >= addr) {
18999 + vma = find_vma(mm, addr);
19000 + if (check_heap_stack_gap(vma, addr, len))
19001 + return addr;
19002 + }
19003 + }
19004 +
19005 + /* check if free_area_cache is useful for us */
19006 + if (len <= mm->cached_hole_size) {
19007 + mm->cached_hole_size = 0;
19008 + mm->free_area_cache = mm->mmap_base;
19009 + }
19010 +
19011 + /* either no address requested or can't fit in requested address hole */
19012 + addr = mm->free_area_cache;
19013 +
19014 + /* make sure it can fit in the remaining address space */
19015 + if (addr > len) {
19016 + vma = find_vma(mm, addr-len);
19017 + if (check_heap_stack_gap(vma, addr - len, len))
19018 + /* remember the address as a hint for next time */
19019 + return (mm->free_area_cache = addr-len);
19020 + }
19021 +
19022 + if (mm->mmap_base < len)
19023 + goto bottomup;
19024 +
19025 + addr = mm->mmap_base-len;
19026 +
19027 + do {
19028 + /*
19029 + * Lookup failure means no vma is above this address,
19030 + * else if new region fits below vma->vm_start,
19031 + * return with success:
19032 + */
19033 + vma = find_vma(mm, addr);
19034 + if (check_heap_stack_gap(vma, addr, len))
19035 + /* remember the address as a hint for next time */
19036 + return (mm->free_area_cache = addr);
19037 +
19038 + /* remember the largest hole we saw so far */
19039 + if (addr + mm->cached_hole_size < vma->vm_start)
19040 + mm->cached_hole_size = vma->vm_start - addr;
19041 +
19042 + /* try just below the current vma->vm_start */
19043 + addr = skip_heap_stack_gap(vma, len);
19044 + } while (!IS_ERR_VALUE(addr));
19045 +
19046 +bottomup:
19047 + /*
19048 + * A failed mmap() very likely causes application failure,
19049 + * so fall back to the bottom-up function here. This scenario
19050 + * can happen with large stack limits and large mmap()
19051 + * allocations.
19052 + */
19053 +
19054 +#ifdef CONFIG_PAX_SEGMEXEC
19055 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19056 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19057 + else
19058 +#endif
19059 +
19060 + mm->mmap_base = TASK_UNMAPPED_BASE;
19061 +
19062 +#ifdef CONFIG_PAX_RANDMMAP
19063 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19064 + mm->mmap_base += mm->delta_mmap;
19065 +#endif
19066 +
19067 + mm->free_area_cache = mm->mmap_base;
19068 + mm->cached_hole_size = ~0UL;
19069 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19070 + /*
19071 + * Restore the topdown base:
19072 + */
19073 + mm->mmap_base = base;
19074 + mm->free_area_cache = base;
19075 + mm->cached_hole_size = ~0UL;
19076 +
19077 + return addr;
19078 }
19079 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19080 index 0514890..3dbebce 100644
19081 --- a/arch/x86/kernel/sys_x86_64.c
19082 +++ b/arch/x86/kernel/sys_x86_64.c
19083 @@ -95,8 +95,8 @@ out:
19084 return error;
19085 }
19086
19087 -static void find_start_end(unsigned long flags, unsigned long *begin,
19088 - unsigned long *end)
19089 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19090 + unsigned long *begin, unsigned long *end)
19091 {
19092 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19093 unsigned long new_begin;
19094 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19095 *begin = new_begin;
19096 }
19097 } else {
19098 - *begin = TASK_UNMAPPED_BASE;
19099 + *begin = mm->mmap_base;
19100 *end = TASK_SIZE;
19101 }
19102 }
19103 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19104 if (flags & MAP_FIXED)
19105 return addr;
19106
19107 - find_start_end(flags, &begin, &end);
19108 + find_start_end(mm, flags, &begin, &end);
19109
19110 if (len > end)
19111 return -ENOMEM;
19112
19113 +#ifdef CONFIG_PAX_RANDMMAP
19114 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19115 +#endif
19116 +
19117 if (addr) {
19118 addr = PAGE_ALIGN(addr);
19119 vma = find_vma(mm, addr);
19120 - if (end - len >= addr &&
19121 - (!vma || addr + len <= vma->vm_start))
19122 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19123 return addr;
19124 }
19125 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19126 @@ -172,7 +175,7 @@ full_search:
19127 }
19128 return -ENOMEM;
19129 }
19130 - if (!vma || addr + len <= vma->vm_start) {
19131 + if (check_heap_stack_gap(vma, addr, len)) {
19132 /*
19133 * Remember the place where we stopped the search:
19134 */
19135 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19136 {
19137 struct vm_area_struct *vma;
19138 struct mm_struct *mm = current->mm;
19139 - unsigned long addr = addr0;
19140 + unsigned long base = mm->mmap_base, addr = addr0;
19141
19142 /* requested length too big for entire address space */
19143 if (len > TASK_SIZE)
19144 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19145 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19146 goto bottomup;
19147
19148 +#ifdef CONFIG_PAX_RANDMMAP
19149 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19150 +#endif
19151 +
19152 /* requesting a specific address */
19153 if (addr) {
19154 addr = PAGE_ALIGN(addr);
19155 - vma = find_vma(mm, addr);
19156 - if (TASK_SIZE - len >= addr &&
19157 - (!vma || addr + len <= vma->vm_start))
19158 - return addr;
19159 + if (TASK_SIZE - len >= addr) {
19160 + vma = find_vma(mm, addr);
19161 + if (check_heap_stack_gap(vma, addr, len))
19162 + return addr;
19163 + }
19164 }
19165
19166 /* check if free_area_cache is useful for us */
19167 @@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19168 ALIGN_TOPDOWN);
19169
19170 vma = find_vma(mm, tmp_addr);
19171 - if (!vma || tmp_addr + len <= vma->vm_start)
19172 + if (check_heap_stack_gap(vma, tmp_addr, len))
19173 /* remember the address as a hint for next time */
19174 return mm->free_area_cache = tmp_addr;
19175 }
19176 @@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19177 * return with success:
19178 */
19179 vma = find_vma(mm, addr);
19180 - if (!vma || addr+len <= vma->vm_start)
19181 + if (check_heap_stack_gap(vma, addr, len))
19182 /* remember the address as a hint for next time */
19183 return mm->free_area_cache = addr;
19184
19185 @@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19186 mm->cached_hole_size = vma->vm_start - addr;
19187
19188 /* try just below the current vma->vm_start */
19189 - addr = vma->vm_start-len;
19190 - } while (len < vma->vm_start);
19191 + addr = skip_heap_stack_gap(vma, len);
19192 + } while (!IS_ERR_VALUE(addr));
19193
19194 bottomup:
19195 /*
19196 @@ -270,13 +278,21 @@ bottomup:
19197 * can happen with large stack limits and large mmap()
19198 * allocations.
19199 */
19200 + mm->mmap_base = TASK_UNMAPPED_BASE;
19201 +
19202 +#ifdef CONFIG_PAX_RANDMMAP
19203 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19204 + mm->mmap_base += mm->delta_mmap;
19205 +#endif
19206 +
19207 + mm->free_area_cache = mm->mmap_base;
19208 mm->cached_hole_size = ~0UL;
19209 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19210 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19211 /*
19212 * Restore the topdown base:
19213 */
19214 - mm->free_area_cache = mm->mmap_base;
19215 + mm->mmap_base = base;
19216 + mm->free_area_cache = base;
19217 mm->cached_hole_size = ~0UL;
19218
19219 return addr;
19220 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19221 index e2410e2..4fe3fbc 100644
19222 --- a/arch/x86/kernel/tboot.c
19223 +++ b/arch/x86/kernel/tboot.c
19224 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
19225
19226 void tboot_shutdown(u32 shutdown_type)
19227 {
19228 - void (*shutdown)(void);
19229 + void (* __noreturn shutdown)(void);
19230
19231 if (!tboot_enabled())
19232 return;
19233 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
19234
19235 switch_to_tboot_pt();
19236
19237 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19238 + shutdown = (void *)tboot->shutdown_entry;
19239 shutdown();
19240
19241 /* should not reach here */
19242 @@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19243 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19244 }
19245
19246 -static atomic_t ap_wfs_count;
19247 +static atomic_unchecked_t ap_wfs_count;
19248
19249 static int tboot_wait_for_aps(int num_aps)
19250 {
19251 @@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19252 {
19253 switch (action) {
19254 case CPU_DYING:
19255 - atomic_inc(&ap_wfs_count);
19256 + atomic_inc_unchecked(&ap_wfs_count);
19257 if (num_online_cpus() == 1)
19258 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19259 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19260 return NOTIFY_BAD;
19261 break;
19262 }
19263 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
19264
19265 tboot_create_trampoline();
19266
19267 - atomic_set(&ap_wfs_count, 0);
19268 + atomic_set_unchecked(&ap_wfs_count, 0);
19269 register_hotcpu_notifier(&tboot_cpu_notifier);
19270 return 0;
19271 }
19272 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19273 index dd5fbf4..b7f2232 100644
19274 --- a/arch/x86/kernel/time.c
19275 +++ b/arch/x86/kernel/time.c
19276 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19277 {
19278 unsigned long pc = instruction_pointer(regs);
19279
19280 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19281 + if (!user_mode(regs) && in_lock_functions(pc)) {
19282 #ifdef CONFIG_FRAME_POINTER
19283 - return *(unsigned long *)(regs->bp + sizeof(long));
19284 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19285 #else
19286 unsigned long *sp =
19287 (unsigned long *)kernel_stack_pointer(regs);
19288 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19289 * or above a saved flags. Eflags has bits 22-31 zero,
19290 * kernel addresses don't.
19291 */
19292 +
19293 +#ifdef CONFIG_PAX_KERNEXEC
19294 + return ktla_ktva(sp[0]);
19295 +#else
19296 if (sp[0] >> 22)
19297 return sp[0];
19298 if (sp[1] >> 22)
19299 return sp[1];
19300 #endif
19301 +
19302 +#endif
19303 }
19304 return pc;
19305 }
19306 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19307 index bcfec2d..8f88b4a 100644
19308 --- a/arch/x86/kernel/tls.c
19309 +++ b/arch/x86/kernel/tls.c
19310 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19311 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19312 return -EINVAL;
19313
19314 +#ifdef CONFIG_PAX_SEGMEXEC
19315 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19316 + return -EINVAL;
19317 +#endif
19318 +
19319 set_tls_desc(p, idx, &info, 1);
19320
19321 return 0;
19322 diff --git a/arch/x86/kernel/tls.h b/arch/x86/kernel/tls.h
19323 index 2f083a2..7d3fecc 100644
19324 --- a/arch/x86/kernel/tls.h
19325 +++ b/arch/x86/kernel/tls.h
19326 @@ -16,6 +16,6 @@
19327
19328 extern user_regset_active_fn regset_tls_active;
19329 extern user_regset_get_fn regset_tls_get;
19330 -extern user_regset_set_fn regset_tls_set;
19331 +extern user_regset_set_fn regset_tls_set __size_overflow(4);
19332
19333 #endif /* _ARCH_X86_KERNEL_TLS_H */
19334 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19335 index 451c0a7..e57f551 100644
19336 --- a/arch/x86/kernel/trampoline_32.S
19337 +++ b/arch/x86/kernel/trampoline_32.S
19338 @@ -32,6 +32,12 @@
19339 #include <asm/segment.h>
19340 #include <asm/page_types.h>
19341
19342 +#ifdef CONFIG_PAX_KERNEXEC
19343 +#define ta(X) (X)
19344 +#else
19345 +#define ta(X) ((X) - __PAGE_OFFSET)
19346 +#endif
19347 +
19348 #ifdef CONFIG_SMP
19349
19350 .section ".x86_trampoline","a"
19351 @@ -62,7 +68,7 @@ r_base = .
19352 inc %ax # protected mode (PE) bit
19353 lmsw %ax # into protected mode
19354 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19355 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19356 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19357
19358 # These need to be in the same 64K segment as the above;
19359 # hence we don't use the boot_gdt_descr defined in head.S
19360 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19361 index 09ff517..df19fbff 100644
19362 --- a/arch/x86/kernel/trampoline_64.S
19363 +++ b/arch/x86/kernel/trampoline_64.S
19364 @@ -90,7 +90,7 @@ startup_32:
19365 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19366 movl %eax, %ds
19367
19368 - movl $X86_CR4_PAE, %eax
19369 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19370 movl %eax, %cr4 # Enable PAE mode
19371
19372 # Setup trampoline 4 level pagetables
19373 @@ -138,7 +138,7 @@ tidt:
19374 # so the kernel can live anywhere
19375 .balign 4
19376 tgdt:
19377 - .short tgdt_end - tgdt # gdt limit
19378 + .short tgdt_end - tgdt - 1 # gdt limit
19379 .long tgdt - r_base
19380 .short 0
19381 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19382 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19383 index 4bbe04d..41d0943 100644
19384 --- a/arch/x86/kernel/traps.c
19385 +++ b/arch/x86/kernel/traps.c
19386 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19387
19388 /* Do we ignore FPU interrupts ? */
19389 char ignore_fpu_irq;
19390 -
19391 -/*
19392 - * The IDT has to be page-aligned to simplify the Pentium
19393 - * F0 0F bug workaround.
19394 - */
19395 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19396 #endif
19397
19398 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19399 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19400 }
19401
19402 static void __kprobes
19403 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19404 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19405 long error_code, siginfo_t *info)
19406 {
19407 struct task_struct *tsk = current;
19408
19409 #ifdef CONFIG_X86_32
19410 - if (regs->flags & X86_VM_MASK) {
19411 + if (v8086_mode(regs)) {
19412 /*
19413 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19414 * On nmi (interrupt 2), do_trap should not be called.
19415 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19416 }
19417 #endif
19418
19419 - if (!user_mode(regs))
19420 + if (!user_mode_novm(regs))
19421 goto kernel_trap;
19422
19423 #ifdef CONFIG_X86_32
19424 @@ -148,7 +142,7 @@ trap_signal:
19425 printk_ratelimit()) {
19426 printk(KERN_INFO
19427 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19428 - tsk->comm, tsk->pid, str,
19429 + tsk->comm, task_pid_nr(tsk), str,
19430 regs->ip, regs->sp, error_code);
19431 print_vma_addr(" in ", regs->ip);
19432 printk("\n");
19433 @@ -165,8 +159,20 @@ kernel_trap:
19434 if (!fixup_exception(regs)) {
19435 tsk->thread.error_code = error_code;
19436 tsk->thread.trap_no = trapnr;
19437 +
19438 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19439 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19440 + str = "PAX: suspicious stack segment fault";
19441 +#endif
19442 +
19443 die(str, regs, error_code);
19444 }
19445 +
19446 +#ifdef CONFIG_PAX_REFCOUNT
19447 + if (trapnr == 4)
19448 + pax_report_refcount_overflow(regs);
19449 +#endif
19450 +
19451 return;
19452
19453 #ifdef CONFIG_X86_32
19454 @@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19455 conditional_sti(regs);
19456
19457 #ifdef CONFIG_X86_32
19458 - if (regs->flags & X86_VM_MASK)
19459 + if (v8086_mode(regs))
19460 goto gp_in_vm86;
19461 #endif
19462
19463 tsk = current;
19464 - if (!user_mode(regs))
19465 + if (!user_mode_novm(regs))
19466 goto gp_in_kernel;
19467
19468 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19469 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19470 + struct mm_struct *mm = tsk->mm;
19471 + unsigned long limit;
19472 +
19473 + down_write(&mm->mmap_sem);
19474 + limit = mm->context.user_cs_limit;
19475 + if (limit < TASK_SIZE) {
19476 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19477 + up_write(&mm->mmap_sem);
19478 + return;
19479 + }
19480 + up_write(&mm->mmap_sem);
19481 + }
19482 +#endif
19483 +
19484 tsk->thread.error_code = error_code;
19485 tsk->thread.trap_no = 13;
19486
19487 @@ -295,6 +317,13 @@ gp_in_kernel:
19488 if (notify_die(DIE_GPF, "general protection fault", regs,
19489 error_code, 13, SIGSEGV) == NOTIFY_STOP)
19490 return;
19491 +
19492 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19493 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19494 + die("PAX: suspicious general protection fault", regs, error_code);
19495 + else
19496 +#endif
19497 +
19498 die("general protection fault", regs, error_code);
19499 }
19500
19501 @@ -421,7 +450,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19502 /* It's safe to allow irq's after DR6 has been saved */
19503 preempt_conditional_sti(regs);
19504
19505 - if (regs->flags & X86_VM_MASK) {
19506 + if (v8086_mode(regs)) {
19507 handle_vm86_trap((struct kernel_vm86_regs *) regs,
19508 error_code, 1);
19509 preempt_conditional_cli(regs);
19510 @@ -436,7 +465,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19511 * We already checked v86 mode above, so we can check for kernel mode
19512 * by just checking the CPL of CS.
19513 */
19514 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
19515 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19516 tsk->thread.debugreg6 &= ~DR_STEP;
19517 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19518 regs->flags &= ~X86_EFLAGS_TF;
19519 @@ -466,7 +495,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19520 return;
19521 conditional_sti(regs);
19522
19523 - if (!user_mode_vm(regs))
19524 + if (!user_mode(regs))
19525 {
19526 if (!fixup_exception(regs)) {
19527 task->thread.error_code = error_code;
19528 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19529 index b9242ba..50c5edd 100644
19530 --- a/arch/x86/kernel/verify_cpu.S
19531 +++ b/arch/x86/kernel/verify_cpu.S
19532 @@ -20,6 +20,7 @@
19533 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19534 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19535 * arch/x86/kernel/head_32.S: processor startup
19536 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19537 *
19538 * verify_cpu, returns the status of longmode and SSE in register %eax.
19539 * 0: Success 1: Failure
19540 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19541 index 328cb37..f37fee1 100644
19542 --- a/arch/x86/kernel/vm86_32.c
19543 +++ b/arch/x86/kernel/vm86_32.c
19544 @@ -41,6 +41,7 @@
19545 #include <linux/ptrace.h>
19546 #include <linux/audit.h>
19547 #include <linux/stddef.h>
19548 +#include <linux/grsecurity.h>
19549
19550 #include <asm/uaccess.h>
19551 #include <asm/io.h>
19552 @@ -109,6 +110,9 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
19553 /* convert vm86_regs to kernel_vm86_regs */
19554 static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
19555 const struct vm86_regs __user *user,
19556 + unsigned extra) __size_overflow(3);
19557 +static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
19558 + const struct vm86_regs __user *user,
19559 unsigned extra)
19560 {
19561 int ret = 0;
19562 @@ -148,7 +152,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19563 do_exit(SIGSEGV);
19564 }
19565
19566 - tss = &per_cpu(init_tss, get_cpu());
19567 + tss = init_tss + get_cpu();
19568 current->thread.sp0 = current->thread.saved_sp0;
19569 current->thread.sysenter_cs = __KERNEL_CS;
19570 load_sp0(tss, &current->thread);
19571 @@ -210,6 +214,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19572 struct task_struct *tsk;
19573 int tmp, ret = -EPERM;
19574
19575 +#ifdef CONFIG_GRKERNSEC_VM86
19576 + if (!capable(CAP_SYS_RAWIO)) {
19577 + gr_handle_vm86();
19578 + goto out;
19579 + }
19580 +#endif
19581 +
19582 tsk = current;
19583 if (tsk->thread.saved_sp0)
19584 goto out;
19585 @@ -240,6 +251,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19586 int tmp, ret;
19587 struct vm86plus_struct __user *v86;
19588
19589 +#ifdef CONFIG_GRKERNSEC_VM86
19590 + if (!capable(CAP_SYS_RAWIO)) {
19591 + gr_handle_vm86();
19592 + ret = -EPERM;
19593 + goto out;
19594 + }
19595 +#endif
19596 +
19597 tsk = current;
19598 switch (cmd) {
19599 case VM86_REQUEST_IRQ:
19600 @@ -326,7 +345,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19601 tsk->thread.saved_fs = info->regs32->fs;
19602 tsk->thread.saved_gs = get_user_gs(info->regs32);
19603
19604 - tss = &per_cpu(init_tss, get_cpu());
19605 + tss = init_tss + get_cpu();
19606 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19607 if (cpu_has_sep)
19608 tsk->thread.sysenter_cs = 0;
19609 @@ -533,7 +552,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19610 goto cannot_handle;
19611 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19612 goto cannot_handle;
19613 - intr_ptr = (unsigned long __user *) (i << 2);
19614 + intr_ptr = (__force unsigned long __user *) (i << 2);
19615 if (get_user(segoffs, intr_ptr))
19616 goto cannot_handle;
19617 if ((segoffs >> 16) == BIOSSEG)
19618 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
19619 index 0f703f1..9e15f64 100644
19620 --- a/arch/x86/kernel/vmlinux.lds.S
19621 +++ b/arch/x86/kernel/vmlinux.lds.S
19622 @@ -26,6 +26,13 @@
19623 #include <asm/page_types.h>
19624 #include <asm/cache.h>
19625 #include <asm/boot.h>
19626 +#include <asm/segment.h>
19627 +
19628 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19629 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
19630 +#else
19631 +#define __KERNEL_TEXT_OFFSET 0
19632 +#endif
19633
19634 #undef i386 /* in case the preprocessor is a 32bit one */
19635
19636 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
19637
19638 PHDRS {
19639 text PT_LOAD FLAGS(5); /* R_E */
19640 +#ifdef CONFIG_X86_32
19641 + module PT_LOAD FLAGS(5); /* R_E */
19642 +#endif
19643 +#ifdef CONFIG_XEN
19644 + rodata PT_LOAD FLAGS(5); /* R_E */
19645 +#else
19646 + rodata PT_LOAD FLAGS(4); /* R__ */
19647 +#endif
19648 data PT_LOAD FLAGS(6); /* RW_ */
19649 -#ifdef CONFIG_X86_64
19650 + init.begin PT_LOAD FLAGS(6); /* RW_ */
19651 #ifdef CONFIG_SMP
19652 percpu PT_LOAD FLAGS(6); /* RW_ */
19653 #endif
19654 + text.init PT_LOAD FLAGS(5); /* R_E */
19655 + text.exit PT_LOAD FLAGS(5); /* R_E */
19656 init PT_LOAD FLAGS(7); /* RWE */
19657 -#endif
19658 note PT_NOTE FLAGS(0); /* ___ */
19659 }
19660
19661 SECTIONS
19662 {
19663 #ifdef CONFIG_X86_32
19664 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
19665 - phys_startup_32 = startup_32 - LOAD_OFFSET;
19666 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
19667 #else
19668 - . = __START_KERNEL;
19669 - phys_startup_64 = startup_64 - LOAD_OFFSET;
19670 + . = __START_KERNEL;
19671 #endif
19672
19673 /* Text and read-only data */
19674 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
19675 - _text = .;
19676 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19677 /* bootstrapping code */
19678 +#ifdef CONFIG_X86_32
19679 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19680 +#else
19681 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19682 +#endif
19683 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19684 + _text = .;
19685 HEAD_TEXT
19686 #ifdef CONFIG_X86_32
19687 . = ALIGN(PAGE_SIZE);
19688 @@ -108,13 +128,47 @@ SECTIONS
19689 IRQENTRY_TEXT
19690 *(.fixup)
19691 *(.gnu.warning)
19692 - /* End of text section */
19693 - _etext = .;
19694 } :text = 0x9090
19695
19696 - NOTES :text :note
19697 + . += __KERNEL_TEXT_OFFSET;
19698
19699 - EXCEPTION_TABLE(16) :text = 0x9090
19700 +#ifdef CONFIG_X86_32
19701 + . = ALIGN(PAGE_SIZE);
19702 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
19703 +
19704 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
19705 + MODULES_EXEC_VADDR = .;
19706 + BYTE(0)
19707 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
19708 + . = ALIGN(HPAGE_SIZE);
19709 + MODULES_EXEC_END = . - 1;
19710 +#endif
19711 +
19712 + } :module
19713 +#endif
19714 +
19715 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
19716 + /* End of text section */
19717 + _etext = . - __KERNEL_TEXT_OFFSET;
19718 + }
19719 +
19720 +#ifdef CONFIG_X86_32
19721 + . = ALIGN(PAGE_SIZE);
19722 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
19723 + *(.idt)
19724 + . = ALIGN(PAGE_SIZE);
19725 + *(.empty_zero_page)
19726 + *(.initial_pg_fixmap)
19727 + *(.initial_pg_pmd)
19728 + *(.initial_page_table)
19729 + *(.swapper_pg_dir)
19730 + } :rodata
19731 +#endif
19732 +
19733 + . = ALIGN(PAGE_SIZE);
19734 + NOTES :rodata :note
19735 +
19736 + EXCEPTION_TABLE(16) :rodata
19737
19738 #if defined(CONFIG_DEBUG_RODATA)
19739 /* .text should occupy whole number of pages */
19740 @@ -126,16 +180,20 @@ SECTIONS
19741
19742 /* Data */
19743 .data : AT(ADDR(.data) - LOAD_OFFSET) {
19744 +
19745 +#ifdef CONFIG_PAX_KERNEXEC
19746 + . = ALIGN(HPAGE_SIZE);
19747 +#else
19748 + . = ALIGN(PAGE_SIZE);
19749 +#endif
19750 +
19751 /* Start of data section */
19752 _sdata = .;
19753
19754 /* init_task */
19755 INIT_TASK_DATA(THREAD_SIZE)
19756
19757 -#ifdef CONFIG_X86_32
19758 - /* 32 bit has nosave before _edata */
19759 NOSAVE_DATA
19760 -#endif
19761
19762 PAGE_ALIGNED_DATA(PAGE_SIZE)
19763
19764 @@ -176,12 +234,19 @@ SECTIONS
19765 #endif /* CONFIG_X86_64 */
19766
19767 /* Init code and data - will be freed after init */
19768 - . = ALIGN(PAGE_SIZE);
19769 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
19770 + BYTE(0)
19771 +
19772 +#ifdef CONFIG_PAX_KERNEXEC
19773 + . = ALIGN(HPAGE_SIZE);
19774 +#else
19775 + . = ALIGN(PAGE_SIZE);
19776 +#endif
19777 +
19778 __init_begin = .; /* paired with __init_end */
19779 - }
19780 + } :init.begin
19781
19782 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
19783 +#ifdef CONFIG_SMP
19784 /*
19785 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
19786 * output PHDR, so the next output section - .init.text - should
19787 @@ -190,12 +255,27 @@ SECTIONS
19788 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
19789 #endif
19790
19791 - INIT_TEXT_SECTION(PAGE_SIZE)
19792 -#ifdef CONFIG_X86_64
19793 - :init
19794 -#endif
19795 + . = ALIGN(PAGE_SIZE);
19796 + init_begin = .;
19797 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
19798 + VMLINUX_SYMBOL(_sinittext) = .;
19799 + INIT_TEXT
19800 + VMLINUX_SYMBOL(_einittext) = .;
19801 + . = ALIGN(PAGE_SIZE);
19802 + } :text.init
19803
19804 - INIT_DATA_SECTION(16)
19805 + /*
19806 + * .exit.text is discard at runtime, not link time, to deal with
19807 + * references from .altinstructions and .eh_frame
19808 + */
19809 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19810 + EXIT_TEXT
19811 + . = ALIGN(16);
19812 + } :text.exit
19813 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
19814 +
19815 + . = ALIGN(PAGE_SIZE);
19816 + INIT_DATA_SECTION(16) :init
19817
19818 /*
19819 * Code and data for a variety of lowlevel trampolines, to be
19820 @@ -269,19 +349,12 @@ SECTIONS
19821 }
19822
19823 . = ALIGN(8);
19824 - /*
19825 - * .exit.text is discard at runtime, not link time, to deal with
19826 - * references from .altinstructions and .eh_frame
19827 - */
19828 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
19829 - EXIT_TEXT
19830 - }
19831
19832 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
19833 EXIT_DATA
19834 }
19835
19836 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
19837 +#ifndef CONFIG_SMP
19838 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
19839 #endif
19840
19841 @@ -300,16 +373,10 @@ SECTIONS
19842 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
19843 __smp_locks = .;
19844 *(.smp_locks)
19845 - . = ALIGN(PAGE_SIZE);
19846 __smp_locks_end = .;
19847 + . = ALIGN(PAGE_SIZE);
19848 }
19849
19850 -#ifdef CONFIG_X86_64
19851 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
19852 - NOSAVE_DATA
19853 - }
19854 -#endif
19855 -
19856 /* BSS */
19857 . = ALIGN(PAGE_SIZE);
19858 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
19859 @@ -325,6 +392,7 @@ SECTIONS
19860 __brk_base = .;
19861 . += 64 * 1024; /* 64k alignment slop space */
19862 *(.brk_reservation) /* areas brk users have reserved */
19863 + . = ALIGN(HPAGE_SIZE);
19864 __brk_limit = .;
19865 }
19866
19867 @@ -351,13 +419,12 @@ SECTIONS
19868 * for the boot processor.
19869 */
19870 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
19871 -INIT_PER_CPU(gdt_page);
19872 INIT_PER_CPU(irq_stack_union);
19873
19874 /*
19875 * Build-time check on the image size:
19876 */
19877 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
19878 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
19879 "kernel image bigger than KERNEL_IMAGE_SIZE");
19880
19881 #ifdef CONFIG_SMP
19882 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
19883 index b07ba93..a212969 100644
19884 --- a/arch/x86/kernel/vsyscall_64.c
19885 +++ b/arch/x86/kernel/vsyscall_64.c
19886 @@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
19887 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
19888 };
19889
19890 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
19891 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
19892
19893 static int __init vsyscall_setup(char *str)
19894 {
19895 if (str) {
19896 if (!strcmp("emulate", str))
19897 vsyscall_mode = EMULATE;
19898 - else if (!strcmp("native", str))
19899 - vsyscall_mode = NATIVE;
19900 else if (!strcmp("none", str))
19901 vsyscall_mode = NONE;
19902 else
19903 @@ -207,7 +205,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
19904
19905 tsk = current;
19906 if (seccomp_mode(&tsk->seccomp))
19907 - do_exit(SIGKILL);
19908 + do_group_exit(SIGKILL);
19909
19910 /*
19911 * With a real vsyscall, page faults cause SIGSEGV. We want to
19912 @@ -279,8 +277,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
19913 return true;
19914
19915 sigsegv:
19916 - force_sig(SIGSEGV, current);
19917 - return true;
19918 + do_group_exit(SIGKILL);
19919 }
19920
19921 /*
19922 @@ -333,10 +330,7 @@ void __init map_vsyscall(void)
19923 extern char __vvar_page;
19924 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
19925
19926 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
19927 - vsyscall_mode == NATIVE
19928 - ? PAGE_KERNEL_VSYSCALL
19929 - : PAGE_KERNEL_VVAR);
19930 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
19931 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
19932 (unsigned long)VSYSCALL_START);
19933
19934 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
19935 index 9796c2f..f686fbf 100644
19936 --- a/arch/x86/kernel/x8664_ksyms_64.c
19937 +++ b/arch/x86/kernel/x8664_ksyms_64.c
19938 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
19939 EXPORT_SYMBOL(copy_user_generic_string);
19940 EXPORT_SYMBOL(copy_user_generic_unrolled);
19941 EXPORT_SYMBOL(__copy_user_nocache);
19942 -EXPORT_SYMBOL(_copy_from_user);
19943 -EXPORT_SYMBOL(_copy_to_user);
19944
19945 EXPORT_SYMBOL(copy_page);
19946 EXPORT_SYMBOL(clear_page);
19947 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
19948 index 7110911..e8cdee5 100644
19949 --- a/arch/x86/kernel/xsave.c
19950 +++ b/arch/x86/kernel/xsave.c
19951 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
19952 fx_sw_user->xstate_size > fx_sw_user->extended_size)
19953 return -EINVAL;
19954
19955 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
19956 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
19957 fx_sw_user->extended_size -
19958 FP_XSTATE_MAGIC2_SIZE));
19959 if (err)
19960 @@ -266,7 +266,7 @@ fx_only:
19961 * the other extended state.
19962 */
19963 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
19964 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
19965 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
19966 }
19967
19968 /*
19969 @@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
19970 if (use_xsave())
19971 err = restore_user_xstate(buf);
19972 else
19973 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
19974 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
19975 buf);
19976 if (unlikely(err)) {
19977 /*
19978 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
19979 index 89b02bf..0f6511d 100644
19980 --- a/arch/x86/kvm/cpuid.c
19981 +++ b/arch/x86/kvm/cpuid.c
19982 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
19983 struct kvm_cpuid2 *cpuid,
19984 struct kvm_cpuid_entry2 __user *entries)
19985 {
19986 - int r;
19987 + int r, i;
19988
19989 r = -E2BIG;
19990 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
19991 goto out;
19992 r = -EFAULT;
19993 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
19994 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19995 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19996 goto out;
19997 + for (i = 0; i < cpuid->nent; ++i) {
19998 + struct kvm_cpuid_entry2 cpuid_entry;
19999 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20000 + goto out;
20001 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
20002 + }
20003 vcpu->arch.cpuid_nent = cpuid->nent;
20004 kvm_apic_set_version(vcpu);
20005 kvm_x86_ops->cpuid_update(vcpu);
20006 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20007 struct kvm_cpuid2 *cpuid,
20008 struct kvm_cpuid_entry2 __user *entries)
20009 {
20010 - int r;
20011 + int r, i;
20012
20013 r = -E2BIG;
20014 if (cpuid->nent < vcpu->arch.cpuid_nent)
20015 goto out;
20016 r = -EFAULT;
20017 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20018 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20019 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20020 goto out;
20021 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20022 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20023 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20024 + goto out;
20025 + }
20026 return 0;
20027
20028 out:
20029 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20030 index 0982507..7f6d72f 100644
20031 --- a/arch/x86/kvm/emulate.c
20032 +++ b/arch/x86/kvm/emulate.c
20033 @@ -250,6 +250,7 @@ struct gprefix {
20034
20035 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20036 do { \
20037 + unsigned long _tmp; \
20038 __asm__ __volatile__ ( \
20039 _PRE_EFLAGS("0", "4", "2") \
20040 _op _suffix " %"_x"3,%1; " \
20041 @@ -264,8 +265,6 @@ struct gprefix {
20042 /* Raw emulation: instruction has two explicit operands. */
20043 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20044 do { \
20045 - unsigned long _tmp; \
20046 - \
20047 switch ((ctxt)->dst.bytes) { \
20048 case 2: \
20049 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20050 @@ -281,7 +280,6 @@ struct gprefix {
20051
20052 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20053 do { \
20054 - unsigned long _tmp; \
20055 switch ((ctxt)->dst.bytes) { \
20056 case 1: \
20057 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20058 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20059 index cfdc6e0..ab92e84 100644
20060 --- a/arch/x86/kvm/lapic.c
20061 +++ b/arch/x86/kvm/lapic.c
20062 @@ -54,7 +54,7 @@
20063 #define APIC_BUS_CYCLE_NS 1
20064
20065 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20066 -#define apic_debug(fmt, arg...)
20067 +#define apic_debug(fmt, arg...) do {} while (0)
20068
20069 #define APIC_LVT_NUM 6
20070 /* 14 is the version for Xeon and Pentium 8.4.8*/
20071 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20072 index 1561028..0ed7f14 100644
20073 --- a/arch/x86/kvm/paging_tmpl.h
20074 +++ b/arch/x86/kvm/paging_tmpl.h
20075 @@ -197,7 +197,7 @@ retry_walk:
20076 if (unlikely(kvm_is_error_hva(host_addr)))
20077 goto error;
20078
20079 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20080 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20081 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20082 goto error;
20083
20084 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20085 index e385214..f8df033 100644
20086 --- a/arch/x86/kvm/svm.c
20087 +++ b/arch/x86/kvm/svm.c
20088 @@ -3420,7 +3420,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20089 int cpu = raw_smp_processor_id();
20090
20091 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20092 +
20093 + pax_open_kernel();
20094 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20095 + pax_close_kernel();
20096 +
20097 load_TR_desc();
20098 }
20099
20100 @@ -3798,6 +3802,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20101 #endif
20102 #endif
20103
20104 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20105 + __set_fs(current_thread_info()->addr_limit);
20106 +#endif
20107 +
20108 reload_tss(vcpu);
20109
20110 local_irq_disable();
20111 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20112 index 3b4c8d8..f457b63 100644
20113 --- a/arch/x86/kvm/vmx.c
20114 +++ b/arch/x86/kvm/vmx.c
20115 @@ -1306,7 +1306,11 @@ static void reload_tss(void)
20116 struct desc_struct *descs;
20117
20118 descs = (void *)gdt->address;
20119 +
20120 + pax_open_kernel();
20121 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20122 + pax_close_kernel();
20123 +
20124 load_TR_desc();
20125 }
20126
20127 @@ -2631,8 +2635,11 @@ static __init int hardware_setup(void)
20128 if (!cpu_has_vmx_flexpriority())
20129 flexpriority_enabled = 0;
20130
20131 - if (!cpu_has_vmx_tpr_shadow())
20132 - kvm_x86_ops->update_cr8_intercept = NULL;
20133 + if (!cpu_has_vmx_tpr_shadow()) {
20134 + pax_open_kernel();
20135 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20136 + pax_close_kernel();
20137 + }
20138
20139 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20140 kvm_disable_largepages();
20141 @@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
20142 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20143
20144 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20145 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20146 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20147
20148 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20149 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20150 @@ -6184,6 +6191,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20151 "jmp .Lkvm_vmx_return \n\t"
20152 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20153 ".Lkvm_vmx_return: "
20154 +
20155 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20156 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20157 + ".Lkvm_vmx_return2: "
20158 +#endif
20159 +
20160 /* Save guest registers, load host registers, keep flags */
20161 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20162 "pop %0 \n\t"
20163 @@ -6232,6 +6245,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20164 #endif
20165 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20166 [wordsize]"i"(sizeof(ulong))
20167 +
20168 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20169 + ,[cs]"i"(__KERNEL_CS)
20170 +#endif
20171 +
20172 : "cc", "memory"
20173 , R"ax", R"bx", R"di", R"si"
20174 #ifdef CONFIG_X86_64
20175 @@ -6260,7 +6278,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20176 }
20177 }
20178
20179 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
20180 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
20181 +
20182 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20183 + loadsegment(fs, __KERNEL_PERCPU);
20184 +#endif
20185 +
20186 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20187 + __set_fs(current_thread_info()->addr_limit);
20188 +#endif
20189 +
20190 vmx->loaded_vmcs->launched = 1;
20191
20192 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
20193 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20194 index 9cbfc06..943ffa6 100644
20195 --- a/arch/x86/kvm/x86.c
20196 +++ b/arch/x86/kvm/x86.c
20197 @@ -873,6 +873,7 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
20198 return kvm_set_msr(vcpu, index, *data);
20199 }
20200
20201 +static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) __size_overflow(2);
20202 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
20203 {
20204 int version;
20205 @@ -1307,12 +1308,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
20206 return 0;
20207 }
20208
20209 +static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) __size_overflow(2);
20210 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20211 {
20212 struct kvm *kvm = vcpu->kvm;
20213 int lm = is_long_mode(vcpu);
20214 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20215 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20216 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20217 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20218 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20219 : kvm->arch.xen_hvm_config.blob_size_32;
20220 u32 page_num = data & ~PAGE_MASK;
20221 @@ -2145,6 +2147,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20222 if (n < msr_list.nmsrs)
20223 goto out;
20224 r = -EFAULT;
20225 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20226 + goto out;
20227 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20228 num_msrs_to_save * sizeof(u32)))
20229 goto out;
20230 @@ -2266,7 +2270,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20231 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20232 struct kvm_interrupt *irq)
20233 {
20234 - if (irq->irq < 0 || irq->irq >= 256)
20235 + if (irq->irq >= 256)
20236 return -EINVAL;
20237 if (irqchip_in_kernel(vcpu->kvm))
20238 return -ENXIO;
20239 @@ -3497,6 +3501,9 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
20240
20241 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
20242 struct kvm_vcpu *vcpu, u32 access,
20243 + struct x86_exception *exception) __size_overflow(1,3);
20244 +static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
20245 + struct kvm_vcpu *vcpu, u32 access,
20246 struct x86_exception *exception)
20247 {
20248 void *data = val;
20249 @@ -3528,6 +3535,9 @@ out:
20250 /* used for instruction fetching */
20251 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
20252 gva_t addr, void *val, unsigned int bytes,
20253 + struct x86_exception *exception) __size_overflow(2,4);
20254 +static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
20255 + gva_t addr, void *val, unsigned int bytes,
20256 struct x86_exception *exception)
20257 {
20258 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
20259 @@ -3552,6 +3562,9 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
20260
20261 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20262 gva_t addr, void *val, unsigned int bytes,
20263 + struct x86_exception *exception) __size_overflow(2,4);
20264 +static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20265 + gva_t addr, void *val, unsigned int bytes,
20266 struct x86_exception *exception)
20267 {
20268 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
20269 @@ -3665,12 +3678,16 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
20270 }
20271
20272 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20273 + void *val, int bytes) __size_overflow(2);
20274 +static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20275 void *val, int bytes)
20276 {
20277 return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
20278 }
20279
20280 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20281 + void *val, int bytes) __size_overflow(2);
20282 +static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20283 void *val, int bytes)
20284 {
20285 return emulator_write_phys(vcpu, gpa, val, bytes);
20286 @@ -3821,6 +3838,12 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
20287 const void *old,
20288 const void *new,
20289 unsigned int bytes,
20290 + struct x86_exception *exception) __size_overflow(5);
20291 +static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
20292 + unsigned long addr,
20293 + const void *old,
20294 + const void *new,
20295 + unsigned int bytes,
20296 struct x86_exception *exception)
20297 {
20298 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
20299 @@ -4780,7 +4803,7 @@ static void kvm_set_mmio_spte_mask(void)
20300 kvm_mmu_set_mmio_spte_mask(mask);
20301 }
20302
20303 -int kvm_arch_init(void *opaque)
20304 +int kvm_arch_init(const void *opaque)
20305 {
20306 int r;
20307 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20308 diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
20309 index cb80c29..aeee86c 100644
20310 --- a/arch/x86/kvm/x86.h
20311 +++ b/arch/x86/kvm/x86.h
20312 @@ -116,11 +116,11 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
20313
20314 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
20315 gva_t addr, void *val, unsigned int bytes,
20316 - struct x86_exception *exception);
20317 + struct x86_exception *exception) __size_overflow(2,4);
20318
20319 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20320 gva_t addr, void *val, unsigned int bytes,
20321 - struct x86_exception *exception);
20322 + struct x86_exception *exception) __size_overflow(2,4);
20323
20324 extern u64 host_xcr0;
20325
20326 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20327 index 642d880..44e0f3f 100644
20328 --- a/arch/x86/lguest/boot.c
20329 +++ b/arch/x86/lguest/boot.c
20330 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20331 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20332 * Launcher to reboot us.
20333 */
20334 -static void lguest_restart(char *reason)
20335 +static __noreturn void lguest_restart(char *reason)
20336 {
20337 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20338 + BUG();
20339 }
20340
20341 /*G:050
20342 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
20343 index 042f682..c92afb6 100644
20344 --- a/arch/x86/lib/atomic64_32.c
20345 +++ b/arch/x86/lib/atomic64_32.c
20346 @@ -8,18 +8,30 @@
20347
20348 long long atomic64_read_cx8(long long, const atomic64_t *v);
20349 EXPORT_SYMBOL(atomic64_read_cx8);
20350 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
20351 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
20352 long long atomic64_set_cx8(long long, const atomic64_t *v);
20353 EXPORT_SYMBOL(atomic64_set_cx8);
20354 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
20355 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
20356 long long atomic64_xchg_cx8(long long, unsigned high);
20357 EXPORT_SYMBOL(atomic64_xchg_cx8);
20358 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
20359 EXPORT_SYMBOL(atomic64_add_return_cx8);
20360 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20361 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
20362 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
20363 EXPORT_SYMBOL(atomic64_sub_return_cx8);
20364 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20365 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
20366 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
20367 EXPORT_SYMBOL(atomic64_inc_return_cx8);
20368 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20369 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
20370 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
20371 EXPORT_SYMBOL(atomic64_dec_return_cx8);
20372 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20373 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
20374 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
20375 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
20376 int atomic64_inc_not_zero_cx8(atomic64_t *v);
20377 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
20378 #ifndef CONFIG_X86_CMPXCHG64
20379 long long atomic64_read_386(long long, const atomic64_t *v);
20380 EXPORT_SYMBOL(atomic64_read_386);
20381 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
20382 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
20383 long long atomic64_set_386(long long, const atomic64_t *v);
20384 EXPORT_SYMBOL(atomic64_set_386);
20385 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
20386 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
20387 long long atomic64_xchg_386(long long, unsigned high);
20388 EXPORT_SYMBOL(atomic64_xchg_386);
20389 long long atomic64_add_return_386(long long a, atomic64_t *v);
20390 EXPORT_SYMBOL(atomic64_add_return_386);
20391 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20392 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
20393 long long atomic64_sub_return_386(long long a, atomic64_t *v);
20394 EXPORT_SYMBOL(atomic64_sub_return_386);
20395 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20396 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
20397 long long atomic64_inc_return_386(long long a, atomic64_t *v);
20398 EXPORT_SYMBOL(atomic64_inc_return_386);
20399 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20400 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
20401 long long atomic64_dec_return_386(long long a, atomic64_t *v);
20402 EXPORT_SYMBOL(atomic64_dec_return_386);
20403 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20404 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
20405 long long atomic64_add_386(long long a, atomic64_t *v);
20406 EXPORT_SYMBOL(atomic64_add_386);
20407 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
20408 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
20409 long long atomic64_sub_386(long long a, atomic64_t *v);
20410 EXPORT_SYMBOL(atomic64_sub_386);
20411 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
20412 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
20413 long long atomic64_inc_386(long long a, atomic64_t *v);
20414 EXPORT_SYMBOL(atomic64_inc_386);
20415 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
20416 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
20417 long long atomic64_dec_386(long long a, atomic64_t *v);
20418 EXPORT_SYMBOL(atomic64_dec_386);
20419 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
20420 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
20421 long long atomic64_dec_if_positive_386(atomic64_t *v);
20422 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
20423 int atomic64_inc_not_zero_386(atomic64_t *v);
20424 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20425 index e8e7e0d..56fd1b0 100644
20426 --- a/arch/x86/lib/atomic64_386_32.S
20427 +++ b/arch/x86/lib/atomic64_386_32.S
20428 @@ -48,6 +48,10 @@ BEGIN(read)
20429 movl (v), %eax
20430 movl 4(v), %edx
20431 RET_ENDP
20432 +BEGIN(read_unchecked)
20433 + movl (v), %eax
20434 + movl 4(v), %edx
20435 +RET_ENDP
20436 #undef v
20437
20438 #define v %esi
20439 @@ -55,6 +59,10 @@ BEGIN(set)
20440 movl %ebx, (v)
20441 movl %ecx, 4(v)
20442 RET_ENDP
20443 +BEGIN(set_unchecked)
20444 + movl %ebx, (v)
20445 + movl %ecx, 4(v)
20446 +RET_ENDP
20447 #undef v
20448
20449 #define v %esi
20450 @@ -70,6 +78,20 @@ RET_ENDP
20451 BEGIN(add)
20452 addl %eax, (v)
20453 adcl %edx, 4(v)
20454 +
20455 +#ifdef CONFIG_PAX_REFCOUNT
20456 + jno 0f
20457 + subl %eax, (v)
20458 + sbbl %edx, 4(v)
20459 + int $4
20460 +0:
20461 + _ASM_EXTABLE(0b, 0b)
20462 +#endif
20463 +
20464 +RET_ENDP
20465 +BEGIN(add_unchecked)
20466 + addl %eax, (v)
20467 + adcl %edx, 4(v)
20468 RET_ENDP
20469 #undef v
20470
20471 @@ -77,6 +99,24 @@ RET_ENDP
20472 BEGIN(add_return)
20473 addl (v), %eax
20474 adcl 4(v), %edx
20475 +
20476 +#ifdef CONFIG_PAX_REFCOUNT
20477 + into
20478 +1234:
20479 + _ASM_EXTABLE(1234b, 2f)
20480 +#endif
20481 +
20482 + movl %eax, (v)
20483 + movl %edx, 4(v)
20484 +
20485 +#ifdef CONFIG_PAX_REFCOUNT
20486 +2:
20487 +#endif
20488 +
20489 +RET_ENDP
20490 +BEGIN(add_return_unchecked)
20491 + addl (v), %eax
20492 + adcl 4(v), %edx
20493 movl %eax, (v)
20494 movl %edx, 4(v)
20495 RET_ENDP
20496 @@ -86,6 +126,20 @@ RET_ENDP
20497 BEGIN(sub)
20498 subl %eax, (v)
20499 sbbl %edx, 4(v)
20500 +
20501 +#ifdef CONFIG_PAX_REFCOUNT
20502 + jno 0f
20503 + addl %eax, (v)
20504 + adcl %edx, 4(v)
20505 + int $4
20506 +0:
20507 + _ASM_EXTABLE(0b, 0b)
20508 +#endif
20509 +
20510 +RET_ENDP
20511 +BEGIN(sub_unchecked)
20512 + subl %eax, (v)
20513 + sbbl %edx, 4(v)
20514 RET_ENDP
20515 #undef v
20516
20517 @@ -96,6 +150,27 @@ BEGIN(sub_return)
20518 sbbl $0, %edx
20519 addl (v), %eax
20520 adcl 4(v), %edx
20521 +
20522 +#ifdef CONFIG_PAX_REFCOUNT
20523 + into
20524 +1234:
20525 + _ASM_EXTABLE(1234b, 2f)
20526 +#endif
20527 +
20528 + movl %eax, (v)
20529 + movl %edx, 4(v)
20530 +
20531 +#ifdef CONFIG_PAX_REFCOUNT
20532 +2:
20533 +#endif
20534 +
20535 +RET_ENDP
20536 +BEGIN(sub_return_unchecked)
20537 + negl %edx
20538 + negl %eax
20539 + sbbl $0, %edx
20540 + addl (v), %eax
20541 + adcl 4(v), %edx
20542 movl %eax, (v)
20543 movl %edx, 4(v)
20544 RET_ENDP
20545 @@ -105,6 +180,20 @@ RET_ENDP
20546 BEGIN(inc)
20547 addl $1, (v)
20548 adcl $0, 4(v)
20549 +
20550 +#ifdef CONFIG_PAX_REFCOUNT
20551 + jno 0f
20552 + subl $1, (v)
20553 + sbbl $0, 4(v)
20554 + int $4
20555 +0:
20556 + _ASM_EXTABLE(0b, 0b)
20557 +#endif
20558 +
20559 +RET_ENDP
20560 +BEGIN(inc_unchecked)
20561 + addl $1, (v)
20562 + adcl $0, 4(v)
20563 RET_ENDP
20564 #undef v
20565
20566 @@ -114,6 +203,26 @@ BEGIN(inc_return)
20567 movl 4(v), %edx
20568 addl $1, %eax
20569 adcl $0, %edx
20570 +
20571 +#ifdef CONFIG_PAX_REFCOUNT
20572 + into
20573 +1234:
20574 + _ASM_EXTABLE(1234b, 2f)
20575 +#endif
20576 +
20577 + movl %eax, (v)
20578 + movl %edx, 4(v)
20579 +
20580 +#ifdef CONFIG_PAX_REFCOUNT
20581 +2:
20582 +#endif
20583 +
20584 +RET_ENDP
20585 +BEGIN(inc_return_unchecked)
20586 + movl (v), %eax
20587 + movl 4(v), %edx
20588 + addl $1, %eax
20589 + adcl $0, %edx
20590 movl %eax, (v)
20591 movl %edx, 4(v)
20592 RET_ENDP
20593 @@ -123,6 +232,20 @@ RET_ENDP
20594 BEGIN(dec)
20595 subl $1, (v)
20596 sbbl $0, 4(v)
20597 +
20598 +#ifdef CONFIG_PAX_REFCOUNT
20599 + jno 0f
20600 + addl $1, (v)
20601 + adcl $0, 4(v)
20602 + int $4
20603 +0:
20604 + _ASM_EXTABLE(0b, 0b)
20605 +#endif
20606 +
20607 +RET_ENDP
20608 +BEGIN(dec_unchecked)
20609 + subl $1, (v)
20610 + sbbl $0, 4(v)
20611 RET_ENDP
20612 #undef v
20613
20614 @@ -132,6 +255,26 @@ BEGIN(dec_return)
20615 movl 4(v), %edx
20616 subl $1, %eax
20617 sbbl $0, %edx
20618 +
20619 +#ifdef CONFIG_PAX_REFCOUNT
20620 + into
20621 +1234:
20622 + _ASM_EXTABLE(1234b, 2f)
20623 +#endif
20624 +
20625 + movl %eax, (v)
20626 + movl %edx, 4(v)
20627 +
20628 +#ifdef CONFIG_PAX_REFCOUNT
20629 +2:
20630 +#endif
20631 +
20632 +RET_ENDP
20633 +BEGIN(dec_return_unchecked)
20634 + movl (v), %eax
20635 + movl 4(v), %edx
20636 + subl $1, %eax
20637 + sbbl $0, %edx
20638 movl %eax, (v)
20639 movl %edx, 4(v)
20640 RET_ENDP
20641 @@ -143,6 +286,13 @@ BEGIN(add_unless)
20642 adcl %edx, %edi
20643 addl (v), %eax
20644 adcl 4(v), %edx
20645 +
20646 +#ifdef CONFIG_PAX_REFCOUNT
20647 + into
20648 +1234:
20649 + _ASM_EXTABLE(1234b, 2f)
20650 +#endif
20651 +
20652 cmpl %eax, %esi
20653 je 3f
20654 1:
20655 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20656 1:
20657 addl $1, %eax
20658 adcl $0, %edx
20659 +
20660 +#ifdef CONFIG_PAX_REFCOUNT
20661 + into
20662 +1234:
20663 + _ASM_EXTABLE(1234b, 2f)
20664 +#endif
20665 +
20666 movl %eax, (v)
20667 movl %edx, 4(v)
20668 movl $1, %eax
20669 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20670 movl 4(v), %edx
20671 subl $1, %eax
20672 sbbl $0, %edx
20673 +
20674 +#ifdef CONFIG_PAX_REFCOUNT
20675 + into
20676 +1234:
20677 + _ASM_EXTABLE(1234b, 1f)
20678 +#endif
20679 +
20680 js 1f
20681 movl %eax, (v)
20682 movl %edx, 4(v)
20683 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20684 index 391a083..d658e9f 100644
20685 --- a/arch/x86/lib/atomic64_cx8_32.S
20686 +++ b/arch/x86/lib/atomic64_cx8_32.S
20687 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20688 CFI_STARTPROC
20689
20690 read64 %ecx
20691 + pax_force_retaddr
20692 ret
20693 CFI_ENDPROC
20694 ENDPROC(atomic64_read_cx8)
20695
20696 +ENTRY(atomic64_read_unchecked_cx8)
20697 + CFI_STARTPROC
20698 +
20699 + read64 %ecx
20700 + pax_force_retaddr
20701 + ret
20702 + CFI_ENDPROC
20703 +ENDPROC(atomic64_read_unchecked_cx8)
20704 +
20705 ENTRY(atomic64_set_cx8)
20706 CFI_STARTPROC
20707
20708 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20709 cmpxchg8b (%esi)
20710 jne 1b
20711
20712 + pax_force_retaddr
20713 ret
20714 CFI_ENDPROC
20715 ENDPROC(atomic64_set_cx8)
20716
20717 +ENTRY(atomic64_set_unchecked_cx8)
20718 + CFI_STARTPROC
20719 +
20720 +1:
20721 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
20722 + * are atomic on 586 and newer */
20723 + cmpxchg8b (%esi)
20724 + jne 1b
20725 +
20726 + pax_force_retaddr
20727 + ret
20728 + CFI_ENDPROC
20729 +ENDPROC(atomic64_set_unchecked_cx8)
20730 +
20731 ENTRY(atomic64_xchg_cx8)
20732 CFI_STARTPROC
20733
20734 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
20735 cmpxchg8b (%esi)
20736 jne 1b
20737
20738 + pax_force_retaddr
20739 ret
20740 CFI_ENDPROC
20741 ENDPROC(atomic64_xchg_cx8)
20742
20743 -.macro addsub_return func ins insc
20744 -ENTRY(atomic64_\func\()_return_cx8)
20745 +.macro addsub_return func ins insc unchecked=""
20746 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20747 CFI_STARTPROC
20748 SAVE ebp
20749 SAVE ebx
20750 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
20751 movl %edx, %ecx
20752 \ins\()l %esi, %ebx
20753 \insc\()l %edi, %ecx
20754 +
20755 +.ifb \unchecked
20756 +#ifdef CONFIG_PAX_REFCOUNT
20757 + into
20758 +2:
20759 + _ASM_EXTABLE(2b, 3f)
20760 +#endif
20761 +.endif
20762 +
20763 LOCK_PREFIX
20764 cmpxchg8b (%ebp)
20765 jne 1b
20766 -
20767 -10:
20768 movl %ebx, %eax
20769 movl %ecx, %edx
20770 +
20771 +.ifb \unchecked
20772 +#ifdef CONFIG_PAX_REFCOUNT
20773 +3:
20774 +#endif
20775 +.endif
20776 +
20777 RESTORE edi
20778 RESTORE esi
20779 RESTORE ebx
20780 RESTORE ebp
20781 + pax_force_retaddr
20782 ret
20783 CFI_ENDPROC
20784 -ENDPROC(atomic64_\func\()_return_cx8)
20785 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20786 .endm
20787
20788 addsub_return add add adc
20789 addsub_return sub sub sbb
20790 +addsub_return add add adc _unchecked
20791 +addsub_return sub sub sbb _unchecked
20792
20793 -.macro incdec_return func ins insc
20794 -ENTRY(atomic64_\func\()_return_cx8)
20795 +.macro incdec_return func ins insc unchecked
20796 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20797 CFI_STARTPROC
20798 SAVE ebx
20799
20800 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
20801 movl %edx, %ecx
20802 \ins\()l $1, %ebx
20803 \insc\()l $0, %ecx
20804 +
20805 +.ifb \unchecked
20806 +#ifdef CONFIG_PAX_REFCOUNT
20807 + into
20808 +2:
20809 + _ASM_EXTABLE(2b, 3f)
20810 +#endif
20811 +.endif
20812 +
20813 LOCK_PREFIX
20814 cmpxchg8b (%esi)
20815 jne 1b
20816
20817 -10:
20818 movl %ebx, %eax
20819 movl %ecx, %edx
20820 +
20821 +.ifb \unchecked
20822 +#ifdef CONFIG_PAX_REFCOUNT
20823 +3:
20824 +#endif
20825 +.endif
20826 +
20827 RESTORE ebx
20828 + pax_force_retaddr
20829 ret
20830 CFI_ENDPROC
20831 -ENDPROC(atomic64_\func\()_return_cx8)
20832 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20833 .endm
20834
20835 incdec_return inc add adc
20836 incdec_return dec sub sbb
20837 +incdec_return inc add adc _unchecked
20838 +incdec_return dec sub sbb _unchecked
20839
20840 ENTRY(atomic64_dec_if_positive_cx8)
20841 CFI_STARTPROC
20842 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
20843 movl %edx, %ecx
20844 subl $1, %ebx
20845 sbb $0, %ecx
20846 +
20847 +#ifdef CONFIG_PAX_REFCOUNT
20848 + into
20849 +1234:
20850 + _ASM_EXTABLE(1234b, 2f)
20851 +#endif
20852 +
20853 js 2f
20854 LOCK_PREFIX
20855 cmpxchg8b (%esi)
20856 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
20857 movl %ebx, %eax
20858 movl %ecx, %edx
20859 RESTORE ebx
20860 + pax_force_retaddr
20861 ret
20862 CFI_ENDPROC
20863 ENDPROC(atomic64_dec_if_positive_cx8)
20864 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
20865 movl %edx, %ecx
20866 addl %esi, %ebx
20867 adcl %edi, %ecx
20868 +
20869 +#ifdef CONFIG_PAX_REFCOUNT
20870 + into
20871 +1234:
20872 + _ASM_EXTABLE(1234b, 3f)
20873 +#endif
20874 +
20875 LOCK_PREFIX
20876 cmpxchg8b (%ebp)
20877 jne 1b
20878 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
20879 CFI_ADJUST_CFA_OFFSET -8
20880 RESTORE ebx
20881 RESTORE ebp
20882 + pax_force_retaddr
20883 ret
20884 4:
20885 cmpl %edx, 4(%esp)
20886 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
20887 movl %edx, %ecx
20888 addl $1, %ebx
20889 adcl $0, %ecx
20890 +
20891 +#ifdef CONFIG_PAX_REFCOUNT
20892 + into
20893 +1234:
20894 + _ASM_EXTABLE(1234b, 3f)
20895 +#endif
20896 +
20897 LOCK_PREFIX
20898 cmpxchg8b (%esi)
20899 jne 1b
20900 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
20901 movl $1, %eax
20902 3:
20903 RESTORE ebx
20904 + pax_force_retaddr
20905 ret
20906 4:
20907 testl %edx, %edx
20908 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
20909 index 78d16a5..fbcf666 100644
20910 --- a/arch/x86/lib/checksum_32.S
20911 +++ b/arch/x86/lib/checksum_32.S
20912 @@ -28,7 +28,8 @@
20913 #include <linux/linkage.h>
20914 #include <asm/dwarf2.h>
20915 #include <asm/errno.h>
20916 -
20917 +#include <asm/segment.h>
20918 +
20919 /*
20920 * computes a partial checksum, e.g. for TCP/UDP fragments
20921 */
20922 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
20923
20924 #define ARGBASE 16
20925 #define FP 12
20926 -
20927 -ENTRY(csum_partial_copy_generic)
20928 +
20929 +ENTRY(csum_partial_copy_generic_to_user)
20930 CFI_STARTPROC
20931 +
20932 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20933 + pushl_cfi %gs
20934 + popl_cfi %es
20935 + jmp csum_partial_copy_generic
20936 +#endif
20937 +
20938 +ENTRY(csum_partial_copy_generic_from_user)
20939 +
20940 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20941 + pushl_cfi %gs
20942 + popl_cfi %ds
20943 +#endif
20944 +
20945 +ENTRY(csum_partial_copy_generic)
20946 subl $4,%esp
20947 CFI_ADJUST_CFA_OFFSET 4
20948 pushl_cfi %edi
20949 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
20950 jmp 4f
20951 SRC(1: movw (%esi), %bx )
20952 addl $2, %esi
20953 -DST( movw %bx, (%edi) )
20954 +DST( movw %bx, %es:(%edi) )
20955 addl $2, %edi
20956 addw %bx, %ax
20957 adcl $0, %eax
20958 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
20959 SRC(1: movl (%esi), %ebx )
20960 SRC( movl 4(%esi), %edx )
20961 adcl %ebx, %eax
20962 -DST( movl %ebx, (%edi) )
20963 +DST( movl %ebx, %es:(%edi) )
20964 adcl %edx, %eax
20965 -DST( movl %edx, 4(%edi) )
20966 +DST( movl %edx, %es:4(%edi) )
20967
20968 SRC( movl 8(%esi), %ebx )
20969 SRC( movl 12(%esi), %edx )
20970 adcl %ebx, %eax
20971 -DST( movl %ebx, 8(%edi) )
20972 +DST( movl %ebx, %es:8(%edi) )
20973 adcl %edx, %eax
20974 -DST( movl %edx, 12(%edi) )
20975 +DST( movl %edx, %es:12(%edi) )
20976
20977 SRC( movl 16(%esi), %ebx )
20978 SRC( movl 20(%esi), %edx )
20979 adcl %ebx, %eax
20980 -DST( movl %ebx, 16(%edi) )
20981 +DST( movl %ebx, %es:16(%edi) )
20982 adcl %edx, %eax
20983 -DST( movl %edx, 20(%edi) )
20984 +DST( movl %edx, %es:20(%edi) )
20985
20986 SRC( movl 24(%esi), %ebx )
20987 SRC( movl 28(%esi), %edx )
20988 adcl %ebx, %eax
20989 -DST( movl %ebx, 24(%edi) )
20990 +DST( movl %ebx, %es:24(%edi) )
20991 adcl %edx, %eax
20992 -DST( movl %edx, 28(%edi) )
20993 +DST( movl %edx, %es:28(%edi) )
20994
20995 lea 32(%esi), %esi
20996 lea 32(%edi), %edi
20997 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
20998 shrl $2, %edx # This clears CF
20999 SRC(3: movl (%esi), %ebx )
21000 adcl %ebx, %eax
21001 -DST( movl %ebx, (%edi) )
21002 +DST( movl %ebx, %es:(%edi) )
21003 lea 4(%esi), %esi
21004 lea 4(%edi), %edi
21005 dec %edx
21006 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
21007 jb 5f
21008 SRC( movw (%esi), %cx )
21009 leal 2(%esi), %esi
21010 -DST( movw %cx, (%edi) )
21011 +DST( movw %cx, %es:(%edi) )
21012 leal 2(%edi), %edi
21013 je 6f
21014 shll $16,%ecx
21015 SRC(5: movb (%esi), %cl )
21016 -DST( movb %cl, (%edi) )
21017 +DST( movb %cl, %es:(%edi) )
21018 6: addl %ecx, %eax
21019 adcl $0, %eax
21020 7:
21021 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
21022
21023 6001:
21024 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21025 - movl $-EFAULT, (%ebx)
21026 + movl $-EFAULT, %ss:(%ebx)
21027
21028 # zero the complete destination - computing the rest
21029 # is too much work
21030 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
21031
21032 6002:
21033 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21034 - movl $-EFAULT,(%ebx)
21035 + movl $-EFAULT,%ss:(%ebx)
21036 jmp 5000b
21037
21038 .previous
21039
21040 + pushl_cfi %ss
21041 + popl_cfi %ds
21042 + pushl_cfi %ss
21043 + popl_cfi %es
21044 popl_cfi %ebx
21045 CFI_RESTORE ebx
21046 popl_cfi %esi
21047 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21048 popl_cfi %ecx # equivalent to addl $4,%esp
21049 ret
21050 CFI_ENDPROC
21051 -ENDPROC(csum_partial_copy_generic)
21052 +ENDPROC(csum_partial_copy_generic_to_user)
21053
21054 #else
21055
21056 /* Version for PentiumII/PPro */
21057
21058 #define ROUND1(x) \
21059 + nop; nop; nop; \
21060 SRC(movl x(%esi), %ebx ) ; \
21061 addl %ebx, %eax ; \
21062 - DST(movl %ebx, x(%edi) ) ;
21063 + DST(movl %ebx, %es:x(%edi)) ;
21064
21065 #define ROUND(x) \
21066 + nop; nop; nop; \
21067 SRC(movl x(%esi), %ebx ) ; \
21068 adcl %ebx, %eax ; \
21069 - DST(movl %ebx, x(%edi) ) ;
21070 + DST(movl %ebx, %es:x(%edi)) ;
21071
21072 #define ARGBASE 12
21073 -
21074 -ENTRY(csum_partial_copy_generic)
21075 +
21076 +ENTRY(csum_partial_copy_generic_to_user)
21077 CFI_STARTPROC
21078 +
21079 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21080 + pushl_cfi %gs
21081 + popl_cfi %es
21082 + jmp csum_partial_copy_generic
21083 +#endif
21084 +
21085 +ENTRY(csum_partial_copy_generic_from_user)
21086 +
21087 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21088 + pushl_cfi %gs
21089 + popl_cfi %ds
21090 +#endif
21091 +
21092 +ENTRY(csum_partial_copy_generic)
21093 pushl_cfi %ebx
21094 CFI_REL_OFFSET ebx, 0
21095 pushl_cfi %edi
21096 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
21097 subl %ebx, %edi
21098 lea -1(%esi),%edx
21099 andl $-32,%edx
21100 - lea 3f(%ebx,%ebx), %ebx
21101 + lea 3f(%ebx,%ebx,2), %ebx
21102 testl %esi, %esi
21103 jmp *%ebx
21104 1: addl $64,%esi
21105 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
21106 jb 5f
21107 SRC( movw (%esi), %dx )
21108 leal 2(%esi), %esi
21109 -DST( movw %dx, (%edi) )
21110 +DST( movw %dx, %es:(%edi) )
21111 leal 2(%edi), %edi
21112 je 6f
21113 shll $16,%edx
21114 5:
21115 SRC( movb (%esi), %dl )
21116 -DST( movb %dl, (%edi) )
21117 +DST( movb %dl, %es:(%edi) )
21118 6: addl %edx, %eax
21119 adcl $0, %eax
21120 7:
21121 .section .fixup, "ax"
21122 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21123 - movl $-EFAULT, (%ebx)
21124 + movl $-EFAULT, %ss:(%ebx)
21125 # zero the complete destination (computing the rest is too much work)
21126 movl ARGBASE+8(%esp),%edi # dst
21127 movl ARGBASE+12(%esp),%ecx # len
21128 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
21129 rep; stosb
21130 jmp 7b
21131 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21132 - movl $-EFAULT, (%ebx)
21133 + movl $-EFAULT, %ss:(%ebx)
21134 jmp 7b
21135 .previous
21136
21137 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21138 + pushl_cfi %ss
21139 + popl_cfi %ds
21140 + pushl_cfi %ss
21141 + popl_cfi %es
21142 +#endif
21143 +
21144 popl_cfi %esi
21145 CFI_RESTORE esi
21146 popl_cfi %edi
21147 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
21148 CFI_RESTORE ebx
21149 ret
21150 CFI_ENDPROC
21151 -ENDPROC(csum_partial_copy_generic)
21152 +ENDPROC(csum_partial_copy_generic_to_user)
21153
21154 #undef ROUND
21155 #undef ROUND1
21156 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21157 index f2145cf..cea889d 100644
21158 --- a/arch/x86/lib/clear_page_64.S
21159 +++ b/arch/x86/lib/clear_page_64.S
21160 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21161 movl $4096/8,%ecx
21162 xorl %eax,%eax
21163 rep stosq
21164 + pax_force_retaddr
21165 ret
21166 CFI_ENDPROC
21167 ENDPROC(clear_page_c)
21168 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21169 movl $4096,%ecx
21170 xorl %eax,%eax
21171 rep stosb
21172 + pax_force_retaddr
21173 ret
21174 CFI_ENDPROC
21175 ENDPROC(clear_page_c_e)
21176 @@ -43,6 +45,7 @@ ENTRY(clear_page)
21177 leaq 64(%rdi),%rdi
21178 jnz .Lloop
21179 nop
21180 + pax_force_retaddr
21181 ret
21182 CFI_ENDPROC
21183 .Lclear_page_end:
21184 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
21185
21186 #include <asm/cpufeature.h>
21187
21188 - .section .altinstr_replacement,"ax"
21189 + .section .altinstr_replacement,"a"
21190 1: .byte 0xeb /* jmp <disp8> */
21191 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21192 2: .byte 0xeb /* jmp <disp8> */
21193 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21194 index 1e572c5..2a162cd 100644
21195 --- a/arch/x86/lib/cmpxchg16b_emu.S
21196 +++ b/arch/x86/lib/cmpxchg16b_emu.S
21197 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21198
21199 popf
21200 mov $1, %al
21201 + pax_force_retaddr
21202 ret
21203
21204 not_same:
21205 popf
21206 xor %al,%al
21207 + pax_force_retaddr
21208 ret
21209
21210 CFI_ENDPROC
21211 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21212 index 01c805b..dccb07f 100644
21213 --- a/arch/x86/lib/copy_page_64.S
21214 +++ b/arch/x86/lib/copy_page_64.S
21215 @@ -9,6 +9,7 @@ copy_page_c:
21216 CFI_STARTPROC
21217 movl $4096/8,%ecx
21218 rep movsq
21219 + pax_force_retaddr
21220 ret
21221 CFI_ENDPROC
21222 ENDPROC(copy_page_c)
21223 @@ -39,7 +40,7 @@ ENTRY(copy_page)
21224 movq 16 (%rsi), %rdx
21225 movq 24 (%rsi), %r8
21226 movq 32 (%rsi), %r9
21227 - movq 40 (%rsi), %r10
21228 + movq 40 (%rsi), %r13
21229 movq 48 (%rsi), %r11
21230 movq 56 (%rsi), %r12
21231
21232 @@ -50,7 +51,7 @@ ENTRY(copy_page)
21233 movq %rdx, 16 (%rdi)
21234 movq %r8, 24 (%rdi)
21235 movq %r9, 32 (%rdi)
21236 - movq %r10, 40 (%rdi)
21237 + movq %r13, 40 (%rdi)
21238 movq %r11, 48 (%rdi)
21239 movq %r12, 56 (%rdi)
21240
21241 @@ -69,7 +70,7 @@ ENTRY(copy_page)
21242 movq 16 (%rsi), %rdx
21243 movq 24 (%rsi), %r8
21244 movq 32 (%rsi), %r9
21245 - movq 40 (%rsi), %r10
21246 + movq 40 (%rsi), %r13
21247 movq 48 (%rsi), %r11
21248 movq 56 (%rsi), %r12
21249
21250 @@ -78,7 +79,7 @@ ENTRY(copy_page)
21251 movq %rdx, 16 (%rdi)
21252 movq %r8, 24 (%rdi)
21253 movq %r9, 32 (%rdi)
21254 - movq %r10, 40 (%rdi)
21255 + movq %r13, 40 (%rdi)
21256 movq %r11, 48 (%rdi)
21257 movq %r12, 56 (%rdi)
21258
21259 @@ -95,6 +96,7 @@ ENTRY(copy_page)
21260 CFI_RESTORE r13
21261 addq $3*8,%rsp
21262 CFI_ADJUST_CFA_OFFSET -3*8
21263 + pax_force_retaddr
21264 ret
21265 .Lcopy_page_end:
21266 CFI_ENDPROC
21267 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
21268
21269 #include <asm/cpufeature.h>
21270
21271 - .section .altinstr_replacement,"ax"
21272 + .section .altinstr_replacement,"a"
21273 1: .byte 0xeb /* jmp <disp8> */
21274 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21275 2:
21276 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21277 index 0248402..821c786 100644
21278 --- a/arch/x86/lib/copy_user_64.S
21279 +++ b/arch/x86/lib/copy_user_64.S
21280 @@ -16,6 +16,7 @@
21281 #include <asm/thread_info.h>
21282 #include <asm/cpufeature.h>
21283 #include <asm/alternative-asm.h>
21284 +#include <asm/pgtable.h>
21285
21286 /*
21287 * By placing feature2 after feature1 in altinstructions section, we logically
21288 @@ -29,7 +30,7 @@
21289 .byte 0xe9 /* 32bit jump */
21290 .long \orig-1f /* by default jump to orig */
21291 1:
21292 - .section .altinstr_replacement,"ax"
21293 + .section .altinstr_replacement,"a"
21294 2: .byte 0xe9 /* near jump with 32bit immediate */
21295 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21296 3: .byte 0xe9 /* near jump with 32bit immediate */
21297 @@ -71,47 +72,20 @@
21298 #endif
21299 .endm
21300
21301 -/* Standard copy_to_user with segment limit checking */
21302 -ENTRY(_copy_to_user)
21303 - CFI_STARTPROC
21304 - GET_THREAD_INFO(%rax)
21305 - movq %rdi,%rcx
21306 - addq %rdx,%rcx
21307 - jc bad_to_user
21308 - cmpq TI_addr_limit(%rax),%rcx
21309 - ja bad_to_user
21310 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21311 - copy_user_generic_unrolled,copy_user_generic_string, \
21312 - copy_user_enhanced_fast_string
21313 - CFI_ENDPROC
21314 -ENDPROC(_copy_to_user)
21315 -
21316 -/* Standard copy_from_user with segment limit checking */
21317 -ENTRY(_copy_from_user)
21318 - CFI_STARTPROC
21319 - GET_THREAD_INFO(%rax)
21320 - movq %rsi,%rcx
21321 - addq %rdx,%rcx
21322 - jc bad_from_user
21323 - cmpq TI_addr_limit(%rax),%rcx
21324 - ja bad_from_user
21325 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21326 - copy_user_generic_unrolled,copy_user_generic_string, \
21327 - copy_user_enhanced_fast_string
21328 - CFI_ENDPROC
21329 -ENDPROC(_copy_from_user)
21330 -
21331 .section .fixup,"ax"
21332 /* must zero dest */
21333 ENTRY(bad_from_user)
21334 bad_from_user:
21335 CFI_STARTPROC
21336 + testl %edx,%edx
21337 + js bad_to_user
21338 movl %edx,%ecx
21339 xorl %eax,%eax
21340 rep
21341 stosb
21342 bad_to_user:
21343 movl %edx,%eax
21344 + pax_force_retaddr
21345 ret
21346 CFI_ENDPROC
21347 ENDPROC(bad_from_user)
21348 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21349 jz 17f
21350 1: movq (%rsi),%r8
21351 2: movq 1*8(%rsi),%r9
21352 -3: movq 2*8(%rsi),%r10
21353 +3: movq 2*8(%rsi),%rax
21354 4: movq 3*8(%rsi),%r11
21355 5: movq %r8,(%rdi)
21356 6: movq %r9,1*8(%rdi)
21357 -7: movq %r10,2*8(%rdi)
21358 +7: movq %rax,2*8(%rdi)
21359 8: movq %r11,3*8(%rdi)
21360 9: movq 4*8(%rsi),%r8
21361 10: movq 5*8(%rsi),%r9
21362 -11: movq 6*8(%rsi),%r10
21363 +11: movq 6*8(%rsi),%rax
21364 12: movq 7*8(%rsi),%r11
21365 13: movq %r8,4*8(%rdi)
21366 14: movq %r9,5*8(%rdi)
21367 -15: movq %r10,6*8(%rdi)
21368 +15: movq %rax,6*8(%rdi)
21369 16: movq %r11,7*8(%rdi)
21370 leaq 64(%rsi),%rsi
21371 leaq 64(%rdi),%rdi
21372 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21373 decl %ecx
21374 jnz 21b
21375 23: xor %eax,%eax
21376 + pax_force_retaddr
21377 ret
21378
21379 .section .fixup,"ax"
21380 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
21381 3: rep
21382 movsb
21383 4: xorl %eax,%eax
21384 + pax_force_retaddr
21385 ret
21386
21387 .section .fixup,"ax"
21388 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
21389 1: rep
21390 movsb
21391 2: xorl %eax,%eax
21392 + pax_force_retaddr
21393 ret
21394
21395 .section .fixup,"ax"
21396 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21397 index cb0c112..e3a6895 100644
21398 --- a/arch/x86/lib/copy_user_nocache_64.S
21399 +++ b/arch/x86/lib/copy_user_nocache_64.S
21400 @@ -8,12 +8,14 @@
21401
21402 #include <linux/linkage.h>
21403 #include <asm/dwarf2.h>
21404 +#include <asm/alternative-asm.h>
21405
21406 #define FIX_ALIGNMENT 1
21407
21408 #include <asm/current.h>
21409 #include <asm/asm-offsets.h>
21410 #include <asm/thread_info.h>
21411 +#include <asm/pgtable.h>
21412
21413 .macro ALIGN_DESTINATION
21414 #ifdef FIX_ALIGNMENT
21415 @@ -50,6 +52,15 @@
21416 */
21417 ENTRY(__copy_user_nocache)
21418 CFI_STARTPROC
21419 +
21420 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21421 + mov $PAX_USER_SHADOW_BASE,%rcx
21422 + cmp %rcx,%rsi
21423 + jae 1f
21424 + add %rcx,%rsi
21425 +1:
21426 +#endif
21427 +
21428 cmpl $8,%edx
21429 jb 20f /* less then 8 bytes, go to byte copy loop */
21430 ALIGN_DESTINATION
21431 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21432 jz 17f
21433 1: movq (%rsi),%r8
21434 2: movq 1*8(%rsi),%r9
21435 -3: movq 2*8(%rsi),%r10
21436 +3: movq 2*8(%rsi),%rax
21437 4: movq 3*8(%rsi),%r11
21438 5: movnti %r8,(%rdi)
21439 6: movnti %r9,1*8(%rdi)
21440 -7: movnti %r10,2*8(%rdi)
21441 +7: movnti %rax,2*8(%rdi)
21442 8: movnti %r11,3*8(%rdi)
21443 9: movq 4*8(%rsi),%r8
21444 10: movq 5*8(%rsi),%r9
21445 -11: movq 6*8(%rsi),%r10
21446 +11: movq 6*8(%rsi),%rax
21447 12: movq 7*8(%rsi),%r11
21448 13: movnti %r8,4*8(%rdi)
21449 14: movnti %r9,5*8(%rdi)
21450 -15: movnti %r10,6*8(%rdi)
21451 +15: movnti %rax,6*8(%rdi)
21452 16: movnti %r11,7*8(%rdi)
21453 leaq 64(%rsi),%rsi
21454 leaq 64(%rdi),%rdi
21455 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21456 jnz 21b
21457 23: xorl %eax,%eax
21458 sfence
21459 + pax_force_retaddr
21460 ret
21461
21462 .section .fixup,"ax"
21463 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21464 index fb903b7..c92b7f7 100644
21465 --- a/arch/x86/lib/csum-copy_64.S
21466 +++ b/arch/x86/lib/csum-copy_64.S
21467 @@ -8,6 +8,7 @@
21468 #include <linux/linkage.h>
21469 #include <asm/dwarf2.h>
21470 #include <asm/errno.h>
21471 +#include <asm/alternative-asm.h>
21472
21473 /*
21474 * Checksum copy with exception handling.
21475 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21476 CFI_RESTORE rbp
21477 addq $7*8, %rsp
21478 CFI_ADJUST_CFA_OFFSET -7*8
21479 + pax_force_retaddr 0, 1
21480 ret
21481 CFI_RESTORE_STATE
21482
21483 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21484 index 459b58a..9570bc7 100644
21485 --- a/arch/x86/lib/csum-wrappers_64.c
21486 +++ b/arch/x86/lib/csum-wrappers_64.c
21487 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21488 len -= 2;
21489 }
21490 }
21491 - isum = csum_partial_copy_generic((__force const void *)src,
21492 +
21493 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21494 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21495 + src += PAX_USER_SHADOW_BASE;
21496 +#endif
21497 +
21498 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
21499 dst, len, isum, errp, NULL);
21500 if (unlikely(*errp))
21501 goto out_err;
21502 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21503 }
21504
21505 *errp = 0;
21506 - return csum_partial_copy_generic(src, (void __force *)dst,
21507 +
21508 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21509 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21510 + dst += PAX_USER_SHADOW_BASE;
21511 +#endif
21512 +
21513 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21514 len, isum, NULL, errp);
21515 }
21516 EXPORT_SYMBOL(csum_partial_copy_to_user);
21517 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21518 index 51f1504..ddac4c1 100644
21519 --- a/arch/x86/lib/getuser.S
21520 +++ b/arch/x86/lib/getuser.S
21521 @@ -33,15 +33,38 @@
21522 #include <asm/asm-offsets.h>
21523 #include <asm/thread_info.h>
21524 #include <asm/asm.h>
21525 +#include <asm/segment.h>
21526 +#include <asm/pgtable.h>
21527 +#include <asm/alternative-asm.h>
21528 +
21529 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21530 +#define __copyuser_seg gs;
21531 +#else
21532 +#define __copyuser_seg
21533 +#endif
21534
21535 .text
21536 ENTRY(__get_user_1)
21537 CFI_STARTPROC
21538 +
21539 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21540 GET_THREAD_INFO(%_ASM_DX)
21541 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21542 jae bad_get_user
21543 -1: movzb (%_ASM_AX),%edx
21544 +
21545 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21546 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21547 + cmp %_ASM_DX,%_ASM_AX
21548 + jae 1234f
21549 + add %_ASM_DX,%_ASM_AX
21550 +1234:
21551 +#endif
21552 +
21553 +#endif
21554 +
21555 +1: __copyuser_seg movzb (%_ASM_AX),%edx
21556 xor %eax,%eax
21557 + pax_force_retaddr
21558 ret
21559 CFI_ENDPROC
21560 ENDPROC(__get_user_1)
21561 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21562 ENTRY(__get_user_2)
21563 CFI_STARTPROC
21564 add $1,%_ASM_AX
21565 +
21566 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21567 jc bad_get_user
21568 GET_THREAD_INFO(%_ASM_DX)
21569 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21570 jae bad_get_user
21571 -2: movzwl -1(%_ASM_AX),%edx
21572 +
21573 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21574 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21575 + cmp %_ASM_DX,%_ASM_AX
21576 + jae 1234f
21577 + add %_ASM_DX,%_ASM_AX
21578 +1234:
21579 +#endif
21580 +
21581 +#endif
21582 +
21583 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21584 xor %eax,%eax
21585 + pax_force_retaddr
21586 ret
21587 CFI_ENDPROC
21588 ENDPROC(__get_user_2)
21589 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21590 ENTRY(__get_user_4)
21591 CFI_STARTPROC
21592 add $3,%_ASM_AX
21593 +
21594 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21595 jc bad_get_user
21596 GET_THREAD_INFO(%_ASM_DX)
21597 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21598 jae bad_get_user
21599 -3: mov -3(%_ASM_AX),%edx
21600 +
21601 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21602 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21603 + cmp %_ASM_DX,%_ASM_AX
21604 + jae 1234f
21605 + add %_ASM_DX,%_ASM_AX
21606 +1234:
21607 +#endif
21608 +
21609 +#endif
21610 +
21611 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
21612 xor %eax,%eax
21613 + pax_force_retaddr
21614 ret
21615 CFI_ENDPROC
21616 ENDPROC(__get_user_4)
21617 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21618 GET_THREAD_INFO(%_ASM_DX)
21619 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21620 jae bad_get_user
21621 +
21622 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21623 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21624 + cmp %_ASM_DX,%_ASM_AX
21625 + jae 1234f
21626 + add %_ASM_DX,%_ASM_AX
21627 +1234:
21628 +#endif
21629 +
21630 4: movq -7(%_ASM_AX),%_ASM_DX
21631 xor %eax,%eax
21632 + pax_force_retaddr
21633 ret
21634 CFI_ENDPROC
21635 ENDPROC(__get_user_8)
21636 @@ -91,6 +152,7 @@ bad_get_user:
21637 CFI_STARTPROC
21638 xor %edx,%edx
21639 mov $(-EFAULT),%_ASM_AX
21640 + pax_force_retaddr
21641 ret
21642 CFI_ENDPROC
21643 END(bad_get_user)
21644 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21645 index 5a1f9f3..ba9f577 100644
21646 --- a/arch/x86/lib/insn.c
21647 +++ b/arch/x86/lib/insn.c
21648 @@ -21,6 +21,11 @@
21649 #include <linux/string.h>
21650 #include <asm/inat.h>
21651 #include <asm/insn.h>
21652 +#ifdef __KERNEL__
21653 +#include <asm/pgtable_types.h>
21654 +#else
21655 +#define ktla_ktva(addr) addr
21656 +#endif
21657
21658 /* Verify next sizeof(t) bytes can be on the same instruction */
21659 #define validate_next(t, insn, n) \
21660 @@ -49,8 +54,8 @@
21661 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21662 {
21663 memset(insn, 0, sizeof(*insn));
21664 - insn->kaddr = kaddr;
21665 - insn->next_byte = kaddr;
21666 + insn->kaddr = ktla_ktva(kaddr);
21667 + insn->next_byte = ktla_ktva(kaddr);
21668 insn->x86_64 = x86_64 ? 1 : 0;
21669 insn->opnd_bytes = 4;
21670 if (x86_64)
21671 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21672 index 05a95e7..326f2fa 100644
21673 --- a/arch/x86/lib/iomap_copy_64.S
21674 +++ b/arch/x86/lib/iomap_copy_64.S
21675 @@ -17,6 +17,7 @@
21676
21677 #include <linux/linkage.h>
21678 #include <asm/dwarf2.h>
21679 +#include <asm/alternative-asm.h>
21680
21681 /*
21682 * override generic version in lib/iomap_copy.c
21683 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21684 CFI_STARTPROC
21685 movl %edx,%ecx
21686 rep movsd
21687 + pax_force_retaddr
21688 ret
21689 CFI_ENDPROC
21690 ENDPROC(__iowrite32_copy)
21691 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21692 index efbf2a0..8893637 100644
21693 --- a/arch/x86/lib/memcpy_64.S
21694 +++ b/arch/x86/lib/memcpy_64.S
21695 @@ -34,6 +34,7 @@
21696 rep movsq
21697 movl %edx, %ecx
21698 rep movsb
21699 + pax_force_retaddr
21700 ret
21701 .Lmemcpy_e:
21702 .previous
21703 @@ -51,6 +52,7 @@
21704
21705 movl %edx, %ecx
21706 rep movsb
21707 + pax_force_retaddr
21708 ret
21709 .Lmemcpy_e_e:
21710 .previous
21711 @@ -81,13 +83,13 @@ ENTRY(memcpy)
21712 */
21713 movq 0*8(%rsi), %r8
21714 movq 1*8(%rsi), %r9
21715 - movq 2*8(%rsi), %r10
21716 + movq 2*8(%rsi), %rcx
21717 movq 3*8(%rsi), %r11
21718 leaq 4*8(%rsi), %rsi
21719
21720 movq %r8, 0*8(%rdi)
21721 movq %r9, 1*8(%rdi)
21722 - movq %r10, 2*8(%rdi)
21723 + movq %rcx, 2*8(%rdi)
21724 movq %r11, 3*8(%rdi)
21725 leaq 4*8(%rdi), %rdi
21726 jae .Lcopy_forward_loop
21727 @@ -110,12 +112,12 @@ ENTRY(memcpy)
21728 subq $0x20, %rdx
21729 movq -1*8(%rsi), %r8
21730 movq -2*8(%rsi), %r9
21731 - movq -3*8(%rsi), %r10
21732 + movq -3*8(%rsi), %rcx
21733 movq -4*8(%rsi), %r11
21734 leaq -4*8(%rsi), %rsi
21735 movq %r8, -1*8(%rdi)
21736 movq %r9, -2*8(%rdi)
21737 - movq %r10, -3*8(%rdi)
21738 + movq %rcx, -3*8(%rdi)
21739 movq %r11, -4*8(%rdi)
21740 leaq -4*8(%rdi), %rdi
21741 jae .Lcopy_backward_loop
21742 @@ -135,12 +137,13 @@ ENTRY(memcpy)
21743 */
21744 movq 0*8(%rsi), %r8
21745 movq 1*8(%rsi), %r9
21746 - movq -2*8(%rsi, %rdx), %r10
21747 + movq -2*8(%rsi, %rdx), %rcx
21748 movq -1*8(%rsi, %rdx), %r11
21749 movq %r8, 0*8(%rdi)
21750 movq %r9, 1*8(%rdi)
21751 - movq %r10, -2*8(%rdi, %rdx)
21752 + movq %rcx, -2*8(%rdi, %rdx)
21753 movq %r11, -1*8(%rdi, %rdx)
21754 + pax_force_retaddr
21755 retq
21756 .p2align 4
21757 .Lless_16bytes:
21758 @@ -153,6 +156,7 @@ ENTRY(memcpy)
21759 movq -1*8(%rsi, %rdx), %r9
21760 movq %r8, 0*8(%rdi)
21761 movq %r9, -1*8(%rdi, %rdx)
21762 + pax_force_retaddr
21763 retq
21764 .p2align 4
21765 .Lless_8bytes:
21766 @@ -166,6 +170,7 @@ ENTRY(memcpy)
21767 movl -4(%rsi, %rdx), %r8d
21768 movl %ecx, (%rdi)
21769 movl %r8d, -4(%rdi, %rdx)
21770 + pax_force_retaddr
21771 retq
21772 .p2align 4
21773 .Lless_3bytes:
21774 @@ -183,6 +188,7 @@ ENTRY(memcpy)
21775 jnz .Lloop_1
21776
21777 .Lend:
21778 + pax_force_retaddr
21779 retq
21780 CFI_ENDPROC
21781 ENDPROC(memcpy)
21782 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
21783 index ee16461..c39c199 100644
21784 --- a/arch/x86/lib/memmove_64.S
21785 +++ b/arch/x86/lib/memmove_64.S
21786 @@ -61,13 +61,13 @@ ENTRY(memmove)
21787 5:
21788 sub $0x20, %rdx
21789 movq 0*8(%rsi), %r11
21790 - movq 1*8(%rsi), %r10
21791 + movq 1*8(%rsi), %rcx
21792 movq 2*8(%rsi), %r9
21793 movq 3*8(%rsi), %r8
21794 leaq 4*8(%rsi), %rsi
21795
21796 movq %r11, 0*8(%rdi)
21797 - movq %r10, 1*8(%rdi)
21798 + movq %rcx, 1*8(%rdi)
21799 movq %r9, 2*8(%rdi)
21800 movq %r8, 3*8(%rdi)
21801 leaq 4*8(%rdi), %rdi
21802 @@ -81,10 +81,10 @@ ENTRY(memmove)
21803 4:
21804 movq %rdx, %rcx
21805 movq -8(%rsi, %rdx), %r11
21806 - lea -8(%rdi, %rdx), %r10
21807 + lea -8(%rdi, %rdx), %r9
21808 shrq $3, %rcx
21809 rep movsq
21810 - movq %r11, (%r10)
21811 + movq %r11, (%r9)
21812 jmp 13f
21813 .Lmemmove_end_forward:
21814
21815 @@ -95,14 +95,14 @@ ENTRY(memmove)
21816 7:
21817 movq %rdx, %rcx
21818 movq (%rsi), %r11
21819 - movq %rdi, %r10
21820 + movq %rdi, %r9
21821 leaq -8(%rsi, %rdx), %rsi
21822 leaq -8(%rdi, %rdx), %rdi
21823 shrq $3, %rcx
21824 std
21825 rep movsq
21826 cld
21827 - movq %r11, (%r10)
21828 + movq %r11, (%r9)
21829 jmp 13f
21830
21831 /*
21832 @@ -127,13 +127,13 @@ ENTRY(memmove)
21833 8:
21834 subq $0x20, %rdx
21835 movq -1*8(%rsi), %r11
21836 - movq -2*8(%rsi), %r10
21837 + movq -2*8(%rsi), %rcx
21838 movq -3*8(%rsi), %r9
21839 movq -4*8(%rsi), %r8
21840 leaq -4*8(%rsi), %rsi
21841
21842 movq %r11, -1*8(%rdi)
21843 - movq %r10, -2*8(%rdi)
21844 + movq %rcx, -2*8(%rdi)
21845 movq %r9, -3*8(%rdi)
21846 movq %r8, -4*8(%rdi)
21847 leaq -4*8(%rdi), %rdi
21848 @@ -151,11 +151,11 @@ ENTRY(memmove)
21849 * Move data from 16 bytes to 31 bytes.
21850 */
21851 movq 0*8(%rsi), %r11
21852 - movq 1*8(%rsi), %r10
21853 + movq 1*8(%rsi), %rcx
21854 movq -2*8(%rsi, %rdx), %r9
21855 movq -1*8(%rsi, %rdx), %r8
21856 movq %r11, 0*8(%rdi)
21857 - movq %r10, 1*8(%rdi)
21858 + movq %rcx, 1*8(%rdi)
21859 movq %r9, -2*8(%rdi, %rdx)
21860 movq %r8, -1*8(%rdi, %rdx)
21861 jmp 13f
21862 @@ -167,9 +167,9 @@ ENTRY(memmove)
21863 * Move data from 8 bytes to 15 bytes.
21864 */
21865 movq 0*8(%rsi), %r11
21866 - movq -1*8(%rsi, %rdx), %r10
21867 + movq -1*8(%rsi, %rdx), %r9
21868 movq %r11, 0*8(%rdi)
21869 - movq %r10, -1*8(%rdi, %rdx)
21870 + movq %r9, -1*8(%rdi, %rdx)
21871 jmp 13f
21872 10:
21873 cmpq $4, %rdx
21874 @@ -178,9 +178,9 @@ ENTRY(memmove)
21875 * Move data from 4 bytes to 7 bytes.
21876 */
21877 movl (%rsi), %r11d
21878 - movl -4(%rsi, %rdx), %r10d
21879 + movl -4(%rsi, %rdx), %r9d
21880 movl %r11d, (%rdi)
21881 - movl %r10d, -4(%rdi, %rdx)
21882 + movl %r9d, -4(%rdi, %rdx)
21883 jmp 13f
21884 11:
21885 cmp $2, %rdx
21886 @@ -189,9 +189,9 @@ ENTRY(memmove)
21887 * Move data from 2 bytes to 3 bytes.
21888 */
21889 movw (%rsi), %r11w
21890 - movw -2(%rsi, %rdx), %r10w
21891 + movw -2(%rsi, %rdx), %r9w
21892 movw %r11w, (%rdi)
21893 - movw %r10w, -2(%rdi, %rdx)
21894 + movw %r9w, -2(%rdi, %rdx)
21895 jmp 13f
21896 12:
21897 cmp $1, %rdx
21898 @@ -202,6 +202,7 @@ ENTRY(memmove)
21899 movb (%rsi), %r11b
21900 movb %r11b, (%rdi)
21901 13:
21902 + pax_force_retaddr
21903 retq
21904 CFI_ENDPROC
21905
21906 @@ -210,6 +211,7 @@ ENTRY(memmove)
21907 /* Forward moving data. */
21908 movq %rdx, %rcx
21909 rep movsb
21910 + pax_force_retaddr
21911 retq
21912 .Lmemmove_end_forward_efs:
21913 .previous
21914 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
21915 index 79bd454..dff325a 100644
21916 --- a/arch/x86/lib/memset_64.S
21917 +++ b/arch/x86/lib/memset_64.S
21918 @@ -31,6 +31,7 @@
21919 movl %r8d,%ecx
21920 rep stosb
21921 movq %r9,%rax
21922 + pax_force_retaddr
21923 ret
21924 .Lmemset_e:
21925 .previous
21926 @@ -53,6 +54,7 @@
21927 movl %edx,%ecx
21928 rep stosb
21929 movq %r9,%rax
21930 + pax_force_retaddr
21931 ret
21932 .Lmemset_e_e:
21933 .previous
21934 @@ -60,13 +62,13 @@
21935 ENTRY(memset)
21936 ENTRY(__memset)
21937 CFI_STARTPROC
21938 - movq %rdi,%r10
21939 movq %rdx,%r11
21940
21941 /* expand byte value */
21942 movzbl %sil,%ecx
21943 movabs $0x0101010101010101,%rax
21944 mul %rcx /* with rax, clobbers rdx */
21945 + movq %rdi,%rdx
21946
21947 /* align dst */
21948 movl %edi,%r9d
21949 @@ -120,7 +122,8 @@ ENTRY(__memset)
21950 jnz .Lloop_1
21951
21952 .Lende:
21953 - movq %r10,%rax
21954 + movq %rdx,%rax
21955 + pax_force_retaddr
21956 ret
21957
21958 CFI_RESTORE_STATE
21959 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
21960 index c9f2d9b..e7fd2c0 100644
21961 --- a/arch/x86/lib/mmx_32.c
21962 +++ b/arch/x86/lib/mmx_32.c
21963 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
21964 {
21965 void *p;
21966 int i;
21967 + unsigned long cr0;
21968
21969 if (unlikely(in_interrupt()))
21970 return __memcpy(to, from, len);
21971 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
21972 kernel_fpu_begin();
21973
21974 __asm__ __volatile__ (
21975 - "1: prefetch (%0)\n" /* This set is 28 bytes */
21976 - " prefetch 64(%0)\n"
21977 - " prefetch 128(%0)\n"
21978 - " prefetch 192(%0)\n"
21979 - " prefetch 256(%0)\n"
21980 + "1: prefetch (%1)\n" /* This set is 28 bytes */
21981 + " prefetch 64(%1)\n"
21982 + " prefetch 128(%1)\n"
21983 + " prefetch 192(%1)\n"
21984 + " prefetch 256(%1)\n"
21985 "2: \n"
21986 ".section .fixup, \"ax\"\n"
21987 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21988 + "3: \n"
21989 +
21990 +#ifdef CONFIG_PAX_KERNEXEC
21991 + " movl %%cr0, %0\n"
21992 + " movl %0, %%eax\n"
21993 + " andl $0xFFFEFFFF, %%eax\n"
21994 + " movl %%eax, %%cr0\n"
21995 +#endif
21996 +
21997 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21998 +
21999 +#ifdef CONFIG_PAX_KERNEXEC
22000 + " movl %0, %%cr0\n"
22001 +#endif
22002 +
22003 " jmp 2b\n"
22004 ".previous\n"
22005 _ASM_EXTABLE(1b, 3b)
22006 - : : "r" (from));
22007 + : "=&r" (cr0) : "r" (from) : "ax");
22008
22009 for ( ; i > 5; i--) {
22010 __asm__ __volatile__ (
22011 - "1: prefetch 320(%0)\n"
22012 - "2: movq (%0), %%mm0\n"
22013 - " movq 8(%0), %%mm1\n"
22014 - " movq 16(%0), %%mm2\n"
22015 - " movq 24(%0), %%mm3\n"
22016 - " movq %%mm0, (%1)\n"
22017 - " movq %%mm1, 8(%1)\n"
22018 - " movq %%mm2, 16(%1)\n"
22019 - " movq %%mm3, 24(%1)\n"
22020 - " movq 32(%0), %%mm0\n"
22021 - " movq 40(%0), %%mm1\n"
22022 - " movq 48(%0), %%mm2\n"
22023 - " movq 56(%0), %%mm3\n"
22024 - " movq %%mm0, 32(%1)\n"
22025 - " movq %%mm1, 40(%1)\n"
22026 - " movq %%mm2, 48(%1)\n"
22027 - " movq %%mm3, 56(%1)\n"
22028 + "1: prefetch 320(%1)\n"
22029 + "2: movq (%1), %%mm0\n"
22030 + " movq 8(%1), %%mm1\n"
22031 + " movq 16(%1), %%mm2\n"
22032 + " movq 24(%1), %%mm3\n"
22033 + " movq %%mm0, (%2)\n"
22034 + " movq %%mm1, 8(%2)\n"
22035 + " movq %%mm2, 16(%2)\n"
22036 + " movq %%mm3, 24(%2)\n"
22037 + " movq 32(%1), %%mm0\n"
22038 + " movq 40(%1), %%mm1\n"
22039 + " movq 48(%1), %%mm2\n"
22040 + " movq 56(%1), %%mm3\n"
22041 + " movq %%mm0, 32(%2)\n"
22042 + " movq %%mm1, 40(%2)\n"
22043 + " movq %%mm2, 48(%2)\n"
22044 + " movq %%mm3, 56(%2)\n"
22045 ".section .fixup, \"ax\"\n"
22046 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22047 + "3:\n"
22048 +
22049 +#ifdef CONFIG_PAX_KERNEXEC
22050 + " movl %%cr0, %0\n"
22051 + " movl %0, %%eax\n"
22052 + " andl $0xFFFEFFFF, %%eax\n"
22053 + " movl %%eax, %%cr0\n"
22054 +#endif
22055 +
22056 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22057 +
22058 +#ifdef CONFIG_PAX_KERNEXEC
22059 + " movl %0, %%cr0\n"
22060 +#endif
22061 +
22062 " jmp 2b\n"
22063 ".previous\n"
22064 _ASM_EXTABLE(1b, 3b)
22065 - : : "r" (from), "r" (to) : "memory");
22066 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22067
22068 from += 64;
22069 to += 64;
22070 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22071 static void fast_copy_page(void *to, void *from)
22072 {
22073 int i;
22074 + unsigned long cr0;
22075
22076 kernel_fpu_begin();
22077
22078 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22079 * but that is for later. -AV
22080 */
22081 __asm__ __volatile__(
22082 - "1: prefetch (%0)\n"
22083 - " prefetch 64(%0)\n"
22084 - " prefetch 128(%0)\n"
22085 - " prefetch 192(%0)\n"
22086 - " prefetch 256(%0)\n"
22087 + "1: prefetch (%1)\n"
22088 + " prefetch 64(%1)\n"
22089 + " prefetch 128(%1)\n"
22090 + " prefetch 192(%1)\n"
22091 + " prefetch 256(%1)\n"
22092 "2: \n"
22093 ".section .fixup, \"ax\"\n"
22094 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22095 + "3: \n"
22096 +
22097 +#ifdef CONFIG_PAX_KERNEXEC
22098 + " movl %%cr0, %0\n"
22099 + " movl %0, %%eax\n"
22100 + " andl $0xFFFEFFFF, %%eax\n"
22101 + " movl %%eax, %%cr0\n"
22102 +#endif
22103 +
22104 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22105 +
22106 +#ifdef CONFIG_PAX_KERNEXEC
22107 + " movl %0, %%cr0\n"
22108 +#endif
22109 +
22110 " jmp 2b\n"
22111 ".previous\n"
22112 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22113 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22114
22115 for (i = 0; i < (4096-320)/64; i++) {
22116 __asm__ __volatile__ (
22117 - "1: prefetch 320(%0)\n"
22118 - "2: movq (%0), %%mm0\n"
22119 - " movntq %%mm0, (%1)\n"
22120 - " movq 8(%0), %%mm1\n"
22121 - " movntq %%mm1, 8(%1)\n"
22122 - " movq 16(%0), %%mm2\n"
22123 - " movntq %%mm2, 16(%1)\n"
22124 - " movq 24(%0), %%mm3\n"
22125 - " movntq %%mm3, 24(%1)\n"
22126 - " movq 32(%0), %%mm4\n"
22127 - " movntq %%mm4, 32(%1)\n"
22128 - " movq 40(%0), %%mm5\n"
22129 - " movntq %%mm5, 40(%1)\n"
22130 - " movq 48(%0), %%mm6\n"
22131 - " movntq %%mm6, 48(%1)\n"
22132 - " movq 56(%0), %%mm7\n"
22133 - " movntq %%mm7, 56(%1)\n"
22134 + "1: prefetch 320(%1)\n"
22135 + "2: movq (%1), %%mm0\n"
22136 + " movntq %%mm0, (%2)\n"
22137 + " movq 8(%1), %%mm1\n"
22138 + " movntq %%mm1, 8(%2)\n"
22139 + " movq 16(%1), %%mm2\n"
22140 + " movntq %%mm2, 16(%2)\n"
22141 + " movq 24(%1), %%mm3\n"
22142 + " movntq %%mm3, 24(%2)\n"
22143 + " movq 32(%1), %%mm4\n"
22144 + " movntq %%mm4, 32(%2)\n"
22145 + " movq 40(%1), %%mm5\n"
22146 + " movntq %%mm5, 40(%2)\n"
22147 + " movq 48(%1), %%mm6\n"
22148 + " movntq %%mm6, 48(%2)\n"
22149 + " movq 56(%1), %%mm7\n"
22150 + " movntq %%mm7, 56(%2)\n"
22151 ".section .fixup, \"ax\"\n"
22152 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22153 + "3:\n"
22154 +
22155 +#ifdef CONFIG_PAX_KERNEXEC
22156 + " movl %%cr0, %0\n"
22157 + " movl %0, %%eax\n"
22158 + " andl $0xFFFEFFFF, %%eax\n"
22159 + " movl %%eax, %%cr0\n"
22160 +#endif
22161 +
22162 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22163 +
22164 +#ifdef CONFIG_PAX_KERNEXEC
22165 + " movl %0, %%cr0\n"
22166 +#endif
22167 +
22168 " jmp 2b\n"
22169 ".previous\n"
22170 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22171 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22172
22173 from += 64;
22174 to += 64;
22175 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22176 static void fast_copy_page(void *to, void *from)
22177 {
22178 int i;
22179 + unsigned long cr0;
22180
22181 kernel_fpu_begin();
22182
22183 __asm__ __volatile__ (
22184 - "1: prefetch (%0)\n"
22185 - " prefetch 64(%0)\n"
22186 - " prefetch 128(%0)\n"
22187 - " prefetch 192(%0)\n"
22188 - " prefetch 256(%0)\n"
22189 + "1: prefetch (%1)\n"
22190 + " prefetch 64(%1)\n"
22191 + " prefetch 128(%1)\n"
22192 + " prefetch 192(%1)\n"
22193 + " prefetch 256(%1)\n"
22194 "2: \n"
22195 ".section .fixup, \"ax\"\n"
22196 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22197 + "3: \n"
22198 +
22199 +#ifdef CONFIG_PAX_KERNEXEC
22200 + " movl %%cr0, %0\n"
22201 + " movl %0, %%eax\n"
22202 + " andl $0xFFFEFFFF, %%eax\n"
22203 + " movl %%eax, %%cr0\n"
22204 +#endif
22205 +
22206 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22207 +
22208 +#ifdef CONFIG_PAX_KERNEXEC
22209 + " movl %0, %%cr0\n"
22210 +#endif
22211 +
22212 " jmp 2b\n"
22213 ".previous\n"
22214 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22215 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22216
22217 for (i = 0; i < 4096/64; i++) {
22218 __asm__ __volatile__ (
22219 - "1: prefetch 320(%0)\n"
22220 - "2: movq (%0), %%mm0\n"
22221 - " movq 8(%0), %%mm1\n"
22222 - " movq 16(%0), %%mm2\n"
22223 - " movq 24(%0), %%mm3\n"
22224 - " movq %%mm0, (%1)\n"
22225 - " movq %%mm1, 8(%1)\n"
22226 - " movq %%mm2, 16(%1)\n"
22227 - " movq %%mm3, 24(%1)\n"
22228 - " movq 32(%0), %%mm0\n"
22229 - " movq 40(%0), %%mm1\n"
22230 - " movq 48(%0), %%mm2\n"
22231 - " movq 56(%0), %%mm3\n"
22232 - " movq %%mm0, 32(%1)\n"
22233 - " movq %%mm1, 40(%1)\n"
22234 - " movq %%mm2, 48(%1)\n"
22235 - " movq %%mm3, 56(%1)\n"
22236 + "1: prefetch 320(%1)\n"
22237 + "2: movq (%1), %%mm0\n"
22238 + " movq 8(%1), %%mm1\n"
22239 + " movq 16(%1), %%mm2\n"
22240 + " movq 24(%1), %%mm3\n"
22241 + " movq %%mm0, (%2)\n"
22242 + " movq %%mm1, 8(%2)\n"
22243 + " movq %%mm2, 16(%2)\n"
22244 + " movq %%mm3, 24(%2)\n"
22245 + " movq 32(%1), %%mm0\n"
22246 + " movq 40(%1), %%mm1\n"
22247 + " movq 48(%1), %%mm2\n"
22248 + " movq 56(%1), %%mm3\n"
22249 + " movq %%mm0, 32(%2)\n"
22250 + " movq %%mm1, 40(%2)\n"
22251 + " movq %%mm2, 48(%2)\n"
22252 + " movq %%mm3, 56(%2)\n"
22253 ".section .fixup, \"ax\"\n"
22254 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22255 + "3:\n"
22256 +
22257 +#ifdef CONFIG_PAX_KERNEXEC
22258 + " movl %%cr0, %0\n"
22259 + " movl %0, %%eax\n"
22260 + " andl $0xFFFEFFFF, %%eax\n"
22261 + " movl %%eax, %%cr0\n"
22262 +#endif
22263 +
22264 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22265 +
22266 +#ifdef CONFIG_PAX_KERNEXEC
22267 + " movl %0, %%cr0\n"
22268 +#endif
22269 +
22270 " jmp 2b\n"
22271 ".previous\n"
22272 _ASM_EXTABLE(1b, 3b)
22273 - : : "r" (from), "r" (to) : "memory");
22274 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22275
22276 from += 64;
22277 to += 64;
22278 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22279 index 69fa106..adda88b 100644
22280 --- a/arch/x86/lib/msr-reg.S
22281 +++ b/arch/x86/lib/msr-reg.S
22282 @@ -3,6 +3,7 @@
22283 #include <asm/dwarf2.h>
22284 #include <asm/asm.h>
22285 #include <asm/msr.h>
22286 +#include <asm/alternative-asm.h>
22287
22288 #ifdef CONFIG_X86_64
22289 /*
22290 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22291 CFI_STARTPROC
22292 pushq_cfi %rbx
22293 pushq_cfi %rbp
22294 - movq %rdi, %r10 /* Save pointer */
22295 + movq %rdi, %r9 /* Save pointer */
22296 xorl %r11d, %r11d /* Return value */
22297 movl (%rdi), %eax
22298 movl 4(%rdi), %ecx
22299 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22300 movl 28(%rdi), %edi
22301 CFI_REMEMBER_STATE
22302 1: \op
22303 -2: movl %eax, (%r10)
22304 +2: movl %eax, (%r9)
22305 movl %r11d, %eax /* Return value */
22306 - movl %ecx, 4(%r10)
22307 - movl %edx, 8(%r10)
22308 - movl %ebx, 12(%r10)
22309 - movl %ebp, 20(%r10)
22310 - movl %esi, 24(%r10)
22311 - movl %edi, 28(%r10)
22312 + movl %ecx, 4(%r9)
22313 + movl %edx, 8(%r9)
22314 + movl %ebx, 12(%r9)
22315 + movl %ebp, 20(%r9)
22316 + movl %esi, 24(%r9)
22317 + movl %edi, 28(%r9)
22318 popq_cfi %rbp
22319 popq_cfi %rbx
22320 + pax_force_retaddr
22321 ret
22322 3:
22323 CFI_RESTORE_STATE
22324 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22325 index 36b0d15..d381858 100644
22326 --- a/arch/x86/lib/putuser.S
22327 +++ b/arch/x86/lib/putuser.S
22328 @@ -15,7 +15,9 @@
22329 #include <asm/thread_info.h>
22330 #include <asm/errno.h>
22331 #include <asm/asm.h>
22332 -
22333 +#include <asm/segment.h>
22334 +#include <asm/pgtable.h>
22335 +#include <asm/alternative-asm.h>
22336
22337 /*
22338 * __put_user_X
22339 @@ -29,52 +31,119 @@
22340 * as they get called from within inline assembly.
22341 */
22342
22343 -#define ENTER CFI_STARTPROC ; \
22344 - GET_THREAD_INFO(%_ASM_BX)
22345 -#define EXIT ret ; \
22346 +#define ENTER CFI_STARTPROC
22347 +#define EXIT pax_force_retaddr; ret ; \
22348 CFI_ENDPROC
22349
22350 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22351 +#define _DEST %_ASM_CX,%_ASM_BX
22352 +#else
22353 +#define _DEST %_ASM_CX
22354 +#endif
22355 +
22356 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22357 +#define __copyuser_seg gs;
22358 +#else
22359 +#define __copyuser_seg
22360 +#endif
22361 +
22362 .text
22363 ENTRY(__put_user_1)
22364 ENTER
22365 +
22366 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22367 + GET_THREAD_INFO(%_ASM_BX)
22368 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22369 jae bad_put_user
22370 -1: movb %al,(%_ASM_CX)
22371 +
22372 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22373 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22374 + cmp %_ASM_BX,%_ASM_CX
22375 + jb 1234f
22376 + xor %ebx,%ebx
22377 +1234:
22378 +#endif
22379 +
22380 +#endif
22381 +
22382 +1: __copyuser_seg movb %al,(_DEST)
22383 xor %eax,%eax
22384 EXIT
22385 ENDPROC(__put_user_1)
22386
22387 ENTRY(__put_user_2)
22388 ENTER
22389 +
22390 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22391 + GET_THREAD_INFO(%_ASM_BX)
22392 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22393 sub $1,%_ASM_BX
22394 cmp %_ASM_BX,%_ASM_CX
22395 jae bad_put_user
22396 -2: movw %ax,(%_ASM_CX)
22397 +
22398 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22399 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22400 + cmp %_ASM_BX,%_ASM_CX
22401 + jb 1234f
22402 + xor %ebx,%ebx
22403 +1234:
22404 +#endif
22405 +
22406 +#endif
22407 +
22408 +2: __copyuser_seg movw %ax,(_DEST)
22409 xor %eax,%eax
22410 EXIT
22411 ENDPROC(__put_user_2)
22412
22413 ENTRY(__put_user_4)
22414 ENTER
22415 +
22416 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22417 + GET_THREAD_INFO(%_ASM_BX)
22418 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22419 sub $3,%_ASM_BX
22420 cmp %_ASM_BX,%_ASM_CX
22421 jae bad_put_user
22422 -3: movl %eax,(%_ASM_CX)
22423 +
22424 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22425 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22426 + cmp %_ASM_BX,%_ASM_CX
22427 + jb 1234f
22428 + xor %ebx,%ebx
22429 +1234:
22430 +#endif
22431 +
22432 +#endif
22433 +
22434 +3: __copyuser_seg movl %eax,(_DEST)
22435 xor %eax,%eax
22436 EXIT
22437 ENDPROC(__put_user_4)
22438
22439 ENTRY(__put_user_8)
22440 ENTER
22441 +
22442 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22443 + GET_THREAD_INFO(%_ASM_BX)
22444 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22445 sub $7,%_ASM_BX
22446 cmp %_ASM_BX,%_ASM_CX
22447 jae bad_put_user
22448 -4: mov %_ASM_AX,(%_ASM_CX)
22449 +
22450 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22451 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22452 + cmp %_ASM_BX,%_ASM_CX
22453 + jb 1234f
22454 + xor %ebx,%ebx
22455 +1234:
22456 +#endif
22457 +
22458 +#endif
22459 +
22460 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22461 #ifdef CONFIG_X86_32
22462 -5: movl %edx,4(%_ASM_CX)
22463 +5: __copyuser_seg movl %edx,4(_DEST)
22464 #endif
22465 xor %eax,%eax
22466 EXIT
22467 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22468 index 1cad221..de671ee 100644
22469 --- a/arch/x86/lib/rwlock.S
22470 +++ b/arch/x86/lib/rwlock.S
22471 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22472 FRAME
22473 0: LOCK_PREFIX
22474 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22475 +
22476 +#ifdef CONFIG_PAX_REFCOUNT
22477 + jno 1234f
22478 + LOCK_PREFIX
22479 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22480 + int $4
22481 +1234:
22482 + _ASM_EXTABLE(1234b, 1234b)
22483 +#endif
22484 +
22485 1: rep; nop
22486 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22487 jne 1b
22488 LOCK_PREFIX
22489 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22490 +
22491 +#ifdef CONFIG_PAX_REFCOUNT
22492 + jno 1234f
22493 + LOCK_PREFIX
22494 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22495 + int $4
22496 +1234:
22497 + _ASM_EXTABLE(1234b, 1234b)
22498 +#endif
22499 +
22500 jnz 0b
22501 ENDFRAME
22502 + pax_force_retaddr
22503 ret
22504 CFI_ENDPROC
22505 END(__write_lock_failed)
22506 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22507 FRAME
22508 0: LOCK_PREFIX
22509 READ_LOCK_SIZE(inc) (%__lock_ptr)
22510 +
22511 +#ifdef CONFIG_PAX_REFCOUNT
22512 + jno 1234f
22513 + LOCK_PREFIX
22514 + READ_LOCK_SIZE(dec) (%__lock_ptr)
22515 + int $4
22516 +1234:
22517 + _ASM_EXTABLE(1234b, 1234b)
22518 +#endif
22519 +
22520 1: rep; nop
22521 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22522 js 1b
22523 LOCK_PREFIX
22524 READ_LOCK_SIZE(dec) (%__lock_ptr)
22525 +
22526 +#ifdef CONFIG_PAX_REFCOUNT
22527 + jno 1234f
22528 + LOCK_PREFIX
22529 + READ_LOCK_SIZE(inc) (%__lock_ptr)
22530 + int $4
22531 +1234:
22532 + _ASM_EXTABLE(1234b, 1234b)
22533 +#endif
22534 +
22535 js 0b
22536 ENDFRAME
22537 + pax_force_retaddr
22538 ret
22539 CFI_ENDPROC
22540 END(__read_lock_failed)
22541 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22542 index 5dff5f0..cadebf4 100644
22543 --- a/arch/x86/lib/rwsem.S
22544 +++ b/arch/x86/lib/rwsem.S
22545 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22546 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22547 CFI_RESTORE __ASM_REG(dx)
22548 restore_common_regs
22549 + pax_force_retaddr
22550 ret
22551 CFI_ENDPROC
22552 ENDPROC(call_rwsem_down_read_failed)
22553 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22554 movq %rax,%rdi
22555 call rwsem_down_write_failed
22556 restore_common_regs
22557 + pax_force_retaddr
22558 ret
22559 CFI_ENDPROC
22560 ENDPROC(call_rwsem_down_write_failed)
22561 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22562 movq %rax,%rdi
22563 call rwsem_wake
22564 restore_common_regs
22565 -1: ret
22566 +1: pax_force_retaddr
22567 + ret
22568 CFI_ENDPROC
22569 ENDPROC(call_rwsem_wake)
22570
22571 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22572 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22573 CFI_RESTORE __ASM_REG(dx)
22574 restore_common_regs
22575 + pax_force_retaddr
22576 ret
22577 CFI_ENDPROC
22578 ENDPROC(call_rwsem_downgrade_wake)
22579 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22580 index a63efd6..ccecad8 100644
22581 --- a/arch/x86/lib/thunk_64.S
22582 +++ b/arch/x86/lib/thunk_64.S
22583 @@ -8,6 +8,7 @@
22584 #include <linux/linkage.h>
22585 #include <asm/dwarf2.h>
22586 #include <asm/calling.h>
22587 +#include <asm/alternative-asm.h>
22588
22589 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22590 .macro THUNK name, func, put_ret_addr_in_rdi=0
22591 @@ -41,5 +42,6 @@
22592 SAVE_ARGS
22593 restore:
22594 RESTORE_ARGS
22595 + pax_force_retaddr
22596 ret
22597 CFI_ENDPROC
22598 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22599 index e218d5d..a99a1eb 100644
22600 --- a/arch/x86/lib/usercopy_32.c
22601 +++ b/arch/x86/lib/usercopy_32.c
22602 @@ -43,7 +43,7 @@ do { \
22603 __asm__ __volatile__( \
22604 " testl %1,%1\n" \
22605 " jz 2f\n" \
22606 - "0: lodsb\n" \
22607 + "0: "__copyuser_seg"lodsb\n" \
22608 " stosb\n" \
22609 " testb %%al,%%al\n" \
22610 " jz 1f\n" \
22611 @@ -128,10 +128,12 @@ do { \
22612 int __d0; \
22613 might_fault(); \
22614 __asm__ __volatile__( \
22615 + __COPYUSER_SET_ES \
22616 "0: rep; stosl\n" \
22617 " movl %2,%0\n" \
22618 "1: rep; stosb\n" \
22619 "2:\n" \
22620 + __COPYUSER_RESTORE_ES \
22621 ".section .fixup,\"ax\"\n" \
22622 "3: lea 0(%2,%0,4),%0\n" \
22623 " jmp 2b\n" \
22624 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
22625 might_fault();
22626
22627 __asm__ __volatile__(
22628 + __COPYUSER_SET_ES
22629 " testl %0, %0\n"
22630 " jz 3f\n"
22631 " andl %0,%%ecx\n"
22632 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
22633 " subl %%ecx,%0\n"
22634 " addl %0,%%eax\n"
22635 "1:\n"
22636 + __COPYUSER_RESTORE_ES
22637 ".section .fixup,\"ax\"\n"
22638 "2: xorl %%eax,%%eax\n"
22639 " jmp 1b\n"
22640 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
22641
22642 #ifdef CONFIG_X86_INTEL_USERCOPY
22643 static unsigned long
22644 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
22645 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22646 {
22647 int d0, d1;
22648 __asm__ __volatile__(
22649 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22650 " .align 2,0x90\n"
22651 "3: movl 0(%4), %%eax\n"
22652 "4: movl 4(%4), %%edx\n"
22653 - "5: movl %%eax, 0(%3)\n"
22654 - "6: movl %%edx, 4(%3)\n"
22655 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22656 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22657 "7: movl 8(%4), %%eax\n"
22658 "8: movl 12(%4),%%edx\n"
22659 - "9: movl %%eax, 8(%3)\n"
22660 - "10: movl %%edx, 12(%3)\n"
22661 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22662 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22663 "11: movl 16(%4), %%eax\n"
22664 "12: movl 20(%4), %%edx\n"
22665 - "13: movl %%eax, 16(%3)\n"
22666 - "14: movl %%edx, 20(%3)\n"
22667 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22668 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22669 "15: movl 24(%4), %%eax\n"
22670 "16: movl 28(%4), %%edx\n"
22671 - "17: movl %%eax, 24(%3)\n"
22672 - "18: movl %%edx, 28(%3)\n"
22673 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22674 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22675 "19: movl 32(%4), %%eax\n"
22676 "20: movl 36(%4), %%edx\n"
22677 - "21: movl %%eax, 32(%3)\n"
22678 - "22: movl %%edx, 36(%3)\n"
22679 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22680 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22681 "23: movl 40(%4), %%eax\n"
22682 "24: movl 44(%4), %%edx\n"
22683 - "25: movl %%eax, 40(%3)\n"
22684 - "26: movl %%edx, 44(%3)\n"
22685 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22686 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22687 "27: movl 48(%4), %%eax\n"
22688 "28: movl 52(%4), %%edx\n"
22689 - "29: movl %%eax, 48(%3)\n"
22690 - "30: movl %%edx, 52(%3)\n"
22691 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22692 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22693 "31: movl 56(%4), %%eax\n"
22694 "32: movl 60(%4), %%edx\n"
22695 - "33: movl %%eax, 56(%3)\n"
22696 - "34: movl %%edx, 60(%3)\n"
22697 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22698 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22699 " addl $-64, %0\n"
22700 " addl $64, %4\n"
22701 " addl $64, %3\n"
22702 @@ -278,10 +282,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22703 " shrl $2, %0\n"
22704 " andl $3, %%eax\n"
22705 " cld\n"
22706 + __COPYUSER_SET_ES
22707 "99: rep; movsl\n"
22708 "36: movl %%eax, %0\n"
22709 "37: rep; movsb\n"
22710 "100:\n"
22711 + __COPYUSER_RESTORE_ES
22712 ".section .fixup,\"ax\"\n"
22713 "101: lea 0(%%eax,%0,4),%0\n"
22714 " jmp 100b\n"
22715 @@ -334,46 +340,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22716 }
22717
22718 static unsigned long
22719 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22720 +{
22721 + int d0, d1;
22722 + __asm__ __volatile__(
22723 + " .align 2,0x90\n"
22724 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22725 + " cmpl $67, %0\n"
22726 + " jbe 3f\n"
22727 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22728 + " .align 2,0x90\n"
22729 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22730 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22731 + "5: movl %%eax, 0(%3)\n"
22732 + "6: movl %%edx, 4(%3)\n"
22733 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22734 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22735 + "9: movl %%eax, 8(%3)\n"
22736 + "10: movl %%edx, 12(%3)\n"
22737 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22738 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
22739 + "13: movl %%eax, 16(%3)\n"
22740 + "14: movl %%edx, 20(%3)\n"
22741 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
22742 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
22743 + "17: movl %%eax, 24(%3)\n"
22744 + "18: movl %%edx, 28(%3)\n"
22745 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
22746 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
22747 + "21: movl %%eax, 32(%3)\n"
22748 + "22: movl %%edx, 36(%3)\n"
22749 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
22750 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
22751 + "25: movl %%eax, 40(%3)\n"
22752 + "26: movl %%edx, 44(%3)\n"
22753 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
22754 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
22755 + "29: movl %%eax, 48(%3)\n"
22756 + "30: movl %%edx, 52(%3)\n"
22757 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
22758 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
22759 + "33: movl %%eax, 56(%3)\n"
22760 + "34: movl %%edx, 60(%3)\n"
22761 + " addl $-64, %0\n"
22762 + " addl $64, %4\n"
22763 + " addl $64, %3\n"
22764 + " cmpl $63, %0\n"
22765 + " ja 1b\n"
22766 + "35: movl %0, %%eax\n"
22767 + " shrl $2, %0\n"
22768 + " andl $3, %%eax\n"
22769 + " cld\n"
22770 + "99: rep; "__copyuser_seg" movsl\n"
22771 + "36: movl %%eax, %0\n"
22772 + "37: rep; "__copyuser_seg" movsb\n"
22773 + "100:\n"
22774 + ".section .fixup,\"ax\"\n"
22775 + "101: lea 0(%%eax,%0,4),%0\n"
22776 + " jmp 100b\n"
22777 + ".previous\n"
22778 + ".section __ex_table,\"a\"\n"
22779 + " .align 4\n"
22780 + " .long 1b,100b\n"
22781 + " .long 2b,100b\n"
22782 + " .long 3b,100b\n"
22783 + " .long 4b,100b\n"
22784 + " .long 5b,100b\n"
22785 + " .long 6b,100b\n"
22786 + " .long 7b,100b\n"
22787 + " .long 8b,100b\n"
22788 + " .long 9b,100b\n"
22789 + " .long 10b,100b\n"
22790 + " .long 11b,100b\n"
22791 + " .long 12b,100b\n"
22792 + " .long 13b,100b\n"
22793 + " .long 14b,100b\n"
22794 + " .long 15b,100b\n"
22795 + " .long 16b,100b\n"
22796 + " .long 17b,100b\n"
22797 + " .long 18b,100b\n"
22798 + " .long 19b,100b\n"
22799 + " .long 20b,100b\n"
22800 + " .long 21b,100b\n"
22801 + " .long 22b,100b\n"
22802 + " .long 23b,100b\n"
22803 + " .long 24b,100b\n"
22804 + " .long 25b,100b\n"
22805 + " .long 26b,100b\n"
22806 + " .long 27b,100b\n"
22807 + " .long 28b,100b\n"
22808 + " .long 29b,100b\n"
22809 + " .long 30b,100b\n"
22810 + " .long 31b,100b\n"
22811 + " .long 32b,100b\n"
22812 + " .long 33b,100b\n"
22813 + " .long 34b,100b\n"
22814 + " .long 35b,100b\n"
22815 + " .long 36b,100b\n"
22816 + " .long 37b,100b\n"
22817 + " .long 99b,101b\n"
22818 + ".previous"
22819 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
22820 + : "1"(to), "2"(from), "0"(size)
22821 + : "eax", "edx", "memory");
22822 + return size;
22823 +}
22824 +
22825 +static unsigned long
22826 +__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
22827 +static unsigned long
22828 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22829 {
22830 int d0, d1;
22831 __asm__ __volatile__(
22832 " .align 2,0x90\n"
22833 - "0: movl 32(%4), %%eax\n"
22834 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22835 " cmpl $67, %0\n"
22836 " jbe 2f\n"
22837 - "1: movl 64(%4), %%eax\n"
22838 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22839 " .align 2,0x90\n"
22840 - "2: movl 0(%4), %%eax\n"
22841 - "21: movl 4(%4), %%edx\n"
22842 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22843 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22844 " movl %%eax, 0(%3)\n"
22845 " movl %%edx, 4(%3)\n"
22846 - "3: movl 8(%4), %%eax\n"
22847 - "31: movl 12(%4),%%edx\n"
22848 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22849 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22850 " movl %%eax, 8(%3)\n"
22851 " movl %%edx, 12(%3)\n"
22852 - "4: movl 16(%4), %%eax\n"
22853 - "41: movl 20(%4), %%edx\n"
22854 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22855 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22856 " movl %%eax, 16(%3)\n"
22857 " movl %%edx, 20(%3)\n"
22858 - "10: movl 24(%4), %%eax\n"
22859 - "51: movl 28(%4), %%edx\n"
22860 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22861 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22862 " movl %%eax, 24(%3)\n"
22863 " movl %%edx, 28(%3)\n"
22864 - "11: movl 32(%4), %%eax\n"
22865 - "61: movl 36(%4), %%edx\n"
22866 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22867 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22868 " movl %%eax, 32(%3)\n"
22869 " movl %%edx, 36(%3)\n"
22870 - "12: movl 40(%4), %%eax\n"
22871 - "71: movl 44(%4), %%edx\n"
22872 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22873 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22874 " movl %%eax, 40(%3)\n"
22875 " movl %%edx, 44(%3)\n"
22876 - "13: movl 48(%4), %%eax\n"
22877 - "81: movl 52(%4), %%edx\n"
22878 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22879 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22880 " movl %%eax, 48(%3)\n"
22881 " movl %%edx, 52(%3)\n"
22882 - "14: movl 56(%4), %%eax\n"
22883 - "91: movl 60(%4), %%edx\n"
22884 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22885 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22886 " movl %%eax, 56(%3)\n"
22887 " movl %%edx, 60(%3)\n"
22888 " addl $-64, %0\n"
22889 @@ -385,9 +500,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22890 " shrl $2, %0\n"
22891 " andl $3, %%eax\n"
22892 " cld\n"
22893 - "6: rep; movsl\n"
22894 + "6: rep; "__copyuser_seg" movsl\n"
22895 " movl %%eax,%0\n"
22896 - "7: rep; movsb\n"
22897 + "7: rep; "__copyuser_seg" movsb\n"
22898 "8:\n"
22899 ".section .fixup,\"ax\"\n"
22900 "9: lea 0(%%eax,%0,4),%0\n"
22901 @@ -434,47 +549,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22902 */
22903
22904 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22905 + const void __user *from, unsigned long size) __size_overflow(3);
22906 +static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22907 const void __user *from, unsigned long size)
22908 {
22909 int d0, d1;
22910
22911 __asm__ __volatile__(
22912 " .align 2,0x90\n"
22913 - "0: movl 32(%4), %%eax\n"
22914 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22915 " cmpl $67, %0\n"
22916 " jbe 2f\n"
22917 - "1: movl 64(%4), %%eax\n"
22918 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22919 " .align 2,0x90\n"
22920 - "2: movl 0(%4), %%eax\n"
22921 - "21: movl 4(%4), %%edx\n"
22922 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22923 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22924 " movnti %%eax, 0(%3)\n"
22925 " movnti %%edx, 4(%3)\n"
22926 - "3: movl 8(%4), %%eax\n"
22927 - "31: movl 12(%4),%%edx\n"
22928 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22929 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22930 " movnti %%eax, 8(%3)\n"
22931 " movnti %%edx, 12(%3)\n"
22932 - "4: movl 16(%4), %%eax\n"
22933 - "41: movl 20(%4), %%edx\n"
22934 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22935 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22936 " movnti %%eax, 16(%3)\n"
22937 " movnti %%edx, 20(%3)\n"
22938 - "10: movl 24(%4), %%eax\n"
22939 - "51: movl 28(%4), %%edx\n"
22940 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22941 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22942 " movnti %%eax, 24(%3)\n"
22943 " movnti %%edx, 28(%3)\n"
22944 - "11: movl 32(%4), %%eax\n"
22945 - "61: movl 36(%4), %%edx\n"
22946 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22947 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22948 " movnti %%eax, 32(%3)\n"
22949 " movnti %%edx, 36(%3)\n"
22950 - "12: movl 40(%4), %%eax\n"
22951 - "71: movl 44(%4), %%edx\n"
22952 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22953 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22954 " movnti %%eax, 40(%3)\n"
22955 " movnti %%edx, 44(%3)\n"
22956 - "13: movl 48(%4), %%eax\n"
22957 - "81: movl 52(%4), %%edx\n"
22958 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22959 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22960 " movnti %%eax, 48(%3)\n"
22961 " movnti %%edx, 52(%3)\n"
22962 - "14: movl 56(%4), %%eax\n"
22963 - "91: movl 60(%4), %%edx\n"
22964 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22965 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22966 " movnti %%eax, 56(%3)\n"
22967 " movnti %%edx, 60(%3)\n"
22968 " addl $-64, %0\n"
22969 @@ -487,9 +604,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22970 " shrl $2, %0\n"
22971 " andl $3, %%eax\n"
22972 " cld\n"
22973 - "6: rep; movsl\n"
22974 + "6: rep; "__copyuser_seg" movsl\n"
22975 " movl %%eax,%0\n"
22976 - "7: rep; movsb\n"
22977 + "7: rep; "__copyuser_seg" movsb\n"
22978 "8:\n"
22979 ".section .fixup,\"ax\"\n"
22980 "9: lea 0(%%eax,%0,4),%0\n"
22981 @@ -531,47 +648,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22982 }
22983
22984 static unsigned long __copy_user_intel_nocache(void *to,
22985 + const void __user *from, unsigned long size) __size_overflow(3);
22986 +static unsigned long __copy_user_intel_nocache(void *to,
22987 const void __user *from, unsigned long size)
22988 {
22989 int d0, d1;
22990
22991 __asm__ __volatile__(
22992 " .align 2,0x90\n"
22993 - "0: movl 32(%4), %%eax\n"
22994 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22995 " cmpl $67, %0\n"
22996 " jbe 2f\n"
22997 - "1: movl 64(%4), %%eax\n"
22998 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22999 " .align 2,0x90\n"
23000 - "2: movl 0(%4), %%eax\n"
23001 - "21: movl 4(%4), %%edx\n"
23002 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23003 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23004 " movnti %%eax, 0(%3)\n"
23005 " movnti %%edx, 4(%3)\n"
23006 - "3: movl 8(%4), %%eax\n"
23007 - "31: movl 12(%4),%%edx\n"
23008 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23009 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23010 " movnti %%eax, 8(%3)\n"
23011 " movnti %%edx, 12(%3)\n"
23012 - "4: movl 16(%4), %%eax\n"
23013 - "41: movl 20(%4), %%edx\n"
23014 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23015 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23016 " movnti %%eax, 16(%3)\n"
23017 " movnti %%edx, 20(%3)\n"
23018 - "10: movl 24(%4), %%eax\n"
23019 - "51: movl 28(%4), %%edx\n"
23020 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23021 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23022 " movnti %%eax, 24(%3)\n"
23023 " movnti %%edx, 28(%3)\n"
23024 - "11: movl 32(%4), %%eax\n"
23025 - "61: movl 36(%4), %%edx\n"
23026 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23027 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23028 " movnti %%eax, 32(%3)\n"
23029 " movnti %%edx, 36(%3)\n"
23030 - "12: movl 40(%4), %%eax\n"
23031 - "71: movl 44(%4), %%edx\n"
23032 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23033 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23034 " movnti %%eax, 40(%3)\n"
23035 " movnti %%edx, 44(%3)\n"
23036 - "13: movl 48(%4), %%eax\n"
23037 - "81: movl 52(%4), %%edx\n"
23038 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23039 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23040 " movnti %%eax, 48(%3)\n"
23041 " movnti %%edx, 52(%3)\n"
23042 - "14: movl 56(%4), %%eax\n"
23043 - "91: movl 60(%4), %%edx\n"
23044 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23045 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23046 " movnti %%eax, 56(%3)\n"
23047 " movnti %%edx, 60(%3)\n"
23048 " addl $-64, %0\n"
23049 @@ -584,9 +703,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23050 " shrl $2, %0\n"
23051 " andl $3, %%eax\n"
23052 " cld\n"
23053 - "6: rep; movsl\n"
23054 + "6: rep; "__copyuser_seg" movsl\n"
23055 " movl %%eax,%0\n"
23056 - "7: rep; movsb\n"
23057 + "7: rep; "__copyuser_seg" movsb\n"
23058 "8:\n"
23059 ".section .fixup,\"ax\"\n"
23060 "9: lea 0(%%eax,%0,4),%0\n"
23061 @@ -629,32 +748,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23062 */
23063 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23064 unsigned long size);
23065 -unsigned long __copy_user_intel(void __user *to, const void *from,
23066 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23067 + unsigned long size);
23068 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23069 unsigned long size);
23070 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23071 const void __user *from, unsigned long size);
23072 #endif /* CONFIG_X86_INTEL_USERCOPY */
23073
23074 /* Generic arbitrary sized copy. */
23075 -#define __copy_user(to, from, size) \
23076 +#define __copy_user(to, from, size, prefix, set, restore) \
23077 do { \
23078 int __d0, __d1, __d2; \
23079 __asm__ __volatile__( \
23080 + set \
23081 " cmp $7,%0\n" \
23082 " jbe 1f\n" \
23083 " movl %1,%0\n" \
23084 " negl %0\n" \
23085 " andl $7,%0\n" \
23086 " subl %0,%3\n" \
23087 - "4: rep; movsb\n" \
23088 + "4: rep; "prefix"movsb\n" \
23089 " movl %3,%0\n" \
23090 " shrl $2,%0\n" \
23091 " andl $3,%3\n" \
23092 " .align 2,0x90\n" \
23093 - "0: rep; movsl\n" \
23094 + "0: rep; "prefix"movsl\n" \
23095 " movl %3,%0\n" \
23096 - "1: rep; movsb\n" \
23097 + "1: rep; "prefix"movsb\n" \
23098 "2:\n" \
23099 + restore \
23100 ".section .fixup,\"ax\"\n" \
23101 "5: addl %3,%0\n" \
23102 " jmp 2b\n" \
23103 @@ -682,14 +805,14 @@ do { \
23104 " negl %0\n" \
23105 " andl $7,%0\n" \
23106 " subl %0,%3\n" \
23107 - "4: rep; movsb\n" \
23108 + "4: rep; "__copyuser_seg"movsb\n" \
23109 " movl %3,%0\n" \
23110 " shrl $2,%0\n" \
23111 " andl $3,%3\n" \
23112 " .align 2,0x90\n" \
23113 - "0: rep; movsl\n" \
23114 + "0: rep; "__copyuser_seg"movsl\n" \
23115 " movl %3,%0\n" \
23116 - "1: rep; movsb\n" \
23117 + "1: rep; "__copyuser_seg"movsb\n" \
23118 "2:\n" \
23119 ".section .fixup,\"ax\"\n" \
23120 "5: addl %3,%0\n" \
23121 @@ -775,9 +898,9 @@ survive:
23122 }
23123 #endif
23124 if (movsl_is_ok(to, from, n))
23125 - __copy_user(to, from, n);
23126 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23127 else
23128 - n = __copy_user_intel(to, from, n);
23129 + n = __generic_copy_to_user_intel(to, from, n);
23130 return n;
23131 }
23132 EXPORT_SYMBOL(__copy_to_user_ll);
23133 @@ -797,10 +920,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23134 unsigned long n)
23135 {
23136 if (movsl_is_ok(to, from, n))
23137 - __copy_user(to, from, n);
23138 + __copy_user(to, from, n, __copyuser_seg, "", "");
23139 else
23140 - n = __copy_user_intel((void __user *)to,
23141 - (const void *)from, n);
23142 + n = __generic_copy_from_user_intel(to, from, n);
23143 return n;
23144 }
23145 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23146 @@ -827,65 +949,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23147 if (n > 64 && cpu_has_xmm2)
23148 n = __copy_user_intel_nocache(to, from, n);
23149 else
23150 - __copy_user(to, from, n);
23151 + __copy_user(to, from, n, __copyuser_seg, "", "");
23152 #else
23153 - __copy_user(to, from, n);
23154 + __copy_user(to, from, n, __copyuser_seg, "", "");
23155 #endif
23156 return n;
23157 }
23158 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23159
23160 -/**
23161 - * copy_to_user: - Copy a block of data into user space.
23162 - * @to: Destination address, in user space.
23163 - * @from: Source address, in kernel space.
23164 - * @n: Number of bytes to copy.
23165 - *
23166 - * Context: User context only. This function may sleep.
23167 - *
23168 - * Copy data from kernel space to user space.
23169 - *
23170 - * Returns number of bytes that could not be copied.
23171 - * On success, this will be zero.
23172 - */
23173 -unsigned long
23174 -copy_to_user(void __user *to, const void *from, unsigned long n)
23175 -{
23176 - if (access_ok(VERIFY_WRITE, to, n))
23177 - n = __copy_to_user(to, from, n);
23178 - return n;
23179 -}
23180 -EXPORT_SYMBOL(copy_to_user);
23181 -
23182 -/**
23183 - * copy_from_user: - Copy a block of data from user space.
23184 - * @to: Destination address, in kernel space.
23185 - * @from: Source address, in user space.
23186 - * @n: Number of bytes to copy.
23187 - *
23188 - * Context: User context only. This function may sleep.
23189 - *
23190 - * Copy data from user space to kernel space.
23191 - *
23192 - * Returns number of bytes that could not be copied.
23193 - * On success, this will be zero.
23194 - *
23195 - * If some data could not be copied, this function will pad the copied
23196 - * data to the requested size using zero bytes.
23197 - */
23198 -unsigned long
23199 -_copy_from_user(void *to, const void __user *from, unsigned long n)
23200 -{
23201 - if (access_ok(VERIFY_READ, from, n))
23202 - n = __copy_from_user(to, from, n);
23203 - else
23204 - memset(to, 0, n);
23205 - return n;
23206 -}
23207 -EXPORT_SYMBOL(_copy_from_user);
23208 -
23209 void copy_from_user_overflow(void)
23210 {
23211 WARN(1, "Buffer overflow detected!\n");
23212 }
23213 EXPORT_SYMBOL(copy_from_user_overflow);
23214 +
23215 +void copy_to_user_overflow(void)
23216 +{
23217 + WARN(1, "Buffer overflow detected!\n");
23218 +}
23219 +EXPORT_SYMBOL(copy_to_user_overflow);
23220 +
23221 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23222 +void __set_fs(mm_segment_t x)
23223 +{
23224 + switch (x.seg) {
23225 + case 0:
23226 + loadsegment(gs, 0);
23227 + break;
23228 + case TASK_SIZE_MAX:
23229 + loadsegment(gs, __USER_DS);
23230 + break;
23231 + case -1UL:
23232 + loadsegment(gs, __KERNEL_DS);
23233 + break;
23234 + default:
23235 + BUG();
23236 + }
23237 + return;
23238 +}
23239 +EXPORT_SYMBOL(__set_fs);
23240 +
23241 +void set_fs(mm_segment_t x)
23242 +{
23243 + current_thread_info()->addr_limit = x;
23244 + __set_fs(x);
23245 +}
23246 +EXPORT_SYMBOL(set_fs);
23247 +#endif
23248 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23249 index b7c2849..8633ad8 100644
23250 --- a/arch/x86/lib/usercopy_64.c
23251 +++ b/arch/x86/lib/usercopy_64.c
23252 @@ -42,6 +42,12 @@ long
23253 __strncpy_from_user(char *dst, const char __user *src, long count)
23254 {
23255 long res;
23256 +
23257 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23258 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23259 + src += PAX_USER_SHADOW_BASE;
23260 +#endif
23261 +
23262 __do_strncpy_from_user(dst, src, count, res);
23263 return res;
23264 }
23265 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23266 {
23267 long __d0;
23268 might_fault();
23269 +
23270 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23271 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23272 + addr += PAX_USER_SHADOW_BASE;
23273 +#endif
23274 +
23275 /* no memory constraint because it doesn't change any memory gcc knows
23276 about */
23277 asm volatile(
23278 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23279 }
23280 EXPORT_SYMBOL(strlen_user);
23281
23282 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23283 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23284 {
23285 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23286 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23287 - }
23288 - return len;
23289 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23290 +
23291 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23292 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23293 + to += PAX_USER_SHADOW_BASE;
23294 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23295 + from += PAX_USER_SHADOW_BASE;
23296 +#endif
23297 +
23298 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23299 + }
23300 + return len;
23301 }
23302 EXPORT_SYMBOL(copy_in_user);
23303
23304 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23305 * it is not necessary to optimize tail handling.
23306 */
23307 unsigned long
23308 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23309 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23310 {
23311 char c;
23312 unsigned zero_len;
23313 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23314 index 1fb85db..8b3540b 100644
23315 --- a/arch/x86/mm/extable.c
23316 +++ b/arch/x86/mm/extable.c
23317 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
23318 const struct exception_table_entry *fixup;
23319
23320 #ifdef CONFIG_PNPBIOS
23321 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23322 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23323 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23324 extern u32 pnp_bios_is_utter_crap;
23325 pnp_bios_is_utter_crap = 1;
23326 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23327 index f0b4caf..d92fd42 100644
23328 --- a/arch/x86/mm/fault.c
23329 +++ b/arch/x86/mm/fault.c
23330 @@ -13,11 +13,18 @@
23331 #include <linux/perf_event.h> /* perf_sw_event */
23332 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23333 #include <linux/prefetch.h> /* prefetchw */
23334 +#include <linux/unistd.h>
23335 +#include <linux/compiler.h>
23336
23337 #include <asm/traps.h> /* dotraplinkage, ... */
23338 #include <asm/pgalloc.h> /* pgd_*(), ... */
23339 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23340 #include <asm/fixmap.h> /* VSYSCALL_START */
23341 +#include <asm/tlbflush.h>
23342 +
23343 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23344 +#include <asm/stacktrace.h>
23345 +#endif
23346
23347 /*
23348 * Page fault error code bits:
23349 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23350 int ret = 0;
23351
23352 /* kprobe_running() needs smp_processor_id() */
23353 - if (kprobes_built_in() && !user_mode_vm(regs)) {
23354 + if (kprobes_built_in() && !user_mode(regs)) {
23355 preempt_disable();
23356 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23357 ret = 1;
23358 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23359 return !instr_lo || (instr_lo>>1) == 1;
23360 case 0x00:
23361 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23362 - if (probe_kernel_address(instr, opcode))
23363 + if (user_mode(regs)) {
23364 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23365 + return 0;
23366 + } else if (probe_kernel_address(instr, opcode))
23367 return 0;
23368
23369 *prefetch = (instr_lo == 0xF) &&
23370 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23371 while (instr < max_instr) {
23372 unsigned char opcode;
23373
23374 - if (probe_kernel_address(instr, opcode))
23375 + if (user_mode(regs)) {
23376 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23377 + break;
23378 + } else if (probe_kernel_address(instr, opcode))
23379 break;
23380
23381 instr++;
23382 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23383 force_sig_info(si_signo, &info, tsk);
23384 }
23385
23386 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23387 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23388 +#endif
23389 +
23390 +#ifdef CONFIG_PAX_EMUTRAMP
23391 +static int pax_handle_fetch_fault(struct pt_regs *regs);
23392 +#endif
23393 +
23394 +#ifdef CONFIG_PAX_PAGEEXEC
23395 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23396 +{
23397 + pgd_t *pgd;
23398 + pud_t *pud;
23399 + pmd_t *pmd;
23400 +
23401 + pgd = pgd_offset(mm, address);
23402 + if (!pgd_present(*pgd))
23403 + return NULL;
23404 + pud = pud_offset(pgd, address);
23405 + if (!pud_present(*pud))
23406 + return NULL;
23407 + pmd = pmd_offset(pud, address);
23408 + if (!pmd_present(*pmd))
23409 + return NULL;
23410 + return pmd;
23411 +}
23412 +#endif
23413 +
23414 DEFINE_SPINLOCK(pgd_lock);
23415 LIST_HEAD(pgd_list);
23416
23417 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23418 for (address = VMALLOC_START & PMD_MASK;
23419 address >= TASK_SIZE && address < FIXADDR_TOP;
23420 address += PMD_SIZE) {
23421 +
23422 +#ifdef CONFIG_PAX_PER_CPU_PGD
23423 + unsigned long cpu;
23424 +#else
23425 struct page *page;
23426 +#endif
23427
23428 spin_lock(&pgd_lock);
23429 +
23430 +#ifdef CONFIG_PAX_PER_CPU_PGD
23431 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23432 + pgd_t *pgd = get_cpu_pgd(cpu);
23433 + pmd_t *ret;
23434 +#else
23435 list_for_each_entry(page, &pgd_list, lru) {
23436 + pgd_t *pgd = page_address(page);
23437 spinlock_t *pgt_lock;
23438 pmd_t *ret;
23439
23440 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23441 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23442
23443 spin_lock(pgt_lock);
23444 - ret = vmalloc_sync_one(page_address(page), address);
23445 +#endif
23446 +
23447 + ret = vmalloc_sync_one(pgd, address);
23448 +
23449 +#ifndef CONFIG_PAX_PER_CPU_PGD
23450 spin_unlock(pgt_lock);
23451 +#endif
23452
23453 if (!ret)
23454 break;
23455 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23456 * an interrupt in the middle of a task switch..
23457 */
23458 pgd_paddr = read_cr3();
23459 +
23460 +#ifdef CONFIG_PAX_PER_CPU_PGD
23461 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23462 +#endif
23463 +
23464 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23465 if (!pmd_k)
23466 return -1;
23467 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23468 * happen within a race in page table update. In the later
23469 * case just flush:
23470 */
23471 +
23472 +#ifdef CONFIG_PAX_PER_CPU_PGD
23473 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23474 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23475 +#else
23476 pgd = pgd_offset(current->active_mm, address);
23477 +#endif
23478 +
23479 pgd_ref = pgd_offset_k(address);
23480 if (pgd_none(*pgd_ref))
23481 return -1;
23482 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23483 static int is_errata100(struct pt_regs *regs, unsigned long address)
23484 {
23485 #ifdef CONFIG_X86_64
23486 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23487 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23488 return 1;
23489 #endif
23490 return 0;
23491 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23492 }
23493
23494 static const char nx_warning[] = KERN_CRIT
23495 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23496 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23497
23498 static void
23499 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23500 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23501 if (!oops_may_print())
23502 return;
23503
23504 - if (error_code & PF_INSTR) {
23505 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23506 unsigned int level;
23507
23508 pte_t *pte = lookup_address(address, &level);
23509
23510 if (pte && pte_present(*pte) && !pte_exec(*pte))
23511 - printk(nx_warning, current_uid());
23512 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23513 }
23514
23515 +#ifdef CONFIG_PAX_KERNEXEC
23516 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23517 + if (current->signal->curr_ip)
23518 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23519 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23520 + else
23521 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23522 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23523 + }
23524 +#endif
23525 +
23526 printk(KERN_ALERT "BUG: unable to handle kernel ");
23527 if (address < PAGE_SIZE)
23528 printk(KERN_CONT "NULL pointer dereference");
23529 @@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23530 }
23531 #endif
23532
23533 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23534 + if (pax_is_fetch_fault(regs, error_code, address)) {
23535 +
23536 +#ifdef CONFIG_PAX_EMUTRAMP
23537 + switch (pax_handle_fetch_fault(regs)) {
23538 + case 2:
23539 + return;
23540 + }
23541 +#endif
23542 +
23543 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23544 + do_group_exit(SIGKILL);
23545 + }
23546 +#endif
23547 +
23548 if (unlikely(show_unhandled_signals))
23549 show_signal_msg(regs, error_code, address, tsk);
23550
23551 @@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23552 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23553 printk(KERN_ERR
23554 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23555 - tsk->comm, tsk->pid, address);
23556 + tsk->comm, task_pid_nr(tsk), address);
23557 code = BUS_MCEERR_AR;
23558 }
23559 #endif
23560 @@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23561 return 1;
23562 }
23563
23564 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23565 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23566 +{
23567 + pte_t *pte;
23568 + pmd_t *pmd;
23569 + spinlock_t *ptl;
23570 + unsigned char pte_mask;
23571 +
23572 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23573 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
23574 + return 0;
23575 +
23576 + /* PaX: it's our fault, let's handle it if we can */
23577 +
23578 + /* PaX: take a look at read faults before acquiring any locks */
23579 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23580 + /* instruction fetch attempt from a protected page in user mode */
23581 + up_read(&mm->mmap_sem);
23582 +
23583 +#ifdef CONFIG_PAX_EMUTRAMP
23584 + switch (pax_handle_fetch_fault(regs)) {
23585 + case 2:
23586 + return 1;
23587 + }
23588 +#endif
23589 +
23590 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23591 + do_group_exit(SIGKILL);
23592 + }
23593 +
23594 + pmd = pax_get_pmd(mm, address);
23595 + if (unlikely(!pmd))
23596 + return 0;
23597 +
23598 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23599 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23600 + pte_unmap_unlock(pte, ptl);
23601 + return 0;
23602 + }
23603 +
23604 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23605 + /* write attempt to a protected page in user mode */
23606 + pte_unmap_unlock(pte, ptl);
23607 + return 0;
23608 + }
23609 +
23610 +#ifdef CONFIG_SMP
23611 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23612 +#else
23613 + if (likely(address > get_limit(regs->cs)))
23614 +#endif
23615 + {
23616 + set_pte(pte, pte_mkread(*pte));
23617 + __flush_tlb_one(address);
23618 + pte_unmap_unlock(pte, ptl);
23619 + up_read(&mm->mmap_sem);
23620 + return 1;
23621 + }
23622 +
23623 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23624 +
23625 + /*
23626 + * PaX: fill DTLB with user rights and retry
23627 + */
23628 + __asm__ __volatile__ (
23629 + "orb %2,(%1)\n"
23630 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23631 +/*
23632 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23633 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23634 + * page fault when examined during a TLB load attempt. this is true not only
23635 + * for PTEs holding a non-present entry but also present entries that will
23636 + * raise a page fault (such as those set up by PaX, or the copy-on-write
23637 + * mechanism). in effect it means that we do *not* need to flush the TLBs
23638 + * for our target pages since their PTEs are simply not in the TLBs at all.
23639 +
23640 + * the best thing in omitting it is that we gain around 15-20% speed in the
23641 + * fast path of the page fault handler and can get rid of tracing since we
23642 + * can no longer flush unintended entries.
23643 + */
23644 + "invlpg (%0)\n"
23645 +#endif
23646 + __copyuser_seg"testb $0,(%0)\n"
23647 + "xorb %3,(%1)\n"
23648 + :
23649 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23650 + : "memory", "cc");
23651 + pte_unmap_unlock(pte, ptl);
23652 + up_read(&mm->mmap_sem);
23653 + return 1;
23654 +}
23655 +#endif
23656 +
23657 /*
23658 * Handle a spurious fault caused by a stale TLB entry.
23659 *
23660 @@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23661 static inline int
23662 access_error(unsigned long error_code, struct vm_area_struct *vma)
23663 {
23664 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23665 + return 1;
23666 +
23667 if (error_code & PF_WRITE) {
23668 /* write, present and write, not present: */
23669 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23670 @@ -1005,18 +1197,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23671 {
23672 struct vm_area_struct *vma;
23673 struct task_struct *tsk;
23674 - unsigned long address;
23675 struct mm_struct *mm;
23676 int fault;
23677 int write = error_code & PF_WRITE;
23678 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23679 (write ? FAULT_FLAG_WRITE : 0);
23680
23681 - tsk = current;
23682 - mm = tsk->mm;
23683 -
23684 /* Get the faulting address: */
23685 - address = read_cr2();
23686 + unsigned long address = read_cr2();
23687 +
23688 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23689 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23690 + if (!search_exception_tables(regs->ip)) {
23691 + bad_area_nosemaphore(regs, error_code, address);
23692 + return;
23693 + }
23694 + if (address < PAX_USER_SHADOW_BASE) {
23695 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23696 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23697 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23698 + } else
23699 + address -= PAX_USER_SHADOW_BASE;
23700 + }
23701 +#endif
23702 +
23703 + tsk = current;
23704 + mm = tsk->mm;
23705
23706 /*
23707 * Detect and handle instructions that would cause a page fault for
23708 @@ -1077,7 +1283,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23709 * User-mode registers count as a user access even for any
23710 * potential system fault or CPU buglet:
23711 */
23712 - if (user_mode_vm(regs)) {
23713 + if (user_mode(regs)) {
23714 local_irq_enable();
23715 error_code |= PF_USER;
23716 } else {
23717 @@ -1132,6 +1338,11 @@ retry:
23718 might_sleep();
23719 }
23720
23721 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23722 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23723 + return;
23724 +#endif
23725 +
23726 vma = find_vma(mm, address);
23727 if (unlikely(!vma)) {
23728 bad_area(regs, error_code, address);
23729 @@ -1143,18 +1354,24 @@ retry:
23730 bad_area(regs, error_code, address);
23731 return;
23732 }
23733 - if (error_code & PF_USER) {
23734 - /*
23735 - * Accessing the stack below %sp is always a bug.
23736 - * The large cushion allows instructions like enter
23737 - * and pusha to work. ("enter $65535, $31" pushes
23738 - * 32 pointers and then decrements %sp by 65535.)
23739 - */
23740 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
23741 - bad_area(regs, error_code, address);
23742 - return;
23743 - }
23744 + /*
23745 + * Accessing the stack below %sp is always a bug.
23746 + * The large cushion allows instructions like enter
23747 + * and pusha to work. ("enter $65535, $31" pushes
23748 + * 32 pointers and then decrements %sp by 65535.)
23749 + */
23750 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
23751 + bad_area(regs, error_code, address);
23752 + return;
23753 }
23754 +
23755 +#ifdef CONFIG_PAX_SEGMEXEC
23756 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
23757 + bad_area(regs, error_code, address);
23758 + return;
23759 + }
23760 +#endif
23761 +
23762 if (unlikely(expand_stack(vma, address))) {
23763 bad_area(regs, error_code, address);
23764 return;
23765 @@ -1209,3 +1426,292 @@ good_area:
23766
23767 up_read(&mm->mmap_sem);
23768 }
23769 +
23770 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23771 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
23772 +{
23773 + struct mm_struct *mm = current->mm;
23774 + unsigned long ip = regs->ip;
23775 +
23776 + if (v8086_mode(regs))
23777 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
23778 +
23779 +#ifdef CONFIG_PAX_PAGEEXEC
23780 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
23781 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
23782 + return true;
23783 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
23784 + return true;
23785 + return false;
23786 + }
23787 +#endif
23788 +
23789 +#ifdef CONFIG_PAX_SEGMEXEC
23790 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
23791 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
23792 + return true;
23793 + return false;
23794 + }
23795 +#endif
23796 +
23797 + return false;
23798 +}
23799 +#endif
23800 +
23801 +#ifdef CONFIG_PAX_EMUTRAMP
23802 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
23803 +{
23804 + int err;
23805 +
23806 + do { /* PaX: libffi trampoline emulation */
23807 + unsigned char mov, jmp;
23808 + unsigned int addr1, addr2;
23809 +
23810 +#ifdef CONFIG_X86_64
23811 + if ((regs->ip + 9) >> 32)
23812 + break;
23813 +#endif
23814 +
23815 + err = get_user(mov, (unsigned char __user *)regs->ip);
23816 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23817 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23818 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23819 +
23820 + if (err)
23821 + break;
23822 +
23823 + if (mov == 0xB8 && jmp == 0xE9) {
23824 + regs->ax = addr1;
23825 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23826 + return 2;
23827 + }
23828 + } while (0);
23829 +
23830 + do { /* PaX: gcc trampoline emulation #1 */
23831 + unsigned char mov1, mov2;
23832 + unsigned short jmp;
23833 + unsigned int addr1, addr2;
23834 +
23835 +#ifdef CONFIG_X86_64
23836 + if ((regs->ip + 11) >> 32)
23837 + break;
23838 +#endif
23839 +
23840 + err = get_user(mov1, (unsigned char __user *)regs->ip);
23841 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23842 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
23843 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23844 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
23845 +
23846 + if (err)
23847 + break;
23848 +
23849 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
23850 + regs->cx = addr1;
23851 + regs->ax = addr2;
23852 + regs->ip = addr2;
23853 + return 2;
23854 + }
23855 + } while (0);
23856 +
23857 + do { /* PaX: gcc trampoline emulation #2 */
23858 + unsigned char mov, jmp;
23859 + unsigned int addr1, addr2;
23860 +
23861 +#ifdef CONFIG_X86_64
23862 + if ((regs->ip + 9) >> 32)
23863 + break;
23864 +#endif
23865 +
23866 + err = get_user(mov, (unsigned char __user *)regs->ip);
23867 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23868 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23869 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23870 +
23871 + if (err)
23872 + break;
23873 +
23874 + if (mov == 0xB9 && jmp == 0xE9) {
23875 + regs->cx = addr1;
23876 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23877 + return 2;
23878 + }
23879 + } while (0);
23880 +
23881 + return 1; /* PaX in action */
23882 +}
23883 +
23884 +#ifdef CONFIG_X86_64
23885 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
23886 +{
23887 + int err;
23888 +
23889 + do { /* PaX: libffi trampoline emulation */
23890 + unsigned short mov1, mov2, jmp1;
23891 + unsigned char stcclc, jmp2;
23892 + unsigned long addr1, addr2;
23893 +
23894 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23895 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
23896 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
23897 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
23898 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
23899 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
23900 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
23901 +
23902 + if (err)
23903 + break;
23904 +
23905 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23906 + regs->r11 = addr1;
23907 + regs->r10 = addr2;
23908 + if (stcclc == 0xF8)
23909 + regs->flags &= ~X86_EFLAGS_CF;
23910 + else
23911 + regs->flags |= X86_EFLAGS_CF;
23912 + regs->ip = addr1;
23913 + return 2;
23914 + }
23915 + } while (0);
23916 +
23917 + do { /* PaX: gcc trampoline emulation #1 */
23918 + unsigned short mov1, mov2, jmp1;
23919 + unsigned char jmp2;
23920 + unsigned int addr1;
23921 + unsigned long addr2;
23922 +
23923 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23924 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
23925 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
23926 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
23927 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
23928 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
23929 +
23930 + if (err)
23931 + break;
23932 +
23933 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23934 + regs->r11 = addr1;
23935 + regs->r10 = addr2;
23936 + regs->ip = addr1;
23937 + return 2;
23938 + }
23939 + } while (0);
23940 +
23941 + do { /* PaX: gcc trampoline emulation #2 */
23942 + unsigned short mov1, mov2, jmp1;
23943 + unsigned char jmp2;
23944 + unsigned long addr1, addr2;
23945 +
23946 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23947 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
23948 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
23949 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
23950 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
23951 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
23952 +
23953 + if (err)
23954 + break;
23955 +
23956 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23957 + regs->r11 = addr1;
23958 + regs->r10 = addr2;
23959 + regs->ip = addr1;
23960 + return 2;
23961 + }
23962 + } while (0);
23963 +
23964 + return 1; /* PaX in action */
23965 +}
23966 +#endif
23967 +
23968 +/*
23969 + * PaX: decide what to do with offenders (regs->ip = fault address)
23970 + *
23971 + * returns 1 when task should be killed
23972 + * 2 when gcc trampoline was detected
23973 + */
23974 +static int pax_handle_fetch_fault(struct pt_regs *regs)
23975 +{
23976 + if (v8086_mode(regs))
23977 + return 1;
23978 +
23979 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
23980 + return 1;
23981 +
23982 +#ifdef CONFIG_X86_32
23983 + return pax_handle_fetch_fault_32(regs);
23984 +#else
23985 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
23986 + return pax_handle_fetch_fault_32(regs);
23987 + else
23988 + return pax_handle_fetch_fault_64(regs);
23989 +#endif
23990 +}
23991 +#endif
23992 +
23993 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23994 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
23995 +{
23996 + long i;
23997 +
23998 + printk(KERN_ERR "PAX: bytes at PC: ");
23999 + for (i = 0; i < 20; i++) {
24000 + unsigned char c;
24001 + if (get_user(c, (unsigned char __force_user *)pc+i))
24002 + printk(KERN_CONT "?? ");
24003 + else
24004 + printk(KERN_CONT "%02x ", c);
24005 + }
24006 + printk("\n");
24007 +
24008 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24009 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24010 + unsigned long c;
24011 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24012 +#ifdef CONFIG_X86_32
24013 + printk(KERN_CONT "???????? ");
24014 +#else
24015 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24016 + printk(KERN_CONT "???????? ???????? ");
24017 + else
24018 + printk(KERN_CONT "???????????????? ");
24019 +#endif
24020 + } else {
24021 +#ifdef CONFIG_X86_64
24022 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24023 + printk(KERN_CONT "%08x ", (unsigned int)c);
24024 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24025 + } else
24026 +#endif
24027 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24028 + }
24029 + }
24030 + printk("\n");
24031 +}
24032 +#endif
24033 +
24034 +/**
24035 + * probe_kernel_write(): safely attempt to write to a location
24036 + * @dst: address to write to
24037 + * @src: pointer to the data that shall be written
24038 + * @size: size of the data chunk
24039 + *
24040 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24041 + * happens, handle that and return -EFAULT.
24042 + */
24043 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24044 +{
24045 + long ret;
24046 + mm_segment_t old_fs = get_fs();
24047 +
24048 + set_fs(KERNEL_DS);
24049 + pagefault_disable();
24050 + pax_open_kernel();
24051 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24052 + pax_close_kernel();
24053 + pagefault_enable();
24054 + set_fs(old_fs);
24055 +
24056 + return ret ? -EFAULT : 0;
24057 +}
24058 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24059 index dd74e46..7d26398 100644
24060 --- a/arch/x86/mm/gup.c
24061 +++ b/arch/x86/mm/gup.c
24062 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24063 addr = start;
24064 len = (unsigned long) nr_pages << PAGE_SHIFT;
24065 end = start + len;
24066 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24067 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24068 (void __user *)start, len)))
24069 return 0;
24070
24071 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24072 index f4f29b1..5cac4fb 100644
24073 --- a/arch/x86/mm/highmem_32.c
24074 +++ b/arch/x86/mm/highmem_32.c
24075 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24076 idx = type + KM_TYPE_NR*smp_processor_id();
24077 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24078 BUG_ON(!pte_none(*(kmap_pte-idx)));
24079 +
24080 + pax_open_kernel();
24081 set_pte(kmap_pte-idx, mk_pte(page, prot));
24082 + pax_close_kernel();
24083 +
24084 arch_flush_lazy_mmu_mode();
24085
24086 return (void *)vaddr;
24087 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24088 index 8ecbb4b..29efd37 100644
24089 --- a/arch/x86/mm/hugetlbpage.c
24090 +++ b/arch/x86/mm/hugetlbpage.c
24091 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24092 struct hstate *h = hstate_file(file);
24093 struct mm_struct *mm = current->mm;
24094 struct vm_area_struct *vma;
24095 - unsigned long start_addr;
24096 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24097 +
24098 +#ifdef CONFIG_PAX_SEGMEXEC
24099 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24100 + pax_task_size = SEGMEXEC_TASK_SIZE;
24101 +#endif
24102 +
24103 + pax_task_size -= PAGE_SIZE;
24104
24105 if (len > mm->cached_hole_size) {
24106 - start_addr = mm->free_area_cache;
24107 + start_addr = mm->free_area_cache;
24108 } else {
24109 - start_addr = TASK_UNMAPPED_BASE;
24110 - mm->cached_hole_size = 0;
24111 + start_addr = mm->mmap_base;
24112 + mm->cached_hole_size = 0;
24113 }
24114
24115 full_search:
24116 @@ -280,26 +287,27 @@ full_search:
24117
24118 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24119 /* At this point: (!vma || addr < vma->vm_end). */
24120 - if (TASK_SIZE - len < addr) {
24121 + if (pax_task_size - len < addr) {
24122 /*
24123 * Start a new search - just in case we missed
24124 * some holes.
24125 */
24126 - if (start_addr != TASK_UNMAPPED_BASE) {
24127 - start_addr = TASK_UNMAPPED_BASE;
24128 + if (start_addr != mm->mmap_base) {
24129 + start_addr = mm->mmap_base;
24130 mm->cached_hole_size = 0;
24131 goto full_search;
24132 }
24133 return -ENOMEM;
24134 }
24135 - if (!vma || addr + len <= vma->vm_start) {
24136 - mm->free_area_cache = addr + len;
24137 - return addr;
24138 - }
24139 + if (check_heap_stack_gap(vma, addr, len))
24140 + break;
24141 if (addr + mm->cached_hole_size < vma->vm_start)
24142 mm->cached_hole_size = vma->vm_start - addr;
24143 addr = ALIGN(vma->vm_end, huge_page_size(h));
24144 }
24145 +
24146 + mm->free_area_cache = addr + len;
24147 + return addr;
24148 }
24149
24150 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24151 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24152 {
24153 struct hstate *h = hstate_file(file);
24154 struct mm_struct *mm = current->mm;
24155 - struct vm_area_struct *vma, *prev_vma;
24156 - unsigned long base = mm->mmap_base, addr = addr0;
24157 + struct vm_area_struct *vma;
24158 + unsigned long base = mm->mmap_base, addr;
24159 unsigned long largest_hole = mm->cached_hole_size;
24160 - int first_time = 1;
24161
24162 /* don't allow allocations above current base */
24163 if (mm->free_area_cache > base)
24164 @@ -321,66 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24165 largest_hole = 0;
24166 mm->free_area_cache = base;
24167 }
24168 -try_again:
24169 +
24170 /* make sure it can fit in the remaining address space */
24171 if (mm->free_area_cache < len)
24172 goto fail;
24173
24174 /* either no address requested or can't fit in requested address hole */
24175 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24176 + addr = (mm->free_area_cache - len);
24177 do {
24178 + addr &= huge_page_mask(h);
24179 + vma = find_vma(mm, addr);
24180 /*
24181 * Lookup failure means no vma is above this address,
24182 * i.e. return with success:
24183 - */
24184 - vma = find_vma(mm, addr);
24185 - if (!vma)
24186 - return addr;
24187 -
24188 - /*
24189 * new region fits between prev_vma->vm_end and
24190 * vma->vm_start, use it:
24191 */
24192 - prev_vma = vma->vm_prev;
24193 - if (addr + len <= vma->vm_start &&
24194 - (!prev_vma || (addr >= prev_vma->vm_end))) {
24195 + if (check_heap_stack_gap(vma, addr, len)) {
24196 /* remember the address as a hint for next time */
24197 - mm->cached_hole_size = largest_hole;
24198 - return (mm->free_area_cache = addr);
24199 - } else {
24200 - /* pull free_area_cache down to the first hole */
24201 - if (mm->free_area_cache == vma->vm_end) {
24202 - mm->free_area_cache = vma->vm_start;
24203 - mm->cached_hole_size = largest_hole;
24204 - }
24205 + mm->cached_hole_size = largest_hole;
24206 + return (mm->free_area_cache = addr);
24207 + }
24208 + /* pull free_area_cache down to the first hole */
24209 + if (mm->free_area_cache == vma->vm_end) {
24210 + mm->free_area_cache = vma->vm_start;
24211 + mm->cached_hole_size = largest_hole;
24212 }
24213
24214 /* remember the largest hole we saw so far */
24215 if (addr + largest_hole < vma->vm_start)
24216 - largest_hole = vma->vm_start - addr;
24217 + largest_hole = vma->vm_start - addr;
24218
24219 /* try just below the current vma->vm_start */
24220 - addr = (vma->vm_start - len) & huge_page_mask(h);
24221 - } while (len <= vma->vm_start);
24222 + addr = skip_heap_stack_gap(vma, len);
24223 + } while (!IS_ERR_VALUE(addr));
24224
24225 fail:
24226 /*
24227 - * if hint left us with no space for the requested
24228 - * mapping then try again:
24229 - */
24230 - if (first_time) {
24231 - mm->free_area_cache = base;
24232 - largest_hole = 0;
24233 - first_time = 0;
24234 - goto try_again;
24235 - }
24236 - /*
24237 * A failed mmap() very likely causes application failure,
24238 * so fall back to the bottom-up function here. This scenario
24239 * can happen with large stack limits and large mmap()
24240 * allocations.
24241 */
24242 - mm->free_area_cache = TASK_UNMAPPED_BASE;
24243 +
24244 +#ifdef CONFIG_PAX_SEGMEXEC
24245 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24246 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24247 + else
24248 +#endif
24249 +
24250 + mm->mmap_base = TASK_UNMAPPED_BASE;
24251 +
24252 +#ifdef CONFIG_PAX_RANDMMAP
24253 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24254 + mm->mmap_base += mm->delta_mmap;
24255 +#endif
24256 +
24257 + mm->free_area_cache = mm->mmap_base;
24258 mm->cached_hole_size = ~0UL;
24259 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24260 len, pgoff, flags);
24261 @@ -388,6 +392,7 @@ fail:
24262 /*
24263 * Restore the topdown base:
24264 */
24265 + mm->mmap_base = base;
24266 mm->free_area_cache = base;
24267 mm->cached_hole_size = ~0UL;
24268
24269 @@ -401,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24270 struct hstate *h = hstate_file(file);
24271 struct mm_struct *mm = current->mm;
24272 struct vm_area_struct *vma;
24273 + unsigned long pax_task_size = TASK_SIZE;
24274
24275 if (len & ~huge_page_mask(h))
24276 return -EINVAL;
24277 - if (len > TASK_SIZE)
24278 +
24279 +#ifdef CONFIG_PAX_SEGMEXEC
24280 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24281 + pax_task_size = SEGMEXEC_TASK_SIZE;
24282 +#endif
24283 +
24284 + pax_task_size -= PAGE_SIZE;
24285 +
24286 + if (len > pax_task_size)
24287 return -ENOMEM;
24288
24289 if (flags & MAP_FIXED) {
24290 @@ -416,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24291 if (addr) {
24292 addr = ALIGN(addr, huge_page_size(h));
24293 vma = find_vma(mm, addr);
24294 - if (TASK_SIZE - len >= addr &&
24295 - (!vma || addr + len <= vma->vm_start))
24296 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24297 return addr;
24298 }
24299 if (mm->get_unmapped_area == arch_get_unmapped_area)
24300 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24301 index 6cabf65..77e9c1c 100644
24302 --- a/arch/x86/mm/init.c
24303 +++ b/arch/x86/mm/init.c
24304 @@ -17,6 +17,7 @@
24305 #include <asm/tlb.h>
24306 #include <asm/proto.h>
24307 #include <asm/dma.h> /* for MAX_DMA_PFN */
24308 +#include <asm/desc.h>
24309
24310 unsigned long __initdata pgt_buf_start;
24311 unsigned long __meminitdata pgt_buf_end;
24312 @@ -33,7 +34,7 @@ int direct_gbpages
24313 static void __init find_early_table_space(unsigned long end, int use_pse,
24314 int use_gbpages)
24315 {
24316 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24317 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24318 phys_addr_t base;
24319
24320 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24321 @@ -314,8 +315,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24322 */
24323 int devmem_is_allowed(unsigned long pagenr)
24324 {
24325 +#ifdef CONFIG_GRKERNSEC_KMEM
24326 + /* allow BDA */
24327 + if (!pagenr)
24328 + return 1;
24329 + /* allow EBDA */
24330 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
24331 + return 1;
24332 +#else
24333 + if (!pagenr)
24334 + return 1;
24335 +#ifdef CONFIG_VM86
24336 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24337 + return 1;
24338 +#endif
24339 +#endif
24340 +
24341 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24342 + return 1;
24343 +#ifdef CONFIG_GRKERNSEC_KMEM
24344 + /* throw out everything else below 1MB */
24345 if (pagenr <= 256)
24346 - return 1;
24347 + return 0;
24348 +#endif
24349 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24350 return 0;
24351 if (!page_is_ram(pagenr))
24352 @@ -374,6 +396,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24353
24354 void free_initmem(void)
24355 {
24356 +
24357 +#ifdef CONFIG_PAX_KERNEXEC
24358 +#ifdef CONFIG_X86_32
24359 + /* PaX: limit KERNEL_CS to actual size */
24360 + unsigned long addr, limit;
24361 + struct desc_struct d;
24362 + int cpu;
24363 +
24364 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24365 + limit = (limit - 1UL) >> PAGE_SHIFT;
24366 +
24367 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24368 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24369 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24370 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24371 + }
24372 +
24373 + /* PaX: make KERNEL_CS read-only */
24374 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24375 + if (!paravirt_enabled())
24376 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24377 +/*
24378 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24379 + pgd = pgd_offset_k(addr);
24380 + pud = pud_offset(pgd, addr);
24381 + pmd = pmd_offset(pud, addr);
24382 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24383 + }
24384 +*/
24385 +#ifdef CONFIG_X86_PAE
24386 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24387 +/*
24388 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24389 + pgd = pgd_offset_k(addr);
24390 + pud = pud_offset(pgd, addr);
24391 + pmd = pmd_offset(pud, addr);
24392 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24393 + }
24394 +*/
24395 +#endif
24396 +
24397 +#ifdef CONFIG_MODULES
24398 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24399 +#endif
24400 +
24401 +#else
24402 + pgd_t *pgd;
24403 + pud_t *pud;
24404 + pmd_t *pmd;
24405 + unsigned long addr, end;
24406 +
24407 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24408 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24409 + pgd = pgd_offset_k(addr);
24410 + pud = pud_offset(pgd, addr);
24411 + pmd = pmd_offset(pud, addr);
24412 + if (!pmd_present(*pmd))
24413 + continue;
24414 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24415 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24416 + else
24417 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24418 + }
24419 +
24420 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24421 + end = addr + KERNEL_IMAGE_SIZE;
24422 + for (; addr < end; addr += PMD_SIZE) {
24423 + pgd = pgd_offset_k(addr);
24424 + pud = pud_offset(pgd, addr);
24425 + pmd = pmd_offset(pud, addr);
24426 + if (!pmd_present(*pmd))
24427 + continue;
24428 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24429 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24430 + }
24431 +#endif
24432 +
24433 + flush_tlb_all();
24434 +#endif
24435 +
24436 free_init_pages("unused kernel memory",
24437 (unsigned long)(&__init_begin),
24438 (unsigned long)(&__init_end));
24439 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24440 index 8663f6c..829ae76 100644
24441 --- a/arch/x86/mm/init_32.c
24442 +++ b/arch/x86/mm/init_32.c
24443 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
24444 }
24445
24446 /*
24447 - * Creates a middle page table and puts a pointer to it in the
24448 - * given global directory entry. This only returns the gd entry
24449 - * in non-PAE compilation mode, since the middle layer is folded.
24450 - */
24451 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24452 -{
24453 - pud_t *pud;
24454 - pmd_t *pmd_table;
24455 -
24456 -#ifdef CONFIG_X86_PAE
24457 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24458 - if (after_bootmem)
24459 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24460 - else
24461 - pmd_table = (pmd_t *)alloc_low_page();
24462 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24463 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24464 - pud = pud_offset(pgd, 0);
24465 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24466 -
24467 - return pmd_table;
24468 - }
24469 -#endif
24470 - pud = pud_offset(pgd, 0);
24471 - pmd_table = pmd_offset(pud, 0);
24472 -
24473 - return pmd_table;
24474 -}
24475 -
24476 -/*
24477 * Create a page table and place a pointer to it in a middle page
24478 * directory entry:
24479 */
24480 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24481 page_table = (pte_t *)alloc_low_page();
24482
24483 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24484 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24485 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24486 +#else
24487 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24488 +#endif
24489 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24490 }
24491
24492 return pte_offset_kernel(pmd, 0);
24493 }
24494
24495 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24496 +{
24497 + pud_t *pud;
24498 + pmd_t *pmd_table;
24499 +
24500 + pud = pud_offset(pgd, 0);
24501 + pmd_table = pmd_offset(pud, 0);
24502 +
24503 + return pmd_table;
24504 +}
24505 +
24506 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24507 {
24508 int pgd_idx = pgd_index(vaddr);
24509 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24510 int pgd_idx, pmd_idx;
24511 unsigned long vaddr;
24512 pgd_t *pgd;
24513 + pud_t *pud;
24514 pmd_t *pmd;
24515 pte_t *pte = NULL;
24516
24517 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24518 pgd = pgd_base + pgd_idx;
24519
24520 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24521 - pmd = one_md_table_init(pgd);
24522 - pmd = pmd + pmd_index(vaddr);
24523 + pud = pud_offset(pgd, vaddr);
24524 + pmd = pmd_offset(pud, vaddr);
24525 +
24526 +#ifdef CONFIG_X86_PAE
24527 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24528 +#endif
24529 +
24530 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24531 pmd++, pmd_idx++) {
24532 pte = page_table_kmap_check(one_page_table_init(pmd),
24533 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24534 }
24535 }
24536
24537 -static inline int is_kernel_text(unsigned long addr)
24538 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24539 {
24540 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24541 - return 1;
24542 - return 0;
24543 + if ((start > ktla_ktva((unsigned long)_etext) ||
24544 + end <= ktla_ktva((unsigned long)_stext)) &&
24545 + (start > ktla_ktva((unsigned long)_einittext) ||
24546 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24547 +
24548 +#ifdef CONFIG_ACPI_SLEEP
24549 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24550 +#endif
24551 +
24552 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24553 + return 0;
24554 + return 1;
24555 }
24556
24557 /*
24558 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
24559 unsigned long last_map_addr = end;
24560 unsigned long start_pfn, end_pfn;
24561 pgd_t *pgd_base = swapper_pg_dir;
24562 - int pgd_idx, pmd_idx, pte_ofs;
24563 + unsigned int pgd_idx, pmd_idx, pte_ofs;
24564 unsigned long pfn;
24565 pgd_t *pgd;
24566 + pud_t *pud;
24567 pmd_t *pmd;
24568 pte_t *pte;
24569 unsigned pages_2m, pages_4k;
24570 @@ -281,8 +282,13 @@ repeat:
24571 pfn = start_pfn;
24572 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24573 pgd = pgd_base + pgd_idx;
24574 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24575 - pmd = one_md_table_init(pgd);
24576 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24577 + pud = pud_offset(pgd, 0);
24578 + pmd = pmd_offset(pud, 0);
24579 +
24580 +#ifdef CONFIG_X86_PAE
24581 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24582 +#endif
24583
24584 if (pfn >= end_pfn)
24585 continue;
24586 @@ -294,14 +300,13 @@ repeat:
24587 #endif
24588 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24589 pmd++, pmd_idx++) {
24590 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24591 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24592
24593 /*
24594 * Map with big pages if possible, otherwise
24595 * create normal page tables:
24596 */
24597 if (use_pse) {
24598 - unsigned int addr2;
24599 pgprot_t prot = PAGE_KERNEL_LARGE;
24600 /*
24601 * first pass will use the same initial
24602 @@ -311,11 +316,7 @@ repeat:
24603 __pgprot(PTE_IDENT_ATTR |
24604 _PAGE_PSE);
24605
24606 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24607 - PAGE_OFFSET + PAGE_SIZE-1;
24608 -
24609 - if (is_kernel_text(addr) ||
24610 - is_kernel_text(addr2))
24611 + if (is_kernel_text(address, address + PMD_SIZE))
24612 prot = PAGE_KERNEL_LARGE_EXEC;
24613
24614 pages_2m++;
24615 @@ -332,7 +333,7 @@ repeat:
24616 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24617 pte += pte_ofs;
24618 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24619 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24620 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24621 pgprot_t prot = PAGE_KERNEL;
24622 /*
24623 * first pass will use the same initial
24624 @@ -340,7 +341,7 @@ repeat:
24625 */
24626 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24627
24628 - if (is_kernel_text(addr))
24629 + if (is_kernel_text(address, address + PAGE_SIZE))
24630 prot = PAGE_KERNEL_EXEC;
24631
24632 pages_4k++;
24633 @@ -466,7 +467,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24634
24635 pud = pud_offset(pgd, va);
24636 pmd = pmd_offset(pud, va);
24637 - if (!pmd_present(*pmd))
24638 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
24639 break;
24640
24641 pte = pte_offset_kernel(pmd, va);
24642 @@ -518,12 +519,10 @@ void __init early_ioremap_page_table_range_init(void)
24643
24644 static void __init pagetable_init(void)
24645 {
24646 - pgd_t *pgd_base = swapper_pg_dir;
24647 -
24648 - permanent_kmaps_init(pgd_base);
24649 + permanent_kmaps_init(swapper_pg_dir);
24650 }
24651
24652 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24653 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24654 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24655
24656 /* user-defined highmem size */
24657 @@ -735,6 +734,12 @@ void __init mem_init(void)
24658
24659 pci_iommu_alloc();
24660
24661 +#ifdef CONFIG_PAX_PER_CPU_PGD
24662 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24663 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24664 + KERNEL_PGD_PTRS);
24665 +#endif
24666 +
24667 #ifdef CONFIG_FLATMEM
24668 BUG_ON(!mem_map);
24669 #endif
24670 @@ -761,7 +766,7 @@ void __init mem_init(void)
24671 reservedpages++;
24672
24673 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24674 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24675 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24676 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24677
24678 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24679 @@ -802,10 +807,10 @@ void __init mem_init(void)
24680 ((unsigned long)&__init_end -
24681 (unsigned long)&__init_begin) >> 10,
24682
24683 - (unsigned long)&_etext, (unsigned long)&_edata,
24684 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24685 + (unsigned long)&_sdata, (unsigned long)&_edata,
24686 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24687
24688 - (unsigned long)&_text, (unsigned long)&_etext,
24689 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24690 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24691
24692 /*
24693 @@ -883,6 +888,7 @@ void set_kernel_text_rw(void)
24694 if (!kernel_set_to_readonly)
24695 return;
24696
24697 + start = ktla_ktva(start);
24698 pr_debug("Set kernel text: %lx - %lx for read write\n",
24699 start, start+size);
24700
24701 @@ -897,6 +903,7 @@ void set_kernel_text_ro(void)
24702 if (!kernel_set_to_readonly)
24703 return;
24704
24705 + start = ktla_ktva(start);
24706 pr_debug("Set kernel text: %lx - %lx for read only\n",
24707 start, start+size);
24708
24709 @@ -925,6 +932,7 @@ void mark_rodata_ro(void)
24710 unsigned long start = PFN_ALIGN(_text);
24711 unsigned long size = PFN_ALIGN(_etext) - start;
24712
24713 + start = ktla_ktva(start);
24714 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
24715 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
24716 size >> 10);
24717 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
24718 index 436a030..b8596b9 100644
24719 --- a/arch/x86/mm/init_64.c
24720 +++ b/arch/x86/mm/init_64.c
24721 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
24722 * around without checking the pgd every time.
24723 */
24724
24725 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
24726 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
24727 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24728
24729 int force_personality32;
24730 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24731
24732 for (address = start; address <= end; address += PGDIR_SIZE) {
24733 const pgd_t *pgd_ref = pgd_offset_k(address);
24734 +
24735 +#ifdef CONFIG_PAX_PER_CPU_PGD
24736 + unsigned long cpu;
24737 +#else
24738 struct page *page;
24739 +#endif
24740
24741 if (pgd_none(*pgd_ref))
24742 continue;
24743
24744 spin_lock(&pgd_lock);
24745 +
24746 +#ifdef CONFIG_PAX_PER_CPU_PGD
24747 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24748 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
24749 +#else
24750 list_for_each_entry(page, &pgd_list, lru) {
24751 pgd_t *pgd;
24752 spinlock_t *pgt_lock;
24753 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24754 /* the pgt_lock only for Xen */
24755 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
24756 spin_lock(pgt_lock);
24757 +#endif
24758
24759 if (pgd_none(*pgd))
24760 set_pgd(pgd, *pgd_ref);
24761 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24762 BUG_ON(pgd_page_vaddr(*pgd)
24763 != pgd_page_vaddr(*pgd_ref));
24764
24765 +#ifndef CONFIG_PAX_PER_CPU_PGD
24766 spin_unlock(pgt_lock);
24767 +#endif
24768 +
24769 }
24770 spin_unlock(&pgd_lock);
24771 }
24772 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
24773 pmd = fill_pmd(pud, vaddr);
24774 pte = fill_pte(pmd, vaddr);
24775
24776 + pax_open_kernel();
24777 set_pte(pte, new_pte);
24778 + pax_close_kernel();
24779
24780 /*
24781 * It's enough to flush this one mapping.
24782 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
24783 pgd = pgd_offset_k((unsigned long)__va(phys));
24784 if (pgd_none(*pgd)) {
24785 pud = (pud_t *) spp_getpage();
24786 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
24787 - _PAGE_USER));
24788 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
24789 }
24790 pud = pud_offset(pgd, (unsigned long)__va(phys));
24791 if (pud_none(*pud)) {
24792 pmd = (pmd_t *) spp_getpage();
24793 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
24794 - _PAGE_USER));
24795 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
24796 }
24797 pmd = pmd_offset(pud, phys);
24798 BUG_ON(!pmd_none(*pmd));
24799 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
24800 if (pfn >= pgt_buf_top)
24801 panic("alloc_low_page: ran out of memory");
24802
24803 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
24804 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
24805 clear_page(adr);
24806 *phys = pfn * PAGE_SIZE;
24807 return adr;
24808 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
24809
24810 phys = __pa(virt);
24811 left = phys & (PAGE_SIZE - 1);
24812 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
24813 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
24814 adr = (void *)(((unsigned long)adr) | left);
24815
24816 return adr;
24817 @@ -684,6 +698,12 @@ void __init mem_init(void)
24818
24819 pci_iommu_alloc();
24820
24821 +#ifdef CONFIG_PAX_PER_CPU_PGD
24822 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24823 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24824 + KERNEL_PGD_PTRS);
24825 +#endif
24826 +
24827 /* clear_bss() already clear the empty_zero_page */
24828
24829 reservedpages = 0;
24830 @@ -844,8 +864,8 @@ int kern_addr_valid(unsigned long addr)
24831 static struct vm_area_struct gate_vma = {
24832 .vm_start = VSYSCALL_START,
24833 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
24834 - .vm_page_prot = PAGE_READONLY_EXEC,
24835 - .vm_flags = VM_READ | VM_EXEC
24836 + .vm_page_prot = PAGE_READONLY,
24837 + .vm_flags = VM_READ
24838 };
24839
24840 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24841 @@ -879,7 +899,7 @@ int in_gate_area_no_mm(unsigned long addr)
24842
24843 const char *arch_vma_name(struct vm_area_struct *vma)
24844 {
24845 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24846 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24847 return "[vdso]";
24848 if (vma == &gate_vma)
24849 return "[vsyscall]";
24850 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
24851 index 7b179b4..6bd1777 100644
24852 --- a/arch/x86/mm/iomap_32.c
24853 +++ b/arch/x86/mm/iomap_32.c
24854 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
24855 type = kmap_atomic_idx_push();
24856 idx = type + KM_TYPE_NR * smp_processor_id();
24857 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24858 +
24859 + pax_open_kernel();
24860 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
24861 + pax_close_kernel();
24862 +
24863 arch_flush_lazy_mmu_mode();
24864
24865 return (void *)vaddr;
24866 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
24867 index be1ef57..55f0160 100644
24868 --- a/arch/x86/mm/ioremap.c
24869 +++ b/arch/x86/mm/ioremap.c
24870 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
24871 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
24872 int is_ram = page_is_ram(pfn);
24873
24874 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
24875 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
24876 return NULL;
24877 WARN_ON_ONCE(is_ram);
24878 }
24879 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
24880
24881 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
24882 if (page_is_ram(start >> PAGE_SHIFT))
24883 +#ifdef CONFIG_HIGHMEM
24884 + if ((start >> PAGE_SHIFT) < max_low_pfn)
24885 +#endif
24886 return __va(phys);
24887
24888 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
24889 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
24890 early_param("early_ioremap_debug", early_ioremap_debug_setup);
24891
24892 static __initdata int after_paging_init;
24893 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
24894 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
24895
24896 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
24897 {
24898 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
24899 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
24900
24901 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
24902 - memset(bm_pte, 0, sizeof(bm_pte));
24903 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
24904 + pmd_populate_user(&init_mm, pmd, bm_pte);
24905
24906 /*
24907 * The boot-ioremap range spans multiple pmds, for which
24908 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
24909 index d87dd6d..bf3fa66 100644
24910 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
24911 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
24912 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
24913 * memory (e.g. tracked pages)? For now, we need this to avoid
24914 * invoking kmemcheck for PnP BIOS calls.
24915 */
24916 - if (regs->flags & X86_VM_MASK)
24917 + if (v8086_mode(regs))
24918 return false;
24919 - if (regs->cs != __KERNEL_CS)
24920 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
24921 return false;
24922
24923 pte = kmemcheck_pte_lookup(address);
24924 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
24925 index 845df68..1d8d29f 100644
24926 --- a/arch/x86/mm/mmap.c
24927 +++ b/arch/x86/mm/mmap.c
24928 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
24929 * Leave an at least ~128 MB hole with possible stack randomization.
24930 */
24931 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
24932 -#define MAX_GAP (TASK_SIZE/6*5)
24933 +#define MAX_GAP (pax_task_size/6*5)
24934
24935 static int mmap_is_legacy(void)
24936 {
24937 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
24938 return rnd << PAGE_SHIFT;
24939 }
24940
24941 -static unsigned long mmap_base(void)
24942 +static unsigned long mmap_base(struct mm_struct *mm)
24943 {
24944 unsigned long gap = rlimit(RLIMIT_STACK);
24945 + unsigned long pax_task_size = TASK_SIZE;
24946 +
24947 +#ifdef CONFIG_PAX_SEGMEXEC
24948 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24949 + pax_task_size = SEGMEXEC_TASK_SIZE;
24950 +#endif
24951
24952 if (gap < MIN_GAP)
24953 gap = MIN_GAP;
24954 else if (gap > MAX_GAP)
24955 gap = MAX_GAP;
24956
24957 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
24958 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
24959 }
24960
24961 /*
24962 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
24963 * does, but not when emulating X86_32
24964 */
24965 -static unsigned long mmap_legacy_base(void)
24966 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
24967 {
24968 - if (mmap_is_ia32())
24969 + if (mmap_is_ia32()) {
24970 +
24971 +#ifdef CONFIG_PAX_SEGMEXEC
24972 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24973 + return SEGMEXEC_TASK_UNMAPPED_BASE;
24974 + else
24975 +#endif
24976 +
24977 return TASK_UNMAPPED_BASE;
24978 - else
24979 + } else
24980 return TASK_UNMAPPED_BASE + mmap_rnd();
24981 }
24982
24983 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
24984 void arch_pick_mmap_layout(struct mm_struct *mm)
24985 {
24986 if (mmap_is_legacy()) {
24987 - mm->mmap_base = mmap_legacy_base();
24988 + mm->mmap_base = mmap_legacy_base(mm);
24989 +
24990 +#ifdef CONFIG_PAX_RANDMMAP
24991 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24992 + mm->mmap_base += mm->delta_mmap;
24993 +#endif
24994 +
24995 mm->get_unmapped_area = arch_get_unmapped_area;
24996 mm->unmap_area = arch_unmap_area;
24997 } else {
24998 - mm->mmap_base = mmap_base();
24999 + mm->mmap_base = mmap_base(mm);
25000 +
25001 +#ifdef CONFIG_PAX_RANDMMAP
25002 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25003 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25004 +#endif
25005 +
25006 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25007 mm->unmap_area = arch_unmap_area_topdown;
25008 }
25009 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25010 index dc0b727..dc9d71a 100644
25011 --- a/arch/x86/mm/mmio-mod.c
25012 +++ b/arch/x86/mm/mmio-mod.c
25013 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25014 break;
25015 default:
25016 {
25017 - unsigned char *ip = (unsigned char *)instptr;
25018 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25019 my_trace->opcode = MMIO_UNKNOWN_OP;
25020 my_trace->width = 0;
25021 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25022 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25023 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25024 void __iomem *addr)
25025 {
25026 - static atomic_t next_id;
25027 + static atomic_unchecked_t next_id;
25028 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25029 /* These are page-unaligned. */
25030 struct mmiotrace_map map = {
25031 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25032 .private = trace
25033 },
25034 .phys = offset,
25035 - .id = atomic_inc_return(&next_id)
25036 + .id = atomic_inc_return_unchecked(&next_id)
25037 };
25038 map.map_id = trace->id;
25039
25040 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25041 index b008656..773eac2 100644
25042 --- a/arch/x86/mm/pageattr-test.c
25043 +++ b/arch/x86/mm/pageattr-test.c
25044 @@ -36,7 +36,7 @@ enum {
25045
25046 static int pte_testbit(pte_t pte)
25047 {
25048 - return pte_flags(pte) & _PAGE_UNUSED1;
25049 + return pte_flags(pte) & _PAGE_CPA_TEST;
25050 }
25051
25052 struct split_state {
25053 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25054 index e1ebde3..b1e1db38 100644
25055 --- a/arch/x86/mm/pageattr.c
25056 +++ b/arch/x86/mm/pageattr.c
25057 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25058 */
25059 #ifdef CONFIG_PCI_BIOS
25060 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25061 - pgprot_val(forbidden) |= _PAGE_NX;
25062 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25063 #endif
25064
25065 /*
25066 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25067 * Does not cover __inittext since that is gone later on. On
25068 * 64bit we do not enforce !NX on the low mapping
25069 */
25070 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25071 - pgprot_val(forbidden) |= _PAGE_NX;
25072 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25073 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25074
25075 +#ifdef CONFIG_DEBUG_RODATA
25076 /*
25077 * The .rodata section needs to be read-only. Using the pfn
25078 * catches all aliases.
25079 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25080 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25081 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25082 pgprot_val(forbidden) |= _PAGE_RW;
25083 +#endif
25084
25085 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25086 /*
25087 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25088 }
25089 #endif
25090
25091 +#ifdef CONFIG_PAX_KERNEXEC
25092 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25093 + pgprot_val(forbidden) |= _PAGE_RW;
25094 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25095 + }
25096 +#endif
25097 +
25098 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25099
25100 return prot;
25101 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25102 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25103 {
25104 /* change init_mm */
25105 + pax_open_kernel();
25106 set_pte_atomic(kpte, pte);
25107 +
25108 #ifdef CONFIG_X86_32
25109 if (!SHARED_KERNEL_PMD) {
25110 +
25111 +#ifdef CONFIG_PAX_PER_CPU_PGD
25112 + unsigned long cpu;
25113 +#else
25114 struct page *page;
25115 +#endif
25116
25117 +#ifdef CONFIG_PAX_PER_CPU_PGD
25118 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25119 + pgd_t *pgd = get_cpu_pgd(cpu);
25120 +#else
25121 list_for_each_entry(page, &pgd_list, lru) {
25122 - pgd_t *pgd;
25123 + pgd_t *pgd = (pgd_t *)page_address(page);
25124 +#endif
25125 +
25126 pud_t *pud;
25127 pmd_t *pmd;
25128
25129 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25130 + pgd += pgd_index(address);
25131 pud = pud_offset(pgd, address);
25132 pmd = pmd_offset(pud, address);
25133 set_pte_atomic((pte_t *)pmd, pte);
25134 }
25135 }
25136 #endif
25137 + pax_close_kernel();
25138 }
25139
25140 static int
25141 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25142 index f6ff57b..481690f 100644
25143 --- a/arch/x86/mm/pat.c
25144 +++ b/arch/x86/mm/pat.c
25145 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25146
25147 if (!entry) {
25148 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25149 - current->comm, current->pid, start, end);
25150 + current->comm, task_pid_nr(current), start, end);
25151 return -EINVAL;
25152 }
25153
25154 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25155 while (cursor < to) {
25156 if (!devmem_is_allowed(pfn)) {
25157 printk(KERN_INFO
25158 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25159 - current->comm, from, to);
25160 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25161 + current->comm, from, to, cursor);
25162 return 0;
25163 }
25164 cursor += PAGE_SIZE;
25165 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25166 printk(KERN_INFO
25167 "%s:%d ioremap_change_attr failed %s "
25168 "for %Lx-%Lx\n",
25169 - current->comm, current->pid,
25170 + current->comm, task_pid_nr(current),
25171 cattr_name(flags),
25172 base, (unsigned long long)(base + size));
25173 return -EINVAL;
25174 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25175 if (want_flags != flags) {
25176 printk(KERN_WARNING
25177 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25178 - current->comm, current->pid,
25179 + current->comm, task_pid_nr(current),
25180 cattr_name(want_flags),
25181 (unsigned long long)paddr,
25182 (unsigned long long)(paddr + size),
25183 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25184 free_memtype(paddr, paddr + size);
25185 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25186 " for %Lx-%Lx, got %s\n",
25187 - current->comm, current->pid,
25188 + current->comm, task_pid_nr(current),
25189 cattr_name(want_flags),
25190 (unsigned long long)paddr,
25191 (unsigned long long)(paddr + size),
25192 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25193 index 9f0614d..92ae64a 100644
25194 --- a/arch/x86/mm/pf_in.c
25195 +++ b/arch/x86/mm/pf_in.c
25196 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25197 int i;
25198 enum reason_type rv = OTHERS;
25199
25200 - p = (unsigned char *)ins_addr;
25201 + p = (unsigned char *)ktla_ktva(ins_addr);
25202 p += skip_prefix(p, &prf);
25203 p += get_opcode(p, &opcode);
25204
25205 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25206 struct prefix_bits prf;
25207 int i;
25208
25209 - p = (unsigned char *)ins_addr;
25210 + p = (unsigned char *)ktla_ktva(ins_addr);
25211 p += skip_prefix(p, &prf);
25212 p += get_opcode(p, &opcode);
25213
25214 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25215 struct prefix_bits prf;
25216 int i;
25217
25218 - p = (unsigned char *)ins_addr;
25219 + p = (unsigned char *)ktla_ktva(ins_addr);
25220 p += skip_prefix(p, &prf);
25221 p += get_opcode(p, &opcode);
25222
25223 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25224 struct prefix_bits prf;
25225 int i;
25226
25227 - p = (unsigned char *)ins_addr;
25228 + p = (unsigned char *)ktla_ktva(ins_addr);
25229 p += skip_prefix(p, &prf);
25230 p += get_opcode(p, &opcode);
25231 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25232 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25233 struct prefix_bits prf;
25234 int i;
25235
25236 - p = (unsigned char *)ins_addr;
25237 + p = (unsigned char *)ktla_ktva(ins_addr);
25238 p += skip_prefix(p, &prf);
25239 p += get_opcode(p, &opcode);
25240 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25241 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25242 index 8573b83..7d9628f 100644
25243 --- a/arch/x86/mm/pgtable.c
25244 +++ b/arch/x86/mm/pgtable.c
25245 @@ -84,10 +84,60 @@ static inline void pgd_list_del(pgd_t *pgd)
25246 list_del(&page->lru);
25247 }
25248
25249 -#define UNSHARED_PTRS_PER_PGD \
25250 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25251 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25252 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25253
25254 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25255 +{
25256 + while (count--)
25257 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25258 +}
25259 +#endif
25260
25261 +#ifdef CONFIG_PAX_PER_CPU_PGD
25262 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25263 +{
25264 + while (count--) {
25265 + pgd_t pgd;
25266 +
25267 +#ifdef CONFIG_X86_64
25268 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25269 +#else
25270 + pgd = *src++;
25271 +#endif
25272 +
25273 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25274 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25275 +#endif
25276 +
25277 + *dst++ = pgd;
25278 + }
25279 +
25280 +}
25281 +#endif
25282 +
25283 +#ifdef CONFIG_X86_64
25284 +#define pxd_t pud_t
25285 +#define pyd_t pgd_t
25286 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25287 +#define pxd_free(mm, pud) pud_free((mm), (pud))
25288 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25289 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
25290 +#define PYD_SIZE PGDIR_SIZE
25291 +#else
25292 +#define pxd_t pmd_t
25293 +#define pyd_t pud_t
25294 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25295 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
25296 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25297 +#define pyd_offset(mm, address) pud_offset((mm), (address))
25298 +#define PYD_SIZE PUD_SIZE
25299 +#endif
25300 +
25301 +#ifdef CONFIG_PAX_PER_CPU_PGD
25302 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25303 +static inline void pgd_dtor(pgd_t *pgd) {}
25304 +#else
25305 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25306 {
25307 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25308 @@ -128,6 +178,7 @@ static void pgd_dtor(pgd_t *pgd)
25309 pgd_list_del(pgd);
25310 spin_unlock(&pgd_lock);
25311 }
25312 +#endif
25313
25314 /*
25315 * List of all pgd's needed for non-PAE so it can invalidate entries
25316 @@ -140,7 +191,7 @@ static void pgd_dtor(pgd_t *pgd)
25317 * -- wli
25318 */
25319
25320 -#ifdef CONFIG_X86_PAE
25321 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25322 /*
25323 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25324 * updating the top-level pagetable entries to guarantee the
25325 @@ -152,7 +203,7 @@ static void pgd_dtor(pgd_t *pgd)
25326 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25327 * and initialize the kernel pmds here.
25328 */
25329 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25330 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25331
25332 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25333 {
25334 @@ -170,36 +221,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25335 */
25336 flush_tlb_mm(mm);
25337 }
25338 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25339 +#define PREALLOCATED_PXDS USER_PGD_PTRS
25340 #else /* !CONFIG_X86_PAE */
25341
25342 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25343 -#define PREALLOCATED_PMDS 0
25344 +#define PREALLOCATED_PXDS 0
25345
25346 #endif /* CONFIG_X86_PAE */
25347
25348 -static void free_pmds(pmd_t *pmds[])
25349 +static void free_pxds(pxd_t *pxds[])
25350 {
25351 int i;
25352
25353 - for(i = 0; i < PREALLOCATED_PMDS; i++)
25354 - if (pmds[i])
25355 - free_page((unsigned long)pmds[i]);
25356 + for(i = 0; i < PREALLOCATED_PXDS; i++)
25357 + if (pxds[i])
25358 + free_page((unsigned long)pxds[i]);
25359 }
25360
25361 -static int preallocate_pmds(pmd_t *pmds[])
25362 +static int preallocate_pxds(pxd_t *pxds[])
25363 {
25364 int i;
25365 bool failed = false;
25366
25367 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25368 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25369 - if (pmd == NULL)
25370 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25371 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25372 + if (pxd == NULL)
25373 failed = true;
25374 - pmds[i] = pmd;
25375 + pxds[i] = pxd;
25376 }
25377
25378 if (failed) {
25379 - free_pmds(pmds);
25380 + free_pxds(pxds);
25381 return -ENOMEM;
25382 }
25383
25384 @@ -212,51 +265,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25385 * preallocate which never got a corresponding vma will need to be
25386 * freed manually.
25387 */
25388 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25389 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25390 {
25391 int i;
25392
25393 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25394 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25395 pgd_t pgd = pgdp[i];
25396
25397 if (pgd_val(pgd) != 0) {
25398 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25399 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25400
25401 - pgdp[i] = native_make_pgd(0);
25402 + set_pgd(pgdp + i, native_make_pgd(0));
25403
25404 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25405 - pmd_free(mm, pmd);
25406 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25407 + pxd_free(mm, pxd);
25408 }
25409 }
25410 }
25411
25412 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25413 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25414 {
25415 - pud_t *pud;
25416 + pyd_t *pyd;
25417 unsigned long addr;
25418 int i;
25419
25420 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25421 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25422 return;
25423
25424 - pud = pud_offset(pgd, 0);
25425 +#ifdef CONFIG_X86_64
25426 + pyd = pyd_offset(mm, 0L);
25427 +#else
25428 + pyd = pyd_offset(pgd, 0L);
25429 +#endif
25430
25431 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25432 - i++, pud++, addr += PUD_SIZE) {
25433 - pmd_t *pmd = pmds[i];
25434 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25435 + i++, pyd++, addr += PYD_SIZE) {
25436 + pxd_t *pxd = pxds[i];
25437
25438 if (i >= KERNEL_PGD_BOUNDARY)
25439 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25440 - sizeof(pmd_t) * PTRS_PER_PMD);
25441 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25442 + sizeof(pxd_t) * PTRS_PER_PMD);
25443
25444 - pud_populate(mm, pud, pmd);
25445 + pyd_populate(mm, pyd, pxd);
25446 }
25447 }
25448
25449 pgd_t *pgd_alloc(struct mm_struct *mm)
25450 {
25451 pgd_t *pgd;
25452 - pmd_t *pmds[PREALLOCATED_PMDS];
25453 + pxd_t *pxds[PREALLOCATED_PXDS];
25454
25455 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25456
25457 @@ -265,11 +322,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25458
25459 mm->pgd = pgd;
25460
25461 - if (preallocate_pmds(pmds) != 0)
25462 + if (preallocate_pxds(pxds) != 0)
25463 goto out_free_pgd;
25464
25465 if (paravirt_pgd_alloc(mm) != 0)
25466 - goto out_free_pmds;
25467 + goto out_free_pxds;
25468
25469 /*
25470 * Make sure that pre-populating the pmds is atomic with
25471 @@ -279,14 +336,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25472 spin_lock(&pgd_lock);
25473
25474 pgd_ctor(mm, pgd);
25475 - pgd_prepopulate_pmd(mm, pgd, pmds);
25476 + pgd_prepopulate_pxd(mm, pgd, pxds);
25477
25478 spin_unlock(&pgd_lock);
25479
25480 return pgd;
25481
25482 -out_free_pmds:
25483 - free_pmds(pmds);
25484 +out_free_pxds:
25485 + free_pxds(pxds);
25486 out_free_pgd:
25487 free_page((unsigned long)pgd);
25488 out:
25489 @@ -295,7 +352,7 @@ out:
25490
25491 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25492 {
25493 - pgd_mop_up_pmds(mm, pgd);
25494 + pgd_mop_up_pxds(mm, pgd);
25495 pgd_dtor(pgd);
25496 paravirt_pgd_free(mm, pgd);
25497 free_page((unsigned long)pgd);
25498 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25499 index cac7184..09a39fa 100644
25500 --- a/arch/x86/mm/pgtable_32.c
25501 +++ b/arch/x86/mm/pgtable_32.c
25502 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25503 return;
25504 }
25505 pte = pte_offset_kernel(pmd, vaddr);
25506 +
25507 + pax_open_kernel();
25508 if (pte_val(pteval))
25509 set_pte_at(&init_mm, vaddr, pte, pteval);
25510 else
25511 pte_clear(&init_mm, vaddr, pte);
25512 + pax_close_kernel();
25513
25514 /*
25515 * It's enough to flush this one mapping.
25516 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25517 index 410531d..0f16030 100644
25518 --- a/arch/x86/mm/setup_nx.c
25519 +++ b/arch/x86/mm/setup_nx.c
25520 @@ -5,8 +5,10 @@
25521 #include <asm/pgtable.h>
25522 #include <asm/proto.h>
25523
25524 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25525 static int disable_nx __cpuinitdata;
25526
25527 +#ifndef CONFIG_PAX_PAGEEXEC
25528 /*
25529 * noexec = on|off
25530 *
25531 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25532 return 0;
25533 }
25534 early_param("noexec", noexec_setup);
25535 +#endif
25536 +
25537 +#endif
25538
25539 void __cpuinit x86_configure_nx(void)
25540 {
25541 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25542 if (cpu_has_nx && !disable_nx)
25543 __supported_pte_mask |= _PAGE_NX;
25544 else
25545 +#endif
25546 __supported_pte_mask &= ~_PAGE_NX;
25547 }
25548
25549 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25550 index d6c0418..06a0ad5 100644
25551 --- a/arch/x86/mm/tlb.c
25552 +++ b/arch/x86/mm/tlb.c
25553 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
25554 BUG();
25555 cpumask_clear_cpu(cpu,
25556 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25557 +
25558 +#ifndef CONFIG_PAX_PER_CPU_PGD
25559 load_cr3(swapper_pg_dir);
25560 +#endif
25561 +
25562 }
25563 EXPORT_SYMBOL_GPL(leave_mm);
25564
25565 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25566 index 6687022..ceabcfa 100644
25567 --- a/arch/x86/net/bpf_jit.S
25568 +++ b/arch/x86/net/bpf_jit.S
25569 @@ -9,6 +9,7 @@
25570 */
25571 #include <linux/linkage.h>
25572 #include <asm/dwarf2.h>
25573 +#include <asm/alternative-asm.h>
25574
25575 /*
25576 * Calling convention :
25577 @@ -35,6 +36,7 @@ sk_load_word:
25578 jle bpf_slow_path_word
25579 mov (SKBDATA,%rsi),%eax
25580 bswap %eax /* ntohl() */
25581 + pax_force_retaddr
25582 ret
25583
25584
25585 @@ -53,6 +55,7 @@ sk_load_half:
25586 jle bpf_slow_path_half
25587 movzwl (SKBDATA,%rsi),%eax
25588 rol $8,%ax # ntohs()
25589 + pax_force_retaddr
25590 ret
25591
25592 sk_load_byte_ind:
25593 @@ -66,6 +69,7 @@ sk_load_byte:
25594 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25595 jle bpf_slow_path_byte
25596 movzbl (SKBDATA,%rsi),%eax
25597 + pax_force_retaddr
25598 ret
25599
25600 /**
25601 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
25602 movzbl (SKBDATA,%rsi),%ebx
25603 and $15,%bl
25604 shl $2,%bl
25605 + pax_force_retaddr
25606 ret
25607 CFI_ENDPROC
25608 ENDPROC(sk_load_byte_msh)
25609 @@ -91,6 +96,7 @@ bpf_error:
25610 xor %eax,%eax
25611 mov -8(%rbp),%rbx
25612 leaveq
25613 + pax_force_retaddr
25614 ret
25615
25616 /* rsi contains offset and can be scratched */
25617 @@ -113,6 +119,7 @@ bpf_slow_path_word:
25618 js bpf_error
25619 mov -12(%rbp),%eax
25620 bswap %eax
25621 + pax_force_retaddr
25622 ret
25623
25624 bpf_slow_path_half:
25625 @@ -121,12 +128,14 @@ bpf_slow_path_half:
25626 mov -12(%rbp),%ax
25627 rol $8,%ax
25628 movzwl %ax,%eax
25629 + pax_force_retaddr
25630 ret
25631
25632 bpf_slow_path_byte:
25633 bpf_slow_path_common(1)
25634 js bpf_error
25635 movzbl -12(%rbp),%eax
25636 + pax_force_retaddr
25637 ret
25638
25639 bpf_slow_path_byte_msh:
25640 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
25641 and $15,%al
25642 shl $2,%al
25643 xchg %eax,%ebx
25644 + pax_force_retaddr
25645 ret
25646 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
25647 index 5a5b6e4..201d42e 100644
25648 --- a/arch/x86/net/bpf_jit_comp.c
25649 +++ b/arch/x86/net/bpf_jit_comp.c
25650 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
25651 set_fs(old_fs);
25652 }
25653
25654 +struct bpf_jit_work {
25655 + struct work_struct work;
25656 + void *image;
25657 +};
25658
25659 void bpf_jit_compile(struct sk_filter *fp)
25660 {
25661 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
25662 if (addrs == NULL)
25663 return;
25664
25665 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
25666 + if (!fp->work)
25667 + goto out;
25668 +
25669 /* Before first pass, make a rough estimation of addrs[]
25670 * each bpf instruction is translated to less than 64 bytes
25671 */
25672 @@ -477,7 +485,7 @@ void bpf_jit_compile(struct sk_filter *fp)
25673 common_load: seen |= SEEN_DATAREF;
25674 if ((int)K < 0) {
25675 /* Abort the JIT because __load_pointer() is needed. */
25676 - goto out;
25677 + goto error;
25678 }
25679 t_offset = func - (image + addrs[i]);
25680 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
25681 @@ -492,7 +500,7 @@ common_load: seen |= SEEN_DATAREF;
25682 case BPF_S_LDX_B_MSH:
25683 if ((int)K < 0) {
25684 /* Abort the JIT because __load_pointer() is needed. */
25685 - goto out;
25686 + goto error;
25687 }
25688 seen |= SEEN_DATAREF | SEEN_XREG;
25689 t_offset = sk_load_byte_msh - (image + addrs[i]);
25690 @@ -582,17 +590,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25691 break;
25692 default:
25693 /* hmm, too complex filter, give up with jit compiler */
25694 - goto out;
25695 + goto error;
25696 }
25697 ilen = prog - temp;
25698 if (image) {
25699 if (unlikely(proglen + ilen > oldproglen)) {
25700 pr_err("bpb_jit_compile fatal error\n");
25701 - kfree(addrs);
25702 - module_free(NULL, image);
25703 - return;
25704 + module_free_exec(NULL, image);
25705 + goto error;
25706 }
25707 + pax_open_kernel();
25708 memcpy(image + proglen, temp, ilen);
25709 + pax_close_kernel();
25710 }
25711 proglen += ilen;
25712 addrs[i] = proglen;
25713 @@ -613,11 +622,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25714 break;
25715 }
25716 if (proglen == oldproglen) {
25717 - image = module_alloc(max_t(unsigned int,
25718 - proglen,
25719 - sizeof(struct work_struct)));
25720 + image = module_alloc_exec(proglen);
25721 if (!image)
25722 - goto out;
25723 + goto error;
25724 }
25725 oldproglen = proglen;
25726 }
25727 @@ -633,7 +640,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25728 bpf_flush_icache(image, image + proglen);
25729
25730 fp->bpf_func = (void *)image;
25731 - }
25732 + } else
25733 +error:
25734 + kfree(fp->work);
25735 +
25736 out:
25737 kfree(addrs);
25738 return;
25739 @@ -641,18 +651,20 @@ out:
25740
25741 static void jit_free_defer(struct work_struct *arg)
25742 {
25743 - module_free(NULL, arg);
25744 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
25745 + kfree(arg);
25746 }
25747
25748 /* run from softirq, we must use a work_struct to call
25749 - * module_free() from process context
25750 + * module_free_exec() from process context
25751 */
25752 void bpf_jit_free(struct sk_filter *fp)
25753 {
25754 if (fp->bpf_func != sk_run_filter) {
25755 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
25756 + struct work_struct *work = &fp->work->work;
25757
25758 INIT_WORK(work, jit_free_defer);
25759 + fp->work->image = fp->bpf_func;
25760 schedule_work(work);
25761 }
25762 }
25763 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
25764 index bff89df..377758a 100644
25765 --- a/arch/x86/oprofile/backtrace.c
25766 +++ b/arch/x86/oprofile/backtrace.c
25767 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
25768 struct stack_frame_ia32 *fp;
25769 unsigned long bytes;
25770
25771 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
25772 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
25773 if (bytes != sizeof(bufhead))
25774 return NULL;
25775
25776 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
25777 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
25778
25779 oprofile_add_trace(bufhead[0].return_address);
25780
25781 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
25782 struct stack_frame bufhead[2];
25783 unsigned long bytes;
25784
25785 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
25786 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
25787 if (bytes != sizeof(bufhead))
25788 return NULL;
25789
25790 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
25791 {
25792 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
25793
25794 - if (!user_mode_vm(regs)) {
25795 + if (!user_mode(regs)) {
25796 unsigned long stack = kernel_stack_pointer(regs);
25797 if (depth)
25798 dump_trace(NULL, regs, (unsigned long *)stack, 0,
25799 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
25800 index cb29191..036766d 100644
25801 --- a/arch/x86/pci/mrst.c
25802 +++ b/arch/x86/pci/mrst.c
25803 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
25804 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
25805 pci_mmcfg_late_init();
25806 pcibios_enable_irq = mrst_pci_irq_enable;
25807 - pci_root_ops = pci_mrst_ops;
25808 + pax_open_kernel();
25809 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
25810 + pax_close_kernel();
25811 /* Continue with standard init */
25812 return 1;
25813 }
25814 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
25815 index da8fe05..7ee6704 100644
25816 --- a/arch/x86/pci/pcbios.c
25817 +++ b/arch/x86/pci/pcbios.c
25818 @@ -79,50 +79,93 @@ union bios32 {
25819 static struct {
25820 unsigned long address;
25821 unsigned short segment;
25822 -} bios32_indirect = { 0, __KERNEL_CS };
25823 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
25824
25825 /*
25826 * Returns the entry point for the given service, NULL on error
25827 */
25828
25829 -static unsigned long bios32_service(unsigned long service)
25830 +static unsigned long __devinit bios32_service(unsigned long service)
25831 {
25832 unsigned char return_code; /* %al */
25833 unsigned long address; /* %ebx */
25834 unsigned long length; /* %ecx */
25835 unsigned long entry; /* %edx */
25836 unsigned long flags;
25837 + struct desc_struct d, *gdt;
25838
25839 local_irq_save(flags);
25840 - __asm__("lcall *(%%edi); cld"
25841 +
25842 + gdt = get_cpu_gdt_table(smp_processor_id());
25843 +
25844 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
25845 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
25846 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
25847 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
25848 +
25849 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
25850 : "=a" (return_code),
25851 "=b" (address),
25852 "=c" (length),
25853 "=d" (entry)
25854 : "0" (service),
25855 "1" (0),
25856 - "D" (&bios32_indirect));
25857 + "D" (&bios32_indirect),
25858 + "r"(__PCIBIOS_DS)
25859 + : "memory");
25860 +
25861 + pax_open_kernel();
25862 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
25863 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
25864 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
25865 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
25866 + pax_close_kernel();
25867 +
25868 local_irq_restore(flags);
25869
25870 switch (return_code) {
25871 - case 0:
25872 - return address + entry;
25873 - case 0x80: /* Not present */
25874 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
25875 - return 0;
25876 - default: /* Shouldn't happen */
25877 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
25878 - service, return_code);
25879 + case 0: {
25880 + int cpu;
25881 + unsigned char flags;
25882 +
25883 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
25884 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
25885 + printk(KERN_WARNING "bios32_service: not valid\n");
25886 return 0;
25887 + }
25888 + address = address + PAGE_OFFSET;
25889 + length += 16UL; /* some BIOSs underreport this... */
25890 + flags = 4;
25891 + if (length >= 64*1024*1024) {
25892 + length >>= PAGE_SHIFT;
25893 + flags |= 8;
25894 + }
25895 +
25896 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25897 + gdt = get_cpu_gdt_table(cpu);
25898 + pack_descriptor(&d, address, length, 0x9b, flags);
25899 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
25900 + pack_descriptor(&d, address, length, 0x93, flags);
25901 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
25902 + }
25903 + return entry;
25904 + }
25905 + case 0x80: /* Not present */
25906 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
25907 + return 0;
25908 + default: /* Shouldn't happen */
25909 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
25910 + service, return_code);
25911 + return 0;
25912 }
25913 }
25914
25915 static struct {
25916 unsigned long address;
25917 unsigned short segment;
25918 -} pci_indirect = { 0, __KERNEL_CS };
25919 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
25920
25921 -static int pci_bios_present;
25922 +static int pci_bios_present __read_only;
25923
25924 static int __devinit check_pcibios(void)
25925 {
25926 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
25927 unsigned long flags, pcibios_entry;
25928
25929 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
25930 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
25931 + pci_indirect.address = pcibios_entry;
25932
25933 local_irq_save(flags);
25934 - __asm__(
25935 - "lcall *(%%edi); cld\n\t"
25936 + __asm__("movw %w6, %%ds\n\t"
25937 + "lcall *%%ss:(%%edi); cld\n\t"
25938 + "push %%ss\n\t"
25939 + "pop %%ds\n\t"
25940 "jc 1f\n\t"
25941 "xor %%ah, %%ah\n"
25942 "1:"
25943 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
25944 "=b" (ebx),
25945 "=c" (ecx)
25946 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
25947 - "D" (&pci_indirect)
25948 + "D" (&pci_indirect),
25949 + "r" (__PCIBIOS_DS)
25950 : "memory");
25951 local_irq_restore(flags);
25952
25953 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25954
25955 switch (len) {
25956 case 1:
25957 - __asm__("lcall *(%%esi); cld\n\t"
25958 + __asm__("movw %w6, %%ds\n\t"
25959 + "lcall *%%ss:(%%esi); cld\n\t"
25960 + "push %%ss\n\t"
25961 + "pop %%ds\n\t"
25962 "jc 1f\n\t"
25963 "xor %%ah, %%ah\n"
25964 "1:"
25965 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25966 : "1" (PCIBIOS_READ_CONFIG_BYTE),
25967 "b" (bx),
25968 "D" ((long)reg),
25969 - "S" (&pci_indirect));
25970 + "S" (&pci_indirect),
25971 + "r" (__PCIBIOS_DS));
25972 /*
25973 * Zero-extend the result beyond 8 bits, do not trust the
25974 * BIOS having done it:
25975 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25976 *value &= 0xff;
25977 break;
25978 case 2:
25979 - __asm__("lcall *(%%esi); cld\n\t"
25980 + __asm__("movw %w6, %%ds\n\t"
25981 + "lcall *%%ss:(%%esi); cld\n\t"
25982 + "push %%ss\n\t"
25983 + "pop %%ds\n\t"
25984 "jc 1f\n\t"
25985 "xor %%ah, %%ah\n"
25986 "1:"
25987 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25988 : "1" (PCIBIOS_READ_CONFIG_WORD),
25989 "b" (bx),
25990 "D" ((long)reg),
25991 - "S" (&pci_indirect));
25992 + "S" (&pci_indirect),
25993 + "r" (__PCIBIOS_DS));
25994 /*
25995 * Zero-extend the result beyond 16 bits, do not trust the
25996 * BIOS having done it:
25997 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25998 *value &= 0xffff;
25999 break;
26000 case 4:
26001 - __asm__("lcall *(%%esi); cld\n\t"
26002 + __asm__("movw %w6, %%ds\n\t"
26003 + "lcall *%%ss:(%%esi); cld\n\t"
26004 + "push %%ss\n\t"
26005 + "pop %%ds\n\t"
26006 "jc 1f\n\t"
26007 "xor %%ah, %%ah\n"
26008 "1:"
26009 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26010 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26011 "b" (bx),
26012 "D" ((long)reg),
26013 - "S" (&pci_indirect));
26014 + "S" (&pci_indirect),
26015 + "r" (__PCIBIOS_DS));
26016 break;
26017 }
26018
26019 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26020
26021 switch (len) {
26022 case 1:
26023 - __asm__("lcall *(%%esi); cld\n\t"
26024 + __asm__("movw %w6, %%ds\n\t"
26025 + "lcall *%%ss:(%%esi); cld\n\t"
26026 + "push %%ss\n\t"
26027 + "pop %%ds\n\t"
26028 "jc 1f\n\t"
26029 "xor %%ah, %%ah\n"
26030 "1:"
26031 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26032 "c" (value),
26033 "b" (bx),
26034 "D" ((long)reg),
26035 - "S" (&pci_indirect));
26036 + "S" (&pci_indirect),
26037 + "r" (__PCIBIOS_DS));
26038 break;
26039 case 2:
26040 - __asm__("lcall *(%%esi); cld\n\t"
26041 + __asm__("movw %w6, %%ds\n\t"
26042 + "lcall *%%ss:(%%esi); cld\n\t"
26043 + "push %%ss\n\t"
26044 + "pop %%ds\n\t"
26045 "jc 1f\n\t"
26046 "xor %%ah, %%ah\n"
26047 "1:"
26048 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26049 "c" (value),
26050 "b" (bx),
26051 "D" ((long)reg),
26052 - "S" (&pci_indirect));
26053 + "S" (&pci_indirect),
26054 + "r" (__PCIBIOS_DS));
26055 break;
26056 case 4:
26057 - __asm__("lcall *(%%esi); cld\n\t"
26058 + __asm__("movw %w6, %%ds\n\t"
26059 + "lcall *%%ss:(%%esi); cld\n\t"
26060 + "push %%ss\n\t"
26061 + "pop %%ds\n\t"
26062 "jc 1f\n\t"
26063 "xor %%ah, %%ah\n"
26064 "1:"
26065 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26066 "c" (value),
26067 "b" (bx),
26068 "D" ((long)reg),
26069 - "S" (&pci_indirect));
26070 + "S" (&pci_indirect),
26071 + "r" (__PCIBIOS_DS));
26072 break;
26073 }
26074
26075 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26076
26077 DBG("PCI: Fetching IRQ routing table... ");
26078 __asm__("push %%es\n\t"
26079 + "movw %w8, %%ds\n\t"
26080 "push %%ds\n\t"
26081 "pop %%es\n\t"
26082 - "lcall *(%%esi); cld\n\t"
26083 + "lcall *%%ss:(%%esi); cld\n\t"
26084 "pop %%es\n\t"
26085 + "push %%ss\n\t"
26086 + "pop %%ds\n"
26087 "jc 1f\n\t"
26088 "xor %%ah, %%ah\n"
26089 "1:"
26090 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26091 "1" (0),
26092 "D" ((long) &opt),
26093 "S" (&pci_indirect),
26094 - "m" (opt)
26095 + "m" (opt),
26096 + "r" (__PCIBIOS_DS)
26097 : "memory");
26098 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26099 if (ret & 0xff00)
26100 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26101 {
26102 int ret;
26103
26104 - __asm__("lcall *(%%esi); cld\n\t"
26105 + __asm__("movw %w5, %%ds\n\t"
26106 + "lcall *%%ss:(%%esi); cld\n\t"
26107 + "push %%ss\n\t"
26108 + "pop %%ds\n"
26109 "jc 1f\n\t"
26110 "xor %%ah, %%ah\n"
26111 "1:"
26112 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26113 : "0" (PCIBIOS_SET_PCI_HW_INT),
26114 "b" ((dev->bus->number << 8) | dev->devfn),
26115 "c" ((irq << 8) | (pin + 10)),
26116 - "S" (&pci_indirect));
26117 + "S" (&pci_indirect),
26118 + "r" (__PCIBIOS_DS));
26119 return !(ret & 0xff00);
26120 }
26121 EXPORT_SYMBOL(pcibios_set_irq_routing);
26122 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26123 index 40e4469..1ab536e 100644
26124 --- a/arch/x86/platform/efi/efi_32.c
26125 +++ b/arch/x86/platform/efi/efi_32.c
26126 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26127 {
26128 struct desc_ptr gdt_descr;
26129
26130 +#ifdef CONFIG_PAX_KERNEXEC
26131 + struct desc_struct d;
26132 +#endif
26133 +
26134 local_irq_save(efi_rt_eflags);
26135
26136 load_cr3(initial_page_table);
26137 __flush_tlb_all();
26138
26139 +#ifdef CONFIG_PAX_KERNEXEC
26140 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26141 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26142 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26143 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26144 +#endif
26145 +
26146 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26147 gdt_descr.size = GDT_SIZE - 1;
26148 load_gdt(&gdt_descr);
26149 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26150 {
26151 struct desc_ptr gdt_descr;
26152
26153 +#ifdef CONFIG_PAX_KERNEXEC
26154 + struct desc_struct d;
26155 +
26156 + memset(&d, 0, sizeof d);
26157 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26158 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26159 +#endif
26160 +
26161 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26162 gdt_descr.size = GDT_SIZE - 1;
26163 load_gdt(&gdt_descr);
26164 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26165 index fbe66e6..c5c0dd2 100644
26166 --- a/arch/x86/platform/efi/efi_stub_32.S
26167 +++ b/arch/x86/platform/efi/efi_stub_32.S
26168 @@ -6,7 +6,9 @@
26169 */
26170
26171 #include <linux/linkage.h>
26172 +#include <linux/init.h>
26173 #include <asm/page_types.h>
26174 +#include <asm/segment.h>
26175
26176 /*
26177 * efi_call_phys(void *, ...) is a function with variable parameters.
26178 @@ -20,7 +22,7 @@
26179 * service functions will comply with gcc calling convention, too.
26180 */
26181
26182 -.text
26183 +__INIT
26184 ENTRY(efi_call_phys)
26185 /*
26186 * 0. The function can only be called in Linux kernel. So CS has been
26187 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
26188 * The mapping of lower virtual memory has been created in prelog and
26189 * epilog.
26190 */
26191 - movl $1f, %edx
26192 - subl $__PAGE_OFFSET, %edx
26193 - jmp *%edx
26194 + movl $(__KERNEXEC_EFI_DS), %edx
26195 + mov %edx, %ds
26196 + mov %edx, %es
26197 + mov %edx, %ss
26198 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
26199 1:
26200
26201 /*
26202 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
26203 * parameter 2, ..., param n. To make things easy, we save the return
26204 * address of efi_call_phys in a global variable.
26205 */
26206 - popl %edx
26207 - movl %edx, saved_return_addr
26208 - /* get the function pointer into ECX*/
26209 - popl %ecx
26210 - movl %ecx, efi_rt_function_ptr
26211 - movl $2f, %edx
26212 - subl $__PAGE_OFFSET, %edx
26213 - pushl %edx
26214 + popl (saved_return_addr)
26215 + popl (efi_rt_function_ptr)
26216
26217 /*
26218 * 3. Clear PG bit in %CR0.
26219 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
26220 /*
26221 * 5. Call the physical function.
26222 */
26223 - jmp *%ecx
26224 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
26225
26226 -2:
26227 /*
26228 * 6. After EFI runtime service returns, control will return to
26229 * following instruction. We'd better readjust stack pointer first.
26230 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
26231 movl %cr0, %edx
26232 orl $0x80000000, %edx
26233 movl %edx, %cr0
26234 - jmp 1f
26235 -1:
26236 +
26237 /*
26238 * 8. Now restore the virtual mode from flat mode by
26239 * adding EIP with PAGE_OFFSET.
26240 */
26241 - movl $1f, %edx
26242 - jmp *%edx
26243 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
26244 1:
26245 + movl $(__KERNEL_DS), %edx
26246 + mov %edx, %ds
26247 + mov %edx, %es
26248 + mov %edx, %ss
26249
26250 /*
26251 * 9. Balance the stack. And because EAX contain the return value,
26252 * we'd better not clobber it.
26253 */
26254 - leal efi_rt_function_ptr, %edx
26255 - movl (%edx), %ecx
26256 - pushl %ecx
26257 + pushl (efi_rt_function_ptr)
26258
26259 /*
26260 - * 10. Push the saved return address onto the stack and return.
26261 + * 10. Return to the saved return address.
26262 */
26263 - leal saved_return_addr, %edx
26264 - movl (%edx), %ecx
26265 - pushl %ecx
26266 - ret
26267 + jmpl *(saved_return_addr)
26268 ENDPROC(efi_call_phys)
26269 .previous
26270
26271 -.data
26272 +__INITDATA
26273 saved_return_addr:
26274 .long 0
26275 efi_rt_function_ptr:
26276 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26277 index 4c07cca..2c8427d 100644
26278 --- a/arch/x86/platform/efi/efi_stub_64.S
26279 +++ b/arch/x86/platform/efi/efi_stub_64.S
26280 @@ -7,6 +7,7 @@
26281 */
26282
26283 #include <linux/linkage.h>
26284 +#include <asm/alternative-asm.h>
26285
26286 #define SAVE_XMM \
26287 mov %rsp, %rax; \
26288 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
26289 call *%rdi
26290 addq $32, %rsp
26291 RESTORE_XMM
26292 + pax_force_retaddr 0, 1
26293 ret
26294 ENDPROC(efi_call0)
26295
26296 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
26297 call *%rdi
26298 addq $32, %rsp
26299 RESTORE_XMM
26300 + pax_force_retaddr 0, 1
26301 ret
26302 ENDPROC(efi_call1)
26303
26304 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
26305 call *%rdi
26306 addq $32, %rsp
26307 RESTORE_XMM
26308 + pax_force_retaddr 0, 1
26309 ret
26310 ENDPROC(efi_call2)
26311
26312 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
26313 call *%rdi
26314 addq $32, %rsp
26315 RESTORE_XMM
26316 + pax_force_retaddr 0, 1
26317 ret
26318 ENDPROC(efi_call3)
26319
26320 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
26321 call *%rdi
26322 addq $32, %rsp
26323 RESTORE_XMM
26324 + pax_force_retaddr 0, 1
26325 ret
26326 ENDPROC(efi_call4)
26327
26328 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
26329 call *%rdi
26330 addq $48, %rsp
26331 RESTORE_XMM
26332 + pax_force_retaddr 0, 1
26333 ret
26334 ENDPROC(efi_call5)
26335
26336 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
26337 call *%rdi
26338 addq $48, %rsp
26339 RESTORE_XMM
26340 + pax_force_retaddr 0, 1
26341 ret
26342 ENDPROC(efi_call6)
26343 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26344 index 475e2cd..1b8e708 100644
26345 --- a/arch/x86/platform/mrst/mrst.c
26346 +++ b/arch/x86/platform/mrst/mrst.c
26347 @@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26348 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26349 int sfi_mrtc_num;
26350
26351 -static void mrst_power_off(void)
26352 +static __noreturn void mrst_power_off(void)
26353 {
26354 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
26355 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
26356 + BUG();
26357 }
26358
26359 -static void mrst_reboot(void)
26360 +static __noreturn void mrst_reboot(void)
26361 {
26362 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
26363 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
26364 else
26365 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26366 + BUG();
26367 }
26368
26369 /* parse all the mtimer info to a static mtimer array */
26370 diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
26371 index 3ae0e61..4202d86 100644
26372 --- a/arch/x86/platform/uv/tlb_uv.c
26373 +++ b/arch/x86/platform/uv/tlb_uv.c
26374 @@ -1424,6 +1424,8 @@ static ssize_t tunables_read(struct file *file, char __user *userbuf,
26375 * 0: display meaning of the statistics
26376 */
26377 static ssize_t ptc_proc_write(struct file *file, const char __user *user,
26378 + size_t count, loff_t *data) __size_overflow(3);
26379 +static ssize_t ptc_proc_write(struct file *file, const char __user *user,
26380 size_t count, loff_t *data)
26381 {
26382 int cpu;
26383 @@ -1539,6 +1541,8 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
26384 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
26385 */
26386 static ssize_t tunables_write(struct file *file, const char __user *user,
26387 + size_t count, loff_t *data) __size_overflow(3);
26388 +static ssize_t tunables_write(struct file *file, const char __user *user,
26389 size_t count, loff_t *data)
26390 {
26391 int cpu;
26392 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26393 index f10c0af..3ec1f95 100644
26394 --- a/arch/x86/power/cpu.c
26395 +++ b/arch/x86/power/cpu.c
26396 @@ -131,7 +131,7 @@ static void do_fpu_end(void)
26397 static void fix_processor_context(void)
26398 {
26399 int cpu = smp_processor_id();
26400 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26401 + struct tss_struct *t = init_tss + cpu;
26402
26403 set_tss_desc(cpu, t); /*
26404 * This just modifies memory; should not be
26405 @@ -141,7 +141,9 @@ static void fix_processor_context(void)
26406 */
26407
26408 #ifdef CONFIG_X86_64
26409 + pax_open_kernel();
26410 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26411 + pax_close_kernel();
26412
26413 syscall_init(); /* This sets MSR_*STAR and related */
26414 #endif
26415 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26416 index 5d17950..2253fc9 100644
26417 --- a/arch/x86/vdso/Makefile
26418 +++ b/arch/x86/vdso/Makefile
26419 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
26420 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
26421 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
26422
26423 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26424 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26425 GCOV_PROFILE := n
26426
26427 #
26428 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26429 index 468d591..8e80a0a 100644
26430 --- a/arch/x86/vdso/vdso32-setup.c
26431 +++ b/arch/x86/vdso/vdso32-setup.c
26432 @@ -25,6 +25,7 @@
26433 #include <asm/tlbflush.h>
26434 #include <asm/vdso.h>
26435 #include <asm/proto.h>
26436 +#include <asm/mman.h>
26437
26438 enum {
26439 VDSO_DISABLED = 0,
26440 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26441 void enable_sep_cpu(void)
26442 {
26443 int cpu = get_cpu();
26444 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26445 + struct tss_struct *tss = init_tss + cpu;
26446
26447 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26448 put_cpu();
26449 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26450 gate_vma.vm_start = FIXADDR_USER_START;
26451 gate_vma.vm_end = FIXADDR_USER_END;
26452 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26453 - gate_vma.vm_page_prot = __P101;
26454 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26455 /*
26456 * Make sure the vDSO gets into every core dump.
26457 * Dumping its contents makes post-mortem fully interpretable later
26458 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26459 if (compat)
26460 addr = VDSO_HIGH_BASE;
26461 else {
26462 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26463 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26464 if (IS_ERR_VALUE(addr)) {
26465 ret = addr;
26466 goto up_fail;
26467 }
26468 }
26469
26470 - current->mm->context.vdso = (void *)addr;
26471 + current->mm->context.vdso = addr;
26472
26473 if (compat_uses_vma || !compat) {
26474 /*
26475 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26476 }
26477
26478 current_thread_info()->sysenter_return =
26479 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26480 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26481
26482 up_fail:
26483 if (ret)
26484 - current->mm->context.vdso = NULL;
26485 + current->mm->context.vdso = 0;
26486
26487 up_write(&mm->mmap_sem);
26488
26489 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
26490
26491 const char *arch_vma_name(struct vm_area_struct *vma)
26492 {
26493 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26494 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26495 return "[vdso]";
26496 +
26497 +#ifdef CONFIG_PAX_SEGMEXEC
26498 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26499 + return "[vdso]";
26500 +#endif
26501 +
26502 return NULL;
26503 }
26504
26505 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26506 * Check to see if the corresponding task was created in compat vdso
26507 * mode.
26508 */
26509 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26510 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26511 return &gate_vma;
26512 return NULL;
26513 }
26514 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26515 index 153407c..611cba9 100644
26516 --- a/arch/x86/vdso/vma.c
26517 +++ b/arch/x86/vdso/vma.c
26518 @@ -16,8 +16,6 @@
26519 #include <asm/vdso.h>
26520 #include <asm/page.h>
26521
26522 -unsigned int __read_mostly vdso_enabled = 1;
26523 -
26524 extern char vdso_start[], vdso_end[];
26525 extern unsigned short vdso_sync_cpuid;
26526
26527 @@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26528 * unaligned here as a result of stack start randomization.
26529 */
26530 addr = PAGE_ALIGN(addr);
26531 - addr = align_addr(addr, NULL, ALIGN_VDSO);
26532
26533 return addr;
26534 }
26535 @@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26536 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26537 {
26538 struct mm_struct *mm = current->mm;
26539 - unsigned long addr;
26540 + unsigned long addr = 0;
26541 int ret;
26542
26543 - if (!vdso_enabled)
26544 - return 0;
26545 -
26546 down_write(&mm->mmap_sem);
26547 +
26548 +#ifdef CONFIG_PAX_RANDMMAP
26549 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26550 +#endif
26551 +
26552 addr = vdso_addr(mm->start_stack, vdso_size);
26553 + addr = align_addr(addr, NULL, ALIGN_VDSO);
26554 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26555 if (IS_ERR_VALUE(addr)) {
26556 ret = addr;
26557 goto up_fail;
26558 }
26559
26560 - current->mm->context.vdso = (void *)addr;
26561 + mm->context.vdso = addr;
26562
26563 ret = install_special_mapping(mm, addr, vdso_size,
26564 VM_READ|VM_EXEC|
26565 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
26566 VM_ALWAYSDUMP,
26567 vdso_pages);
26568 - if (ret) {
26569 - current->mm->context.vdso = NULL;
26570 - goto up_fail;
26571 - }
26572 +
26573 + if (ret)
26574 + mm->context.vdso = 0;
26575
26576 up_fail:
26577 up_write(&mm->mmap_sem);
26578 return ret;
26579 }
26580 -
26581 -static __init int vdso_setup(char *s)
26582 -{
26583 - vdso_enabled = simple_strtoul(s, NULL, 0);
26584 - return 0;
26585 -}
26586 -__setup("vdso=", vdso_setup);
26587 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26588 index 4172af8..2c8ed7f 100644
26589 --- a/arch/x86/xen/enlighten.c
26590 +++ b/arch/x86/xen/enlighten.c
26591 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26592
26593 struct shared_info xen_dummy_shared_info;
26594
26595 -void *xen_initial_gdt;
26596 -
26597 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
26598 __read_mostly int xen_have_vector_callback;
26599 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
26600 @@ -1029,30 +1027,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
26601 #endif
26602 };
26603
26604 -static void xen_reboot(int reason)
26605 +static __noreturn void xen_reboot(int reason)
26606 {
26607 struct sched_shutdown r = { .reason = reason };
26608
26609 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
26610 - BUG();
26611 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
26612 + BUG();
26613 }
26614
26615 -static void xen_restart(char *msg)
26616 +static __noreturn void xen_restart(char *msg)
26617 {
26618 xen_reboot(SHUTDOWN_reboot);
26619 }
26620
26621 -static void xen_emergency_restart(void)
26622 +static __noreturn void xen_emergency_restart(void)
26623 {
26624 xen_reboot(SHUTDOWN_reboot);
26625 }
26626
26627 -static void xen_machine_halt(void)
26628 +static __noreturn void xen_machine_halt(void)
26629 {
26630 xen_reboot(SHUTDOWN_poweroff);
26631 }
26632
26633 -static void xen_machine_power_off(void)
26634 +static __noreturn void xen_machine_power_off(void)
26635 {
26636 if (pm_power_off)
26637 pm_power_off();
26638 @@ -1155,7 +1153,17 @@ asmlinkage void __init xen_start_kernel(void)
26639 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26640
26641 /* Work out if we support NX */
26642 - x86_configure_nx();
26643 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26644 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26645 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26646 + unsigned l, h;
26647 +
26648 + __supported_pte_mask |= _PAGE_NX;
26649 + rdmsr(MSR_EFER, l, h);
26650 + l |= EFER_NX;
26651 + wrmsr(MSR_EFER, l, h);
26652 + }
26653 +#endif
26654
26655 xen_setup_features();
26656
26657 @@ -1186,13 +1194,6 @@ asmlinkage void __init xen_start_kernel(void)
26658
26659 machine_ops = xen_machine_ops;
26660
26661 - /*
26662 - * The only reliable way to retain the initial address of the
26663 - * percpu gdt_page is to remember it here, so we can go and
26664 - * mark it RW later, when the initial percpu area is freed.
26665 - */
26666 - xen_initial_gdt = &per_cpu(gdt_page, 0);
26667 -
26668 xen_smp_init();
26669
26670 #ifdef CONFIG_ACPI_NUMA
26671 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
26672 index 95c1cf6..4bfa5be 100644
26673 --- a/arch/x86/xen/mmu.c
26674 +++ b/arch/x86/xen/mmu.c
26675 @@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
26676 convert_pfn_mfn(init_level4_pgt);
26677 convert_pfn_mfn(level3_ident_pgt);
26678 convert_pfn_mfn(level3_kernel_pgt);
26679 + convert_pfn_mfn(level3_vmalloc_start_pgt);
26680 + convert_pfn_mfn(level3_vmalloc_end_pgt);
26681 + convert_pfn_mfn(level3_vmemmap_pgt);
26682
26683 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
26684 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
26685 @@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
26686 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
26687 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
26688 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
26689 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
26690 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
26691 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
26692 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
26693 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
26694 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
26695 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
26696
26697 @@ -1958,6 +1965,7 @@ static void __init xen_post_allocator_init(void)
26698 pv_mmu_ops.set_pud = xen_set_pud;
26699 #if PAGETABLE_LEVELS == 4
26700 pv_mmu_ops.set_pgd = xen_set_pgd;
26701 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
26702 #endif
26703
26704 /* This will work as long as patching hasn't happened yet
26705 @@ -2039,6 +2047,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
26706 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
26707 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
26708 .set_pgd = xen_set_pgd_hyper,
26709 + .set_pgd_batched = xen_set_pgd_hyper,
26710
26711 .alloc_pud = xen_alloc_pmd_init,
26712 .release_pud = xen_release_pmd_init,
26713 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
26714 index 501d4e0..e877605 100644
26715 --- a/arch/x86/xen/smp.c
26716 +++ b/arch/x86/xen/smp.c
26717 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
26718 {
26719 BUG_ON(smp_processor_id() != 0);
26720 native_smp_prepare_boot_cpu();
26721 -
26722 - /* We've switched to the "real" per-cpu gdt, so make sure the
26723 - old memory can be recycled */
26724 - make_lowmem_page_readwrite(xen_initial_gdt);
26725 -
26726 xen_filter_cpu_maps();
26727 xen_setup_vcpu_info_placement();
26728 }
26729 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
26730 gdt = get_cpu_gdt_table(cpu);
26731
26732 ctxt->flags = VGCF_IN_KERNEL;
26733 - ctxt->user_regs.ds = __USER_DS;
26734 - ctxt->user_regs.es = __USER_DS;
26735 + ctxt->user_regs.ds = __KERNEL_DS;
26736 + ctxt->user_regs.es = __KERNEL_DS;
26737 ctxt->user_regs.ss = __KERNEL_DS;
26738 #ifdef CONFIG_X86_32
26739 ctxt->user_regs.fs = __KERNEL_PERCPU;
26740 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
26741 + savesegment(gs, ctxt->user_regs.gs);
26742 #else
26743 ctxt->gs_base_kernel = per_cpu_offset(cpu);
26744 #endif
26745 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
26746 int rc;
26747
26748 per_cpu(current_task, cpu) = idle;
26749 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
26750 #ifdef CONFIG_X86_32
26751 irq_ctx_init(cpu);
26752 #else
26753 clear_tsk_thread_flag(idle, TIF_FORK);
26754 - per_cpu(kernel_stack, cpu) =
26755 - (unsigned long)task_stack_page(idle) -
26756 - KERNEL_STACK_OFFSET + THREAD_SIZE;
26757 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26758 #endif
26759 xen_setup_runstate_info(cpu);
26760 xen_setup_timer(cpu);
26761 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
26762 index b040b0e..8cc4fe0 100644
26763 --- a/arch/x86/xen/xen-asm_32.S
26764 +++ b/arch/x86/xen/xen-asm_32.S
26765 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
26766 ESP_OFFSET=4 # bytes pushed onto stack
26767
26768 /*
26769 - * Store vcpu_info pointer for easy access. Do it this way to
26770 - * avoid having to reload %fs
26771 + * Store vcpu_info pointer for easy access.
26772 */
26773 #ifdef CONFIG_SMP
26774 - GET_THREAD_INFO(%eax)
26775 - movl TI_cpu(%eax), %eax
26776 - movl __per_cpu_offset(,%eax,4), %eax
26777 - mov xen_vcpu(%eax), %eax
26778 + push %fs
26779 + mov $(__KERNEL_PERCPU), %eax
26780 + mov %eax, %fs
26781 + mov PER_CPU_VAR(xen_vcpu), %eax
26782 + pop %fs
26783 #else
26784 movl xen_vcpu, %eax
26785 #endif
26786 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
26787 index aaa7291..3f77960 100644
26788 --- a/arch/x86/xen/xen-head.S
26789 +++ b/arch/x86/xen/xen-head.S
26790 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
26791 #ifdef CONFIG_X86_32
26792 mov %esi,xen_start_info
26793 mov $init_thread_union+THREAD_SIZE,%esp
26794 +#ifdef CONFIG_SMP
26795 + movl $cpu_gdt_table,%edi
26796 + movl $__per_cpu_load,%eax
26797 + movw %ax,__KERNEL_PERCPU + 2(%edi)
26798 + rorl $16,%eax
26799 + movb %al,__KERNEL_PERCPU + 4(%edi)
26800 + movb %ah,__KERNEL_PERCPU + 7(%edi)
26801 + movl $__per_cpu_end - 1,%eax
26802 + subl $__per_cpu_start,%eax
26803 + movw %ax,__KERNEL_PERCPU + 0(%edi)
26804 +#endif
26805 #else
26806 mov %rsi,xen_start_info
26807 mov $init_thread_union+THREAD_SIZE,%rsp
26808 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
26809 index b095739..8c17bcd 100644
26810 --- a/arch/x86/xen/xen-ops.h
26811 +++ b/arch/x86/xen/xen-ops.h
26812 @@ -10,8 +10,6 @@
26813 extern const char xen_hypervisor_callback[];
26814 extern const char xen_failsafe_callback[];
26815
26816 -extern void *xen_initial_gdt;
26817 -
26818 struct trap_info;
26819 void xen_copy_trap_info(struct trap_info *traps);
26820
26821 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
26822 index 525bd3d..ef888b1 100644
26823 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
26824 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
26825 @@ -119,9 +119,9 @@
26826 ----------------------------------------------------------------------*/
26827
26828 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
26829 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
26830 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
26831 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
26832 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26833
26834 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
26835 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
26836 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
26837 index 2f33760..835e50a 100644
26838 --- a/arch/xtensa/variants/fsf/include/variant/core.h
26839 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
26840 @@ -11,6 +11,7 @@
26841 #ifndef _XTENSA_CORE_H
26842 #define _XTENSA_CORE_H
26843
26844 +#include <linux/const.h>
26845
26846 /****************************************************************************
26847 Parameters Useful for Any Code, USER or PRIVILEGED
26848 @@ -112,9 +113,9 @@
26849 ----------------------------------------------------------------------*/
26850
26851 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
26852 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
26853 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
26854 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
26855 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26856
26857 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
26858 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
26859 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
26860 index af00795..2bb8105 100644
26861 --- a/arch/xtensa/variants/s6000/include/variant/core.h
26862 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
26863 @@ -11,6 +11,7 @@
26864 #ifndef _XTENSA_CORE_CONFIGURATION_H
26865 #define _XTENSA_CORE_CONFIGURATION_H
26866
26867 +#include <linux/const.h>
26868
26869 /****************************************************************************
26870 Parameters Useful for Any Code, USER or PRIVILEGED
26871 @@ -118,9 +119,9 @@
26872 ----------------------------------------------------------------------*/
26873
26874 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
26875 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
26876 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
26877 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
26878 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26879
26880 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
26881 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
26882 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
26883 index 58916af..9cb880b 100644
26884 --- a/block/blk-iopoll.c
26885 +++ b/block/blk-iopoll.c
26886 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
26887 }
26888 EXPORT_SYMBOL(blk_iopoll_complete);
26889
26890 -static void blk_iopoll_softirq(struct softirq_action *h)
26891 +static void blk_iopoll_softirq(void)
26892 {
26893 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
26894 int rearm = 0, budget = blk_iopoll_budget;
26895 diff --git a/block/blk-map.c b/block/blk-map.c
26896 index 623e1cd..ca1e109 100644
26897 --- a/block/blk-map.c
26898 +++ b/block/blk-map.c
26899 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
26900 if (!len || !kbuf)
26901 return -EINVAL;
26902
26903 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
26904 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
26905 if (do_copy)
26906 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
26907 else
26908 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
26909 index 1366a89..e17f54b 100644
26910 --- a/block/blk-softirq.c
26911 +++ b/block/blk-softirq.c
26912 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
26913 * Softirq action handler - move entries to local list and loop over them
26914 * while passing them to the queue registered handler.
26915 */
26916 -static void blk_done_softirq(struct softirq_action *h)
26917 +static void blk_done_softirq(void)
26918 {
26919 struct list_head *cpu_list, local_list;
26920
26921 diff --git a/block/bsg.c b/block/bsg.c
26922 index ff64ae3..593560c 100644
26923 --- a/block/bsg.c
26924 +++ b/block/bsg.c
26925 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
26926 struct sg_io_v4 *hdr, struct bsg_device *bd,
26927 fmode_t has_write_perm)
26928 {
26929 + unsigned char tmpcmd[sizeof(rq->__cmd)];
26930 + unsigned char *cmdptr;
26931 +
26932 if (hdr->request_len > BLK_MAX_CDB) {
26933 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
26934 if (!rq->cmd)
26935 return -ENOMEM;
26936 - }
26937 + cmdptr = rq->cmd;
26938 + } else
26939 + cmdptr = tmpcmd;
26940
26941 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
26942 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
26943 hdr->request_len))
26944 return -EFAULT;
26945
26946 + if (cmdptr != rq->cmd)
26947 + memcpy(rq->cmd, cmdptr, hdr->request_len);
26948 +
26949 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
26950 if (blk_verify_command(rq->cmd, has_write_perm))
26951 return -EPERM;
26952 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
26953 index 7c668c8..db3521c 100644
26954 --- a/block/compat_ioctl.c
26955 +++ b/block/compat_ioctl.c
26956 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
26957 err |= __get_user(f->spec1, &uf->spec1);
26958 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
26959 err |= __get_user(name, &uf->name);
26960 - f->name = compat_ptr(name);
26961 + f->name = (void __force_kernel *)compat_ptr(name);
26962 if (err) {
26963 err = -EFAULT;
26964 goto out;
26965 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
26966 index 6296b40..417c00f 100644
26967 --- a/block/partitions/efi.c
26968 +++ b/block/partitions/efi.c
26969 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
26970 if (!gpt)
26971 return NULL;
26972
26973 + if (!le32_to_cpu(gpt->num_partition_entries))
26974 + return NULL;
26975 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
26976 + if (!pte)
26977 + return NULL;
26978 +
26979 count = le32_to_cpu(gpt->num_partition_entries) *
26980 le32_to_cpu(gpt->sizeof_partition_entry);
26981 - if (!count)
26982 - return NULL;
26983 - pte = kzalloc(count, GFP_KERNEL);
26984 - if (!pte)
26985 - return NULL;
26986 -
26987 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
26988 (u8 *) pte,
26989 count) < count) {
26990 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
26991 index 260fa80..e8f3caf 100644
26992 --- a/block/scsi_ioctl.c
26993 +++ b/block/scsi_ioctl.c
26994 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
26995 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
26996 struct sg_io_hdr *hdr, fmode_t mode)
26997 {
26998 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
26999 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27000 + unsigned char *cmdptr;
27001 +
27002 + if (rq->cmd != rq->__cmd)
27003 + cmdptr = rq->cmd;
27004 + else
27005 + cmdptr = tmpcmd;
27006 +
27007 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27008 return -EFAULT;
27009 +
27010 + if (cmdptr != rq->cmd)
27011 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27012 +
27013 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27014 return -EPERM;
27015
27016 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27017 int err;
27018 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27019 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27020 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27021 + unsigned char *cmdptr;
27022
27023 if (!sic)
27024 return -EINVAL;
27025 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27026 */
27027 err = -EFAULT;
27028 rq->cmd_len = cmdlen;
27029 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27030 +
27031 + if (rq->cmd != rq->__cmd)
27032 + cmdptr = rq->cmd;
27033 + else
27034 + cmdptr = tmpcmd;
27035 +
27036 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27037 goto error;
27038
27039 + if (rq->cmd != cmdptr)
27040 + memcpy(rq->cmd, cmdptr, cmdlen);
27041 +
27042 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27043 goto error;
27044
27045 diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
27046 index a0f768c..1da9c73 100644
27047 --- a/crypto/ablkcipher.c
27048 +++ b/crypto/ablkcipher.c
27049 @@ -307,6 +307,8 @@ int ablkcipher_walk_phys(struct ablkcipher_request *req,
27050 EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
27051
27052 static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
27053 + unsigned int keylen) __size_overflow(3);
27054 +static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
27055 unsigned int keylen)
27056 {
27057 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
27058 @@ -329,6 +331,8 @@ static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
27059 }
27060
27061 static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
27062 + unsigned int keylen) __size_overflow(3);
27063 +static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
27064 unsigned int keylen)
27065 {
27066 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
27067 diff --git a/crypto/aead.c b/crypto/aead.c
27068 index 04add3dc..983032f 100644
27069 --- a/crypto/aead.c
27070 +++ b/crypto/aead.c
27071 @@ -27,6 +27,8 @@
27072 #include "internal.h"
27073
27074 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
27075 + unsigned int keylen) __size_overflow(3);
27076 +static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
27077 unsigned int keylen)
27078 {
27079 struct aead_alg *aead = crypto_aead_alg(tfm);
27080 @@ -48,6 +50,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
27081 return ret;
27082 }
27083
27084 +static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
27085 static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
27086 {
27087 struct aead_alg *aead = crypto_aead_alg(tfm);
27088 diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
27089 index 1e61d1a..cf06b86 100644
27090 --- a/crypto/blkcipher.c
27091 +++ b/crypto/blkcipher.c
27092 @@ -359,6 +359,8 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
27093 EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
27094
27095 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27096 + unsigned int keylen) __size_overflow(3);
27097 +static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27098 unsigned int keylen)
27099 {
27100 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
27101 @@ -380,6 +382,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27102 return ret;
27103 }
27104
27105 +static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
27106 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
27107 {
27108 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
27109 diff --git a/crypto/cipher.c b/crypto/cipher.c
27110 index 39541e0..802d956 100644
27111 --- a/crypto/cipher.c
27112 +++ b/crypto/cipher.c
27113 @@ -21,6 +21,8 @@
27114 #include "internal.h"
27115
27116 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27117 + unsigned int keylen) __size_overflow(3);
27118 +static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27119 unsigned int keylen)
27120 {
27121 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
27122 @@ -43,6 +45,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27123
27124 }
27125
27126 +static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
27127 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
27128 {
27129 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
27130 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27131 index 671d4d6..5f24030 100644
27132 --- a/crypto/cryptd.c
27133 +++ b/crypto/cryptd.c
27134 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27135
27136 struct cryptd_blkcipher_request_ctx {
27137 crypto_completion_t complete;
27138 -};
27139 +} __no_const;
27140
27141 struct cryptd_hash_ctx {
27142 struct crypto_shash *child;
27143 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27144
27145 struct cryptd_aead_request_ctx {
27146 crypto_completion_t complete;
27147 -};
27148 +} __no_const;
27149
27150 static void cryptd_queue_worker(struct work_struct *work);
27151
27152 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27153 index 5d41894..22021e4 100644
27154 --- a/drivers/acpi/apei/cper.c
27155 +++ b/drivers/acpi/apei/cper.c
27156 @@ -38,12 +38,12 @@
27157 */
27158 u64 cper_next_record_id(void)
27159 {
27160 - static atomic64_t seq;
27161 + static atomic64_unchecked_t seq;
27162
27163 - if (!atomic64_read(&seq))
27164 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
27165 + if (!atomic64_read_unchecked(&seq))
27166 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27167
27168 - return atomic64_inc_return(&seq);
27169 + return atomic64_inc_return_unchecked(&seq);
27170 }
27171 EXPORT_SYMBOL_GPL(cper_next_record_id);
27172
27173 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27174 index 86933ca..5cb1a69 100644
27175 --- a/drivers/acpi/battery.c
27176 +++ b/drivers/acpi/battery.c
27177 @@ -787,6 +787,9 @@ static int acpi_battery_print_alarm(struct seq_file *seq, int result)
27178
27179 static ssize_t acpi_battery_write_alarm(struct file *file,
27180 const char __user * buffer,
27181 + size_t count, loff_t * ppos) __size_overflow(3);
27182 +static ssize_t acpi_battery_write_alarm(struct file *file,
27183 + const char __user * buffer,
27184 size_t count, loff_t * ppos)
27185 {
27186 int result = 0;
27187 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27188 index b258cab..3fb7da7 100644
27189 --- a/drivers/acpi/ec_sys.c
27190 +++ b/drivers/acpi/ec_sys.c
27191 @@ -12,6 +12,7 @@
27192 #include <linux/acpi.h>
27193 #include <linux/debugfs.h>
27194 #include <linux/module.h>
27195 +#include <linux/uaccess.h>
27196 #include "internal.h"
27197
27198 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27199 @@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27200 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27201 */
27202 unsigned int size = EC_SPACE_SIZE;
27203 - u8 *data = (u8 *) buf;
27204 + u8 data;
27205 loff_t init_off = *off;
27206 int err = 0;
27207
27208 @@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27209 size = count;
27210
27211 while (size) {
27212 - err = ec_read(*off, &data[*off - init_off]);
27213 + err = ec_read(*off, &data);
27214 if (err)
27215 return err;
27216 + if (put_user(data, &buf[*off - init_off]))
27217 + return -EFAULT;
27218 *off += 1;
27219 size--;
27220 }
27221 @@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27222
27223 unsigned int size = count;
27224 loff_t init_off = *off;
27225 - u8 *data = (u8 *) buf;
27226 int err = 0;
27227
27228 if (*off >= EC_SPACE_SIZE)
27229 @@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27230 }
27231
27232 while (size) {
27233 - u8 byte_write = data[*off - init_off];
27234 + u8 byte_write;
27235 + if (get_user(byte_write, &buf[*off - init_off]))
27236 + return -EFAULT;
27237 err = ec_write(*off, byte_write);
27238 if (err)
27239 return err;
27240 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27241 index 251c7b62..000462d 100644
27242 --- a/drivers/acpi/proc.c
27243 +++ b/drivers/acpi/proc.c
27244 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27245 size_t count, loff_t * ppos)
27246 {
27247 struct list_head *node, *next;
27248 - char strbuf[5];
27249 - char str[5] = "";
27250 - unsigned int len = count;
27251 + char strbuf[5] = {0};
27252
27253 - if (len > 4)
27254 - len = 4;
27255 - if (len < 0)
27256 + if (count > 4)
27257 + count = 4;
27258 + if (copy_from_user(strbuf, buffer, count))
27259 return -EFAULT;
27260 -
27261 - if (copy_from_user(strbuf, buffer, len))
27262 - return -EFAULT;
27263 - strbuf[len] = '\0';
27264 - sscanf(strbuf, "%s", str);
27265 + strbuf[count] = '\0';
27266
27267 mutex_lock(&acpi_device_lock);
27268 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27269 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27270 if (!dev->wakeup.flags.valid)
27271 continue;
27272
27273 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27274 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27275 if (device_can_wakeup(&dev->dev)) {
27276 bool enable = !device_may_wakeup(&dev->dev);
27277 device_set_wakeup_enable(&dev->dev, enable);
27278 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27279 index 8ae05ce..7dbbed9 100644
27280 --- a/drivers/acpi/processor_driver.c
27281 +++ b/drivers/acpi/processor_driver.c
27282 @@ -555,7 +555,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27283 return 0;
27284 #endif
27285
27286 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27287 + BUG_ON(pr->id >= nr_cpu_ids);
27288
27289 /*
27290 * Buggy BIOS check
27291 diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
27292 index 6e36d0c..f319944 100644
27293 --- a/drivers/acpi/sbs.c
27294 +++ b/drivers/acpi/sbs.c
27295 @@ -655,6 +655,9 @@ static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
27296
27297 static ssize_t
27298 acpi_battery_write_alarm(struct file *file, const char __user * buffer,
27299 + size_t count, loff_t * ppos) __size_overflow(3);
27300 +static ssize_t
27301 +acpi_battery_write_alarm(struct file *file, const char __user * buffer,
27302 size_t count, loff_t * ppos)
27303 {
27304 struct seq_file *seq = file->private_data;
27305 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27306 index c06e0ec..a2c06ba 100644
27307 --- a/drivers/ata/libata-core.c
27308 +++ b/drivers/ata/libata-core.c
27309 @@ -4736,7 +4736,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27310 struct ata_port *ap;
27311 unsigned int tag;
27312
27313 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27314 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27315 ap = qc->ap;
27316
27317 qc->flags = 0;
27318 @@ -4752,7 +4752,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27319 struct ata_port *ap;
27320 struct ata_link *link;
27321
27322 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27323 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27324 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27325 ap = qc->ap;
27326 link = qc->dev->link;
27327 @@ -5816,6 +5816,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27328 return;
27329
27330 spin_lock(&lock);
27331 + pax_open_kernel();
27332
27333 for (cur = ops->inherits; cur; cur = cur->inherits) {
27334 void **inherit = (void **)cur;
27335 @@ -5829,8 +5830,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27336 if (IS_ERR(*pp))
27337 *pp = NULL;
27338
27339 - ops->inherits = NULL;
27340 + *(struct ata_port_operations **)&ops->inherits = NULL;
27341
27342 + pax_close_kernel();
27343 spin_unlock(&lock);
27344 }
27345
27346 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27347 index 048589f..4002b98 100644
27348 --- a/drivers/ata/pata_arasan_cf.c
27349 +++ b/drivers/ata/pata_arasan_cf.c
27350 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27351 /* Handle platform specific quirks */
27352 if (pdata->quirk) {
27353 if (pdata->quirk & CF_BROKEN_PIO) {
27354 - ap->ops->set_piomode = NULL;
27355 + pax_open_kernel();
27356 + *(void **)&ap->ops->set_piomode = NULL;
27357 + pax_close_kernel();
27358 ap->pio_mask = 0;
27359 }
27360 if (pdata->quirk & CF_BROKEN_MWDMA)
27361 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27362 index f9b983a..887b9d8 100644
27363 --- a/drivers/atm/adummy.c
27364 +++ b/drivers/atm/adummy.c
27365 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27366 vcc->pop(vcc, skb);
27367 else
27368 dev_kfree_skb_any(skb);
27369 - atomic_inc(&vcc->stats->tx);
27370 + atomic_inc_unchecked(&vcc->stats->tx);
27371
27372 return 0;
27373 }
27374 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27375 index f8f41e0..1f987dd 100644
27376 --- a/drivers/atm/ambassador.c
27377 +++ b/drivers/atm/ambassador.c
27378 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27379 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27380
27381 // VC layer stats
27382 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27383 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27384
27385 // free the descriptor
27386 kfree (tx_descr);
27387 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27388 dump_skb ("<<<", vc, skb);
27389
27390 // VC layer stats
27391 - atomic_inc(&atm_vcc->stats->rx);
27392 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27393 __net_timestamp(skb);
27394 // end of our responsibility
27395 atm_vcc->push (atm_vcc, skb);
27396 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27397 } else {
27398 PRINTK (KERN_INFO, "dropped over-size frame");
27399 // should we count this?
27400 - atomic_inc(&atm_vcc->stats->rx_drop);
27401 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27402 }
27403
27404 } else {
27405 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27406 }
27407
27408 if (check_area (skb->data, skb->len)) {
27409 - atomic_inc(&atm_vcc->stats->tx_err);
27410 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27411 return -ENOMEM; // ?
27412 }
27413
27414 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27415 index b22d71c..d6e1049 100644
27416 --- a/drivers/atm/atmtcp.c
27417 +++ b/drivers/atm/atmtcp.c
27418 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27419 if (vcc->pop) vcc->pop(vcc,skb);
27420 else dev_kfree_skb(skb);
27421 if (dev_data) return 0;
27422 - atomic_inc(&vcc->stats->tx_err);
27423 + atomic_inc_unchecked(&vcc->stats->tx_err);
27424 return -ENOLINK;
27425 }
27426 size = skb->len+sizeof(struct atmtcp_hdr);
27427 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27428 if (!new_skb) {
27429 if (vcc->pop) vcc->pop(vcc,skb);
27430 else dev_kfree_skb(skb);
27431 - atomic_inc(&vcc->stats->tx_err);
27432 + atomic_inc_unchecked(&vcc->stats->tx_err);
27433 return -ENOBUFS;
27434 }
27435 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27436 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27437 if (vcc->pop) vcc->pop(vcc,skb);
27438 else dev_kfree_skb(skb);
27439 out_vcc->push(out_vcc,new_skb);
27440 - atomic_inc(&vcc->stats->tx);
27441 - atomic_inc(&out_vcc->stats->rx);
27442 + atomic_inc_unchecked(&vcc->stats->tx);
27443 + atomic_inc_unchecked(&out_vcc->stats->rx);
27444 return 0;
27445 }
27446
27447 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27448 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27449 read_unlock(&vcc_sklist_lock);
27450 if (!out_vcc) {
27451 - atomic_inc(&vcc->stats->tx_err);
27452 + atomic_inc_unchecked(&vcc->stats->tx_err);
27453 goto done;
27454 }
27455 skb_pull(skb,sizeof(struct atmtcp_hdr));
27456 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27457 __net_timestamp(new_skb);
27458 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27459 out_vcc->push(out_vcc,new_skb);
27460 - atomic_inc(&vcc->stats->tx);
27461 - atomic_inc(&out_vcc->stats->rx);
27462 + atomic_inc_unchecked(&vcc->stats->tx);
27463 + atomic_inc_unchecked(&out_vcc->stats->rx);
27464 done:
27465 if (vcc->pop) vcc->pop(vcc,skb);
27466 else dev_kfree_skb(skb);
27467 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
27468 index 956e9ac..133516d 100644
27469 --- a/drivers/atm/eni.c
27470 +++ b/drivers/atm/eni.c
27471 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
27472 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27473 vcc->dev->number);
27474 length = 0;
27475 - atomic_inc(&vcc->stats->rx_err);
27476 + atomic_inc_unchecked(&vcc->stats->rx_err);
27477 }
27478 else {
27479 length = ATM_CELL_SIZE-1; /* no HEC */
27480 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27481 size);
27482 }
27483 eff = length = 0;
27484 - atomic_inc(&vcc->stats->rx_err);
27485 + atomic_inc_unchecked(&vcc->stats->rx_err);
27486 }
27487 else {
27488 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
27489 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27490 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27491 vcc->dev->number,vcc->vci,length,size << 2,descr);
27492 length = eff = 0;
27493 - atomic_inc(&vcc->stats->rx_err);
27494 + atomic_inc_unchecked(&vcc->stats->rx_err);
27495 }
27496 }
27497 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
27498 @@ -771,7 +771,7 @@ rx_dequeued++;
27499 vcc->push(vcc,skb);
27500 pushed++;
27501 }
27502 - atomic_inc(&vcc->stats->rx);
27503 + atomic_inc_unchecked(&vcc->stats->rx);
27504 }
27505 wake_up(&eni_dev->rx_wait);
27506 }
27507 @@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
27508 PCI_DMA_TODEVICE);
27509 if (vcc->pop) vcc->pop(vcc,skb);
27510 else dev_kfree_skb_irq(skb);
27511 - atomic_inc(&vcc->stats->tx);
27512 + atomic_inc_unchecked(&vcc->stats->tx);
27513 wake_up(&eni_dev->tx_wait);
27514 dma_complete++;
27515 }
27516 @@ -1569,7 +1569,7 @@ tx_complete++;
27517 /*--------------------------------- entries ---------------------------------*/
27518
27519
27520 -static const char *media_name[] __devinitdata = {
27521 +static const char *media_name[] __devinitconst = {
27522 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
27523 "UTP", "05?", "06?", "07?", /* 4- 7 */
27524 "TAXI","09?", "10?", "11?", /* 8-11 */
27525 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
27526 index 5072f8a..fa52520d 100644
27527 --- a/drivers/atm/firestream.c
27528 +++ b/drivers/atm/firestream.c
27529 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
27530 }
27531 }
27532
27533 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27534 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27535
27536 fs_dprintk (FS_DEBUG_TXMEM, "i");
27537 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
27538 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27539 #endif
27540 skb_put (skb, qe->p1 & 0xffff);
27541 ATM_SKB(skb)->vcc = atm_vcc;
27542 - atomic_inc(&atm_vcc->stats->rx);
27543 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27544 __net_timestamp(skb);
27545 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
27546 atm_vcc->push (atm_vcc, skb);
27547 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27548 kfree (pe);
27549 }
27550 if (atm_vcc)
27551 - atomic_inc(&atm_vcc->stats->rx_drop);
27552 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27553 break;
27554 case 0x1f: /* Reassembly abort: no buffers. */
27555 /* Silently increment error counter. */
27556 if (atm_vcc)
27557 - atomic_inc(&atm_vcc->stats->rx_drop);
27558 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27559 break;
27560 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
27561 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
27562 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
27563 index 361f5ae..7fc552d 100644
27564 --- a/drivers/atm/fore200e.c
27565 +++ b/drivers/atm/fore200e.c
27566 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
27567 #endif
27568 /* check error condition */
27569 if (*entry->status & STATUS_ERROR)
27570 - atomic_inc(&vcc->stats->tx_err);
27571 + atomic_inc_unchecked(&vcc->stats->tx_err);
27572 else
27573 - atomic_inc(&vcc->stats->tx);
27574 + atomic_inc_unchecked(&vcc->stats->tx);
27575 }
27576 }
27577
27578 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27579 if (skb == NULL) {
27580 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
27581
27582 - atomic_inc(&vcc->stats->rx_drop);
27583 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27584 return -ENOMEM;
27585 }
27586
27587 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27588
27589 dev_kfree_skb_any(skb);
27590
27591 - atomic_inc(&vcc->stats->rx_drop);
27592 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27593 return -ENOMEM;
27594 }
27595
27596 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27597
27598 vcc->push(vcc, skb);
27599 - atomic_inc(&vcc->stats->rx);
27600 + atomic_inc_unchecked(&vcc->stats->rx);
27601
27602 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27603
27604 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
27605 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27606 fore200e->atm_dev->number,
27607 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27608 - atomic_inc(&vcc->stats->rx_err);
27609 + atomic_inc_unchecked(&vcc->stats->rx_err);
27610 }
27611 }
27612
27613 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
27614 goto retry_here;
27615 }
27616
27617 - atomic_inc(&vcc->stats->tx_err);
27618 + atomic_inc_unchecked(&vcc->stats->tx_err);
27619
27620 fore200e->tx_sat++;
27621 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
27622 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
27623 index b182c2f..1c6fa8a 100644
27624 --- a/drivers/atm/he.c
27625 +++ b/drivers/atm/he.c
27626 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27627
27628 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
27629 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
27630 - atomic_inc(&vcc->stats->rx_drop);
27631 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27632 goto return_host_buffers;
27633 }
27634
27635 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27636 RBRQ_LEN_ERR(he_dev->rbrq_head)
27637 ? "LEN_ERR" : "",
27638 vcc->vpi, vcc->vci);
27639 - atomic_inc(&vcc->stats->rx_err);
27640 + atomic_inc_unchecked(&vcc->stats->rx_err);
27641 goto return_host_buffers;
27642 }
27643
27644 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27645 vcc->push(vcc, skb);
27646 spin_lock(&he_dev->global_lock);
27647
27648 - atomic_inc(&vcc->stats->rx);
27649 + atomic_inc_unchecked(&vcc->stats->rx);
27650
27651 return_host_buffers:
27652 ++pdus_assembled;
27653 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
27654 tpd->vcc->pop(tpd->vcc, tpd->skb);
27655 else
27656 dev_kfree_skb_any(tpd->skb);
27657 - atomic_inc(&tpd->vcc->stats->tx_err);
27658 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
27659 }
27660 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
27661 return;
27662 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27663 vcc->pop(vcc, skb);
27664 else
27665 dev_kfree_skb_any(skb);
27666 - atomic_inc(&vcc->stats->tx_err);
27667 + atomic_inc_unchecked(&vcc->stats->tx_err);
27668 return -EINVAL;
27669 }
27670
27671 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27672 vcc->pop(vcc, skb);
27673 else
27674 dev_kfree_skb_any(skb);
27675 - atomic_inc(&vcc->stats->tx_err);
27676 + atomic_inc_unchecked(&vcc->stats->tx_err);
27677 return -EINVAL;
27678 }
27679 #endif
27680 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27681 vcc->pop(vcc, skb);
27682 else
27683 dev_kfree_skb_any(skb);
27684 - atomic_inc(&vcc->stats->tx_err);
27685 + atomic_inc_unchecked(&vcc->stats->tx_err);
27686 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27687 return -ENOMEM;
27688 }
27689 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27690 vcc->pop(vcc, skb);
27691 else
27692 dev_kfree_skb_any(skb);
27693 - atomic_inc(&vcc->stats->tx_err);
27694 + atomic_inc_unchecked(&vcc->stats->tx_err);
27695 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27696 return -ENOMEM;
27697 }
27698 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27699 __enqueue_tpd(he_dev, tpd, cid);
27700 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27701
27702 - atomic_inc(&vcc->stats->tx);
27703 + atomic_inc_unchecked(&vcc->stats->tx);
27704
27705 return 0;
27706 }
27707 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
27708 index b812103..e391a49 100644
27709 --- a/drivers/atm/horizon.c
27710 +++ b/drivers/atm/horizon.c
27711 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
27712 {
27713 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
27714 // VC layer stats
27715 - atomic_inc(&vcc->stats->rx);
27716 + atomic_inc_unchecked(&vcc->stats->rx);
27717 __net_timestamp(skb);
27718 // end of our responsibility
27719 vcc->push (vcc, skb);
27720 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
27721 dev->tx_iovec = NULL;
27722
27723 // VC layer stats
27724 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27725 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27726
27727 // free the skb
27728 hrz_kfree_skb (skb);
27729 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
27730 index 1c05212..c28e200 100644
27731 --- a/drivers/atm/idt77252.c
27732 +++ b/drivers/atm/idt77252.c
27733 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
27734 else
27735 dev_kfree_skb(skb);
27736
27737 - atomic_inc(&vcc->stats->tx);
27738 + atomic_inc_unchecked(&vcc->stats->tx);
27739 }
27740
27741 atomic_dec(&scq->used);
27742 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27743 if ((sb = dev_alloc_skb(64)) == NULL) {
27744 printk("%s: Can't allocate buffers for aal0.\n",
27745 card->name);
27746 - atomic_add(i, &vcc->stats->rx_drop);
27747 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
27748 break;
27749 }
27750 if (!atm_charge(vcc, sb->truesize)) {
27751 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
27752 card->name);
27753 - atomic_add(i - 1, &vcc->stats->rx_drop);
27754 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
27755 dev_kfree_skb(sb);
27756 break;
27757 }
27758 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27759 ATM_SKB(sb)->vcc = vcc;
27760 __net_timestamp(sb);
27761 vcc->push(vcc, sb);
27762 - atomic_inc(&vcc->stats->rx);
27763 + atomic_inc_unchecked(&vcc->stats->rx);
27764
27765 cell += ATM_CELL_PAYLOAD;
27766 }
27767 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27768 "(CDC: %08x)\n",
27769 card->name, len, rpp->len, readl(SAR_REG_CDC));
27770 recycle_rx_pool_skb(card, rpp);
27771 - atomic_inc(&vcc->stats->rx_err);
27772 + atomic_inc_unchecked(&vcc->stats->rx_err);
27773 return;
27774 }
27775 if (stat & SAR_RSQE_CRC) {
27776 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
27777 recycle_rx_pool_skb(card, rpp);
27778 - atomic_inc(&vcc->stats->rx_err);
27779 + atomic_inc_unchecked(&vcc->stats->rx_err);
27780 return;
27781 }
27782 if (skb_queue_len(&rpp->queue) > 1) {
27783 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27784 RXPRINTK("%s: Can't alloc RX skb.\n",
27785 card->name);
27786 recycle_rx_pool_skb(card, rpp);
27787 - atomic_inc(&vcc->stats->rx_err);
27788 + atomic_inc_unchecked(&vcc->stats->rx_err);
27789 return;
27790 }
27791 if (!atm_charge(vcc, skb->truesize)) {
27792 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27793 __net_timestamp(skb);
27794
27795 vcc->push(vcc, skb);
27796 - atomic_inc(&vcc->stats->rx);
27797 + atomic_inc_unchecked(&vcc->stats->rx);
27798
27799 return;
27800 }
27801 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27802 __net_timestamp(skb);
27803
27804 vcc->push(vcc, skb);
27805 - atomic_inc(&vcc->stats->rx);
27806 + atomic_inc_unchecked(&vcc->stats->rx);
27807
27808 if (skb->truesize > SAR_FB_SIZE_3)
27809 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
27810 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
27811 if (vcc->qos.aal != ATM_AAL0) {
27812 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
27813 card->name, vpi, vci);
27814 - atomic_inc(&vcc->stats->rx_drop);
27815 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27816 goto drop;
27817 }
27818
27819 if ((sb = dev_alloc_skb(64)) == NULL) {
27820 printk("%s: Can't allocate buffers for AAL0.\n",
27821 card->name);
27822 - atomic_inc(&vcc->stats->rx_err);
27823 + atomic_inc_unchecked(&vcc->stats->rx_err);
27824 goto drop;
27825 }
27826
27827 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
27828 ATM_SKB(sb)->vcc = vcc;
27829 __net_timestamp(sb);
27830 vcc->push(vcc, sb);
27831 - atomic_inc(&vcc->stats->rx);
27832 + atomic_inc_unchecked(&vcc->stats->rx);
27833
27834 drop:
27835 skb_pull(queue, 64);
27836 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27837
27838 if (vc == NULL) {
27839 printk("%s: NULL connection in send().\n", card->name);
27840 - atomic_inc(&vcc->stats->tx_err);
27841 + atomic_inc_unchecked(&vcc->stats->tx_err);
27842 dev_kfree_skb(skb);
27843 return -EINVAL;
27844 }
27845 if (!test_bit(VCF_TX, &vc->flags)) {
27846 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
27847 - atomic_inc(&vcc->stats->tx_err);
27848 + atomic_inc_unchecked(&vcc->stats->tx_err);
27849 dev_kfree_skb(skb);
27850 return -EINVAL;
27851 }
27852 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27853 break;
27854 default:
27855 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
27856 - atomic_inc(&vcc->stats->tx_err);
27857 + atomic_inc_unchecked(&vcc->stats->tx_err);
27858 dev_kfree_skb(skb);
27859 return -EINVAL;
27860 }
27861
27862 if (skb_shinfo(skb)->nr_frags != 0) {
27863 printk("%s: No scatter-gather yet.\n", card->name);
27864 - atomic_inc(&vcc->stats->tx_err);
27865 + atomic_inc_unchecked(&vcc->stats->tx_err);
27866 dev_kfree_skb(skb);
27867 return -EINVAL;
27868 }
27869 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27870
27871 err = queue_skb(card, vc, skb, oam);
27872 if (err) {
27873 - atomic_inc(&vcc->stats->tx_err);
27874 + atomic_inc_unchecked(&vcc->stats->tx_err);
27875 dev_kfree_skb(skb);
27876 return err;
27877 }
27878 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
27879 skb = dev_alloc_skb(64);
27880 if (!skb) {
27881 printk("%s: Out of memory in send_oam().\n", card->name);
27882 - atomic_inc(&vcc->stats->tx_err);
27883 + atomic_inc_unchecked(&vcc->stats->tx_err);
27884 return -ENOMEM;
27885 }
27886 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
27887 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
27888 index 9e373ba..cf93727 100644
27889 --- a/drivers/atm/iphase.c
27890 +++ b/drivers/atm/iphase.c
27891 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
27892 status = (u_short) (buf_desc_ptr->desc_mode);
27893 if (status & (RX_CER | RX_PTE | RX_OFL))
27894 {
27895 - atomic_inc(&vcc->stats->rx_err);
27896 + atomic_inc_unchecked(&vcc->stats->rx_err);
27897 IF_ERR(printk("IA: bad packet, dropping it");)
27898 if (status & RX_CER) {
27899 IF_ERR(printk(" cause: packet CRC error\n");)
27900 @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
27901 len = dma_addr - buf_addr;
27902 if (len > iadev->rx_buf_sz) {
27903 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
27904 - atomic_inc(&vcc->stats->rx_err);
27905 + atomic_inc_unchecked(&vcc->stats->rx_err);
27906 goto out_free_desc;
27907 }
27908
27909 @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27910 ia_vcc = INPH_IA_VCC(vcc);
27911 if (ia_vcc == NULL)
27912 {
27913 - atomic_inc(&vcc->stats->rx_err);
27914 + atomic_inc_unchecked(&vcc->stats->rx_err);
27915 atm_return(vcc, skb->truesize);
27916 dev_kfree_skb_any(skb);
27917 goto INCR_DLE;
27918 @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27919 if ((length > iadev->rx_buf_sz) || (length >
27920 (skb->len - sizeof(struct cpcs_trailer))))
27921 {
27922 - atomic_inc(&vcc->stats->rx_err);
27923 + atomic_inc_unchecked(&vcc->stats->rx_err);
27924 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
27925 length, skb->len);)
27926 atm_return(vcc, skb->truesize);
27927 @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27928
27929 IF_RX(printk("rx_dle_intr: skb push");)
27930 vcc->push(vcc,skb);
27931 - atomic_inc(&vcc->stats->rx);
27932 + atomic_inc_unchecked(&vcc->stats->rx);
27933 iadev->rx_pkt_cnt++;
27934 }
27935 INCR_DLE:
27936 @@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
27937 {
27938 struct k_sonet_stats *stats;
27939 stats = &PRIV(_ia_dev[board])->sonet_stats;
27940 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
27941 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
27942 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
27943 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
27944 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
27945 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
27946 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
27947 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
27948 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
27949 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
27950 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
27951 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
27952 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
27953 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
27954 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
27955 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
27956 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
27957 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
27958 }
27959 ia_cmds.status = 0;
27960 break;
27961 @@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
27962 if ((desc == 0) || (desc > iadev->num_tx_desc))
27963 {
27964 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
27965 - atomic_inc(&vcc->stats->tx);
27966 + atomic_inc_unchecked(&vcc->stats->tx);
27967 if (vcc->pop)
27968 vcc->pop(vcc, skb);
27969 else
27970 @@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
27971 ATM_DESC(skb) = vcc->vci;
27972 skb_queue_tail(&iadev->tx_dma_q, skb);
27973
27974 - atomic_inc(&vcc->stats->tx);
27975 + atomic_inc_unchecked(&vcc->stats->tx);
27976 iadev->tx_pkt_cnt++;
27977 /* Increment transaction counter */
27978 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
27979
27980 #if 0
27981 /* add flow control logic */
27982 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
27983 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
27984 if (iavcc->vc_desc_cnt > 10) {
27985 vcc->tx_quota = vcc->tx_quota * 3 / 4;
27986 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
27987 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
27988 index f556969..0da15eb 100644
27989 --- a/drivers/atm/lanai.c
27990 +++ b/drivers/atm/lanai.c
27991 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
27992 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
27993 lanai_endtx(lanai, lvcc);
27994 lanai_free_skb(lvcc->tx.atmvcc, skb);
27995 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
27996 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
27997 }
27998
27999 /* Try to fill the buffer - don't call unless there is backlog */
28000 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28001 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28002 __net_timestamp(skb);
28003 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28004 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28005 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28006 out:
28007 lvcc->rx.buf.ptr = end;
28008 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28009 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28010 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28011 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28012 lanai->stats.service_rxnotaal5++;
28013 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28014 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28015 return 0;
28016 }
28017 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28018 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28019 int bytes;
28020 read_unlock(&vcc_sklist_lock);
28021 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28022 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28023 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28024 lvcc->stats.x.aal5.service_trash++;
28025 bytes = (SERVICE_GET_END(s) * 16) -
28026 (((unsigned long) lvcc->rx.buf.ptr) -
28027 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28028 }
28029 if (s & SERVICE_STREAM) {
28030 read_unlock(&vcc_sklist_lock);
28031 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28032 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28033 lvcc->stats.x.aal5.service_stream++;
28034 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28035 "PDU on VCI %d!\n", lanai->number, vci);
28036 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28037 return 0;
28038 }
28039 DPRINTK("got rx crc error on vci %d\n", vci);
28040 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28041 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28042 lvcc->stats.x.aal5.service_rxcrc++;
28043 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28044 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28045 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28046 index 1c70c45..300718d 100644
28047 --- a/drivers/atm/nicstar.c
28048 +++ b/drivers/atm/nicstar.c
28049 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28050 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28051 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28052 card->index);
28053 - atomic_inc(&vcc->stats->tx_err);
28054 + atomic_inc_unchecked(&vcc->stats->tx_err);
28055 dev_kfree_skb_any(skb);
28056 return -EINVAL;
28057 }
28058 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28059 if (!vc->tx) {
28060 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28061 card->index);
28062 - atomic_inc(&vcc->stats->tx_err);
28063 + atomic_inc_unchecked(&vcc->stats->tx_err);
28064 dev_kfree_skb_any(skb);
28065 return -EINVAL;
28066 }
28067 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28068 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28069 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28070 card->index);
28071 - atomic_inc(&vcc->stats->tx_err);
28072 + atomic_inc_unchecked(&vcc->stats->tx_err);
28073 dev_kfree_skb_any(skb);
28074 return -EINVAL;
28075 }
28076
28077 if (skb_shinfo(skb)->nr_frags != 0) {
28078 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28079 - atomic_inc(&vcc->stats->tx_err);
28080 + atomic_inc_unchecked(&vcc->stats->tx_err);
28081 dev_kfree_skb_any(skb);
28082 return -EINVAL;
28083 }
28084 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28085 }
28086
28087 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28088 - atomic_inc(&vcc->stats->tx_err);
28089 + atomic_inc_unchecked(&vcc->stats->tx_err);
28090 dev_kfree_skb_any(skb);
28091 return -EIO;
28092 }
28093 - atomic_inc(&vcc->stats->tx);
28094 + atomic_inc_unchecked(&vcc->stats->tx);
28095
28096 return 0;
28097 }
28098 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28099 printk
28100 ("nicstar%d: Can't allocate buffers for aal0.\n",
28101 card->index);
28102 - atomic_add(i, &vcc->stats->rx_drop);
28103 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28104 break;
28105 }
28106 if (!atm_charge(vcc, sb->truesize)) {
28107 RXPRINTK
28108 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28109 card->index);
28110 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28111 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28112 dev_kfree_skb_any(sb);
28113 break;
28114 }
28115 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28116 ATM_SKB(sb)->vcc = vcc;
28117 __net_timestamp(sb);
28118 vcc->push(vcc, sb);
28119 - atomic_inc(&vcc->stats->rx);
28120 + atomic_inc_unchecked(&vcc->stats->rx);
28121 cell += ATM_CELL_PAYLOAD;
28122 }
28123
28124 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28125 if (iovb == NULL) {
28126 printk("nicstar%d: Out of iovec buffers.\n",
28127 card->index);
28128 - atomic_inc(&vcc->stats->rx_drop);
28129 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28130 recycle_rx_buf(card, skb);
28131 return;
28132 }
28133 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28134 small or large buffer itself. */
28135 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28136 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28137 - atomic_inc(&vcc->stats->rx_err);
28138 + atomic_inc_unchecked(&vcc->stats->rx_err);
28139 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28140 NS_MAX_IOVECS);
28141 NS_PRV_IOVCNT(iovb) = 0;
28142 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28143 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28144 card->index);
28145 which_list(card, skb);
28146 - atomic_inc(&vcc->stats->rx_err);
28147 + atomic_inc_unchecked(&vcc->stats->rx_err);
28148 recycle_rx_buf(card, skb);
28149 vc->rx_iov = NULL;
28150 recycle_iov_buf(card, iovb);
28151 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28152 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28153 card->index);
28154 which_list(card, skb);
28155 - atomic_inc(&vcc->stats->rx_err);
28156 + atomic_inc_unchecked(&vcc->stats->rx_err);
28157 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28158 NS_PRV_IOVCNT(iovb));
28159 vc->rx_iov = NULL;
28160 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28161 printk(" - PDU size mismatch.\n");
28162 else
28163 printk(".\n");
28164 - atomic_inc(&vcc->stats->rx_err);
28165 + atomic_inc_unchecked(&vcc->stats->rx_err);
28166 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28167 NS_PRV_IOVCNT(iovb));
28168 vc->rx_iov = NULL;
28169 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28170 /* skb points to a small buffer */
28171 if (!atm_charge(vcc, skb->truesize)) {
28172 push_rxbufs(card, skb);
28173 - atomic_inc(&vcc->stats->rx_drop);
28174 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28175 } else {
28176 skb_put(skb, len);
28177 dequeue_sm_buf(card, skb);
28178 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28179 ATM_SKB(skb)->vcc = vcc;
28180 __net_timestamp(skb);
28181 vcc->push(vcc, skb);
28182 - atomic_inc(&vcc->stats->rx);
28183 + atomic_inc_unchecked(&vcc->stats->rx);
28184 }
28185 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28186 struct sk_buff *sb;
28187 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28188 if (len <= NS_SMBUFSIZE) {
28189 if (!atm_charge(vcc, sb->truesize)) {
28190 push_rxbufs(card, sb);
28191 - atomic_inc(&vcc->stats->rx_drop);
28192 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28193 } else {
28194 skb_put(sb, len);
28195 dequeue_sm_buf(card, sb);
28196 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28197 ATM_SKB(sb)->vcc = vcc;
28198 __net_timestamp(sb);
28199 vcc->push(vcc, sb);
28200 - atomic_inc(&vcc->stats->rx);
28201 + atomic_inc_unchecked(&vcc->stats->rx);
28202 }
28203
28204 push_rxbufs(card, skb);
28205 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28206
28207 if (!atm_charge(vcc, skb->truesize)) {
28208 push_rxbufs(card, skb);
28209 - atomic_inc(&vcc->stats->rx_drop);
28210 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28211 } else {
28212 dequeue_lg_buf(card, skb);
28213 #ifdef NS_USE_DESTRUCTORS
28214 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28215 ATM_SKB(skb)->vcc = vcc;
28216 __net_timestamp(skb);
28217 vcc->push(vcc, skb);
28218 - atomic_inc(&vcc->stats->rx);
28219 + atomic_inc_unchecked(&vcc->stats->rx);
28220 }
28221
28222 push_rxbufs(card, sb);
28223 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28224 printk
28225 ("nicstar%d: Out of huge buffers.\n",
28226 card->index);
28227 - atomic_inc(&vcc->stats->rx_drop);
28228 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28229 recycle_iovec_rx_bufs(card,
28230 (struct iovec *)
28231 iovb->data,
28232 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28233 card->hbpool.count++;
28234 } else
28235 dev_kfree_skb_any(hb);
28236 - atomic_inc(&vcc->stats->rx_drop);
28237 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28238 } else {
28239 /* Copy the small buffer to the huge buffer */
28240 sb = (struct sk_buff *)iov->iov_base;
28241 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28242 #endif /* NS_USE_DESTRUCTORS */
28243 __net_timestamp(hb);
28244 vcc->push(vcc, hb);
28245 - atomic_inc(&vcc->stats->rx);
28246 + atomic_inc_unchecked(&vcc->stats->rx);
28247 }
28248 }
28249
28250 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28251 index e8cd652..bbbd1fc 100644
28252 --- a/drivers/atm/solos-pci.c
28253 +++ b/drivers/atm/solos-pci.c
28254 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28255 }
28256 atm_charge(vcc, skb->truesize);
28257 vcc->push(vcc, skb);
28258 - atomic_inc(&vcc->stats->rx);
28259 + atomic_inc_unchecked(&vcc->stats->rx);
28260 break;
28261
28262 case PKT_STATUS:
28263 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28264 vcc = SKB_CB(oldskb)->vcc;
28265
28266 if (vcc) {
28267 - atomic_inc(&vcc->stats->tx);
28268 + atomic_inc_unchecked(&vcc->stats->tx);
28269 solos_pop(vcc, oldskb);
28270 } else
28271 dev_kfree_skb_irq(oldskb);
28272 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28273 index 90f1ccc..04c4a1e 100644
28274 --- a/drivers/atm/suni.c
28275 +++ b/drivers/atm/suni.c
28276 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28277
28278
28279 #define ADD_LIMITED(s,v) \
28280 - atomic_add((v),&stats->s); \
28281 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28282 + atomic_add_unchecked((v),&stats->s); \
28283 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28284
28285
28286 static void suni_hz(unsigned long from_timer)
28287 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28288 index 5120a96..e2572bd 100644
28289 --- a/drivers/atm/uPD98402.c
28290 +++ b/drivers/atm/uPD98402.c
28291 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28292 struct sonet_stats tmp;
28293 int error = 0;
28294
28295 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28296 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28297 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28298 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28299 if (zero && !error) {
28300 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28301
28302
28303 #define ADD_LIMITED(s,v) \
28304 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28305 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28306 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28307 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28308 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28309 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28310
28311
28312 static void stat_event(struct atm_dev *dev)
28313 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28314 if (reason & uPD98402_INT_PFM) stat_event(dev);
28315 if (reason & uPD98402_INT_PCO) {
28316 (void) GET(PCOCR); /* clear interrupt cause */
28317 - atomic_add(GET(HECCT),
28318 + atomic_add_unchecked(GET(HECCT),
28319 &PRIV(dev)->sonet_stats.uncorr_hcs);
28320 }
28321 if ((reason & uPD98402_INT_RFO) &&
28322 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28323 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28324 uPD98402_INT_LOS),PIMR); /* enable them */
28325 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28326 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28327 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28328 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28329 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28330 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28331 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28332 return 0;
28333 }
28334
28335 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28336 index d889f56..17eb71e 100644
28337 --- a/drivers/atm/zatm.c
28338 +++ b/drivers/atm/zatm.c
28339 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28340 }
28341 if (!size) {
28342 dev_kfree_skb_irq(skb);
28343 - if (vcc) atomic_inc(&vcc->stats->rx_err);
28344 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28345 continue;
28346 }
28347 if (!atm_charge(vcc,skb->truesize)) {
28348 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28349 skb->len = size;
28350 ATM_SKB(skb)->vcc = vcc;
28351 vcc->push(vcc,skb);
28352 - atomic_inc(&vcc->stats->rx);
28353 + atomic_inc_unchecked(&vcc->stats->rx);
28354 }
28355 zout(pos & 0xffff,MTA(mbx));
28356 #if 0 /* probably a stupid idea */
28357 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28358 skb_queue_head(&zatm_vcc->backlog,skb);
28359 break;
28360 }
28361 - atomic_inc(&vcc->stats->tx);
28362 + atomic_inc_unchecked(&vcc->stats->tx);
28363 wake_up(&zatm_vcc->tx_wait);
28364 }
28365
28366 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28367 index 8493536..31adee0 100644
28368 --- a/drivers/base/devtmpfs.c
28369 +++ b/drivers/base/devtmpfs.c
28370 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28371 if (!thread)
28372 return 0;
28373
28374 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28375 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28376 if (err)
28377 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28378 else
28379 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28380 index caf995f..6f76697 100644
28381 --- a/drivers/base/power/wakeup.c
28382 +++ b/drivers/base/power/wakeup.c
28383 @@ -30,14 +30,14 @@ bool events_check_enabled;
28384 * They need to be modified together atomically, so it's better to use one
28385 * atomic variable to hold them both.
28386 */
28387 -static atomic_t combined_event_count = ATOMIC_INIT(0);
28388 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28389
28390 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28391 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28392
28393 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28394 {
28395 - unsigned int comb = atomic_read(&combined_event_count);
28396 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
28397
28398 *cnt = (comb >> IN_PROGRESS_BITS);
28399 *inpr = comb & MAX_IN_PROGRESS;
28400 @@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28401 ws->last_time = ktime_get();
28402
28403 /* Increment the counter of events in progress. */
28404 - atomic_inc(&combined_event_count);
28405 + atomic_inc_unchecked(&combined_event_count);
28406 }
28407
28408 /**
28409 @@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28410 * Increment the counter of registered wakeup events and decrement the
28411 * couter of wakeup events in progress simultaneously.
28412 */
28413 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
28414 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28415 }
28416
28417 /**
28418 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28419 index b0f553b..77b928b 100644
28420 --- a/drivers/block/cciss.c
28421 +++ b/drivers/block/cciss.c
28422 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28423 int err;
28424 u32 cp;
28425
28426 + memset(&arg64, 0, sizeof(arg64));
28427 +
28428 err = 0;
28429 err |=
28430 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28431 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28432 while (!list_empty(&h->reqQ)) {
28433 c = list_entry(h->reqQ.next, CommandList_struct, list);
28434 /* can't do anything if fifo is full */
28435 - if ((h->access.fifo_full(h))) {
28436 + if ((h->access->fifo_full(h))) {
28437 dev_warn(&h->pdev->dev, "fifo full\n");
28438 break;
28439 }
28440 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28441 h->Qdepth--;
28442
28443 /* Tell the controller execute command */
28444 - h->access.submit_command(h, c);
28445 + h->access->submit_command(h, c);
28446
28447 /* Put job onto the completed Q */
28448 addQ(&h->cmpQ, c);
28449 @@ -3443,17 +3445,17 @@ startio:
28450
28451 static inline unsigned long get_next_completion(ctlr_info_t *h)
28452 {
28453 - return h->access.command_completed(h);
28454 + return h->access->command_completed(h);
28455 }
28456
28457 static inline int interrupt_pending(ctlr_info_t *h)
28458 {
28459 - return h->access.intr_pending(h);
28460 + return h->access->intr_pending(h);
28461 }
28462
28463 static inline long interrupt_not_for_us(ctlr_info_t *h)
28464 {
28465 - return ((h->access.intr_pending(h) == 0) ||
28466 + return ((h->access->intr_pending(h) == 0) ||
28467 (h->interrupts_enabled == 0));
28468 }
28469
28470 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
28471 u32 a;
28472
28473 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
28474 - return h->access.command_completed(h);
28475 + return h->access->command_completed(h);
28476
28477 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
28478 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
28479 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
28480 trans_support & CFGTBL_Trans_use_short_tags);
28481
28482 /* Change the access methods to the performant access methods */
28483 - h->access = SA5_performant_access;
28484 + h->access = &SA5_performant_access;
28485 h->transMethod = CFGTBL_Trans_Performant;
28486
28487 return;
28488 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
28489 if (prod_index < 0)
28490 return -ENODEV;
28491 h->product_name = products[prod_index].product_name;
28492 - h->access = *(products[prod_index].access);
28493 + h->access = products[prod_index].access;
28494
28495 if (cciss_board_disabled(h)) {
28496 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
28497 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
28498 }
28499
28500 /* make sure the board interrupts are off */
28501 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28502 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28503 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
28504 if (rc)
28505 goto clean2;
28506 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
28507 * fake ones to scoop up any residual completions.
28508 */
28509 spin_lock_irqsave(&h->lock, flags);
28510 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28511 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28512 spin_unlock_irqrestore(&h->lock, flags);
28513 free_irq(h->intr[h->intr_mode], h);
28514 rc = cciss_request_irq(h, cciss_msix_discard_completions,
28515 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
28516 dev_info(&h->pdev->dev, "Board READY.\n");
28517 dev_info(&h->pdev->dev,
28518 "Waiting for stale completions to drain.\n");
28519 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28520 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28521 msleep(10000);
28522 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28523 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28524
28525 rc = controller_reset_failed(h->cfgtable);
28526 if (rc)
28527 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
28528 cciss_scsi_setup(h);
28529
28530 /* Turn the interrupts on so we can service requests */
28531 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28532 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28533
28534 /* Get the firmware version */
28535 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
28536 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
28537 kfree(flush_buf);
28538 if (return_code != IO_OK)
28539 dev_warn(&h->pdev->dev, "Error flushing cache\n");
28540 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28541 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28542 free_irq(h->intr[h->intr_mode], h);
28543 }
28544
28545 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
28546 index 7fda30e..eb5dfe0 100644
28547 --- a/drivers/block/cciss.h
28548 +++ b/drivers/block/cciss.h
28549 @@ -101,7 +101,7 @@ struct ctlr_info
28550 /* information about each logical volume */
28551 drive_info_struct *drv[CISS_MAX_LUN];
28552
28553 - struct access_method access;
28554 + struct access_method *access;
28555
28556 /* queue and queue Info */
28557 struct list_head reqQ;
28558 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
28559 index 9125bbe..eede5c8 100644
28560 --- a/drivers/block/cpqarray.c
28561 +++ b/drivers/block/cpqarray.c
28562 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28563 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
28564 goto Enomem4;
28565 }
28566 - hba[i]->access.set_intr_mask(hba[i], 0);
28567 + hba[i]->access->set_intr_mask(hba[i], 0);
28568 if (request_irq(hba[i]->intr, do_ida_intr,
28569 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
28570 {
28571 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28572 add_timer(&hba[i]->timer);
28573
28574 /* Enable IRQ now that spinlock and rate limit timer are set up */
28575 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28576 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28577
28578 for(j=0; j<NWD; j++) {
28579 struct gendisk *disk = ida_gendisk[i][j];
28580 @@ -694,7 +694,7 @@ DBGINFO(
28581 for(i=0; i<NR_PRODUCTS; i++) {
28582 if (board_id == products[i].board_id) {
28583 c->product_name = products[i].product_name;
28584 - c->access = *(products[i].access);
28585 + c->access = products[i].access;
28586 break;
28587 }
28588 }
28589 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
28590 hba[ctlr]->intr = intr;
28591 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
28592 hba[ctlr]->product_name = products[j].product_name;
28593 - hba[ctlr]->access = *(products[j].access);
28594 + hba[ctlr]->access = products[j].access;
28595 hba[ctlr]->ctlr = ctlr;
28596 hba[ctlr]->board_id = board_id;
28597 hba[ctlr]->pci_dev = NULL; /* not PCI */
28598 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
28599
28600 while((c = h->reqQ) != NULL) {
28601 /* Can't do anything if we're busy */
28602 - if (h->access.fifo_full(h) == 0)
28603 + if (h->access->fifo_full(h) == 0)
28604 return;
28605
28606 /* Get the first entry from the request Q */
28607 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
28608 h->Qdepth--;
28609
28610 /* Tell the controller to do our bidding */
28611 - h->access.submit_command(h, c);
28612 + h->access->submit_command(h, c);
28613
28614 /* Get onto the completion Q */
28615 addQ(&h->cmpQ, c);
28616 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28617 unsigned long flags;
28618 __u32 a,a1;
28619
28620 - istat = h->access.intr_pending(h);
28621 + istat = h->access->intr_pending(h);
28622 /* Is this interrupt for us? */
28623 if (istat == 0)
28624 return IRQ_NONE;
28625 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28626 */
28627 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
28628 if (istat & FIFO_NOT_EMPTY) {
28629 - while((a = h->access.command_completed(h))) {
28630 + while((a = h->access->command_completed(h))) {
28631 a1 = a; a &= ~3;
28632 if ((c = h->cmpQ) == NULL)
28633 {
28634 @@ -1449,11 +1449,11 @@ static int sendcmd(
28635 /*
28636 * Disable interrupt
28637 */
28638 - info_p->access.set_intr_mask(info_p, 0);
28639 + info_p->access->set_intr_mask(info_p, 0);
28640 /* Make sure there is room in the command FIFO */
28641 /* Actually it should be completely empty at this time. */
28642 for (i = 200000; i > 0; i--) {
28643 - temp = info_p->access.fifo_full(info_p);
28644 + temp = info_p->access->fifo_full(info_p);
28645 if (temp != 0) {
28646 break;
28647 }
28648 @@ -1466,7 +1466,7 @@ DBG(
28649 /*
28650 * Send the cmd
28651 */
28652 - info_p->access.submit_command(info_p, c);
28653 + info_p->access->submit_command(info_p, c);
28654 complete = pollcomplete(ctlr);
28655
28656 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
28657 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
28658 * we check the new geometry. Then turn interrupts back on when
28659 * we're done.
28660 */
28661 - host->access.set_intr_mask(host, 0);
28662 + host->access->set_intr_mask(host, 0);
28663 getgeometry(ctlr);
28664 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
28665 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
28666
28667 for(i=0; i<NWD; i++) {
28668 struct gendisk *disk = ida_gendisk[ctlr][i];
28669 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
28670 /* Wait (up to 2 seconds) for a command to complete */
28671
28672 for (i = 200000; i > 0; i--) {
28673 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
28674 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
28675 if (done == 0) {
28676 udelay(10); /* a short fixed delay */
28677 } else
28678 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
28679 index be73e9d..7fbf140 100644
28680 --- a/drivers/block/cpqarray.h
28681 +++ b/drivers/block/cpqarray.h
28682 @@ -99,7 +99,7 @@ struct ctlr_info {
28683 drv_info_t drv[NWD];
28684 struct proc_dir_entry *proc;
28685
28686 - struct access_method access;
28687 + struct access_method *access;
28688
28689 cmdlist_t *reqQ;
28690 cmdlist_t *cmpQ;
28691 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
28692 index 8d68056..e67050f 100644
28693 --- a/drivers/block/drbd/drbd_int.h
28694 +++ b/drivers/block/drbd/drbd_int.h
28695 @@ -736,7 +736,7 @@ struct drbd_request;
28696 struct drbd_epoch {
28697 struct list_head list;
28698 unsigned int barrier_nr;
28699 - atomic_t epoch_size; /* increased on every request added. */
28700 + atomic_unchecked_t epoch_size; /* increased on every request added. */
28701 atomic_t active; /* increased on every req. added, and dec on every finished. */
28702 unsigned long flags;
28703 };
28704 @@ -1108,7 +1108,7 @@ struct drbd_conf {
28705 void *int_dig_in;
28706 void *int_dig_vv;
28707 wait_queue_head_t seq_wait;
28708 - atomic_t packet_seq;
28709 + atomic_unchecked_t packet_seq;
28710 unsigned int peer_seq;
28711 spinlock_t peer_seq_lock;
28712 unsigned int minor;
28713 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
28714
28715 static inline void drbd_tcp_cork(struct socket *sock)
28716 {
28717 - int __user val = 1;
28718 + int val = 1;
28719 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
28720 - (char __user *)&val, sizeof(val));
28721 + (char __force_user *)&val, sizeof(val));
28722 }
28723
28724 static inline void drbd_tcp_uncork(struct socket *sock)
28725 {
28726 - int __user val = 0;
28727 + int val = 0;
28728 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
28729 - (char __user *)&val, sizeof(val));
28730 + (char __force_user *)&val, sizeof(val));
28731 }
28732
28733 static inline void drbd_tcp_nodelay(struct socket *sock)
28734 {
28735 - int __user val = 1;
28736 + int val = 1;
28737 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
28738 - (char __user *)&val, sizeof(val));
28739 + (char __force_user *)&val, sizeof(val));
28740 }
28741
28742 static inline void drbd_tcp_quickack(struct socket *sock)
28743 {
28744 - int __user val = 2;
28745 + int val = 2;
28746 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
28747 - (char __user *)&val, sizeof(val));
28748 + (char __force_user *)&val, sizeof(val));
28749 }
28750
28751 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
28752 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
28753 index 211fc44..c5116f1 100644
28754 --- a/drivers/block/drbd/drbd_main.c
28755 +++ b/drivers/block/drbd/drbd_main.c
28756 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
28757 p.sector = sector;
28758 p.block_id = block_id;
28759 p.blksize = blksize;
28760 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
28761 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
28762
28763 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
28764 return false;
28765 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
28766 p.sector = cpu_to_be64(req->sector);
28767 p.block_id = (unsigned long)req;
28768 p.seq_num = cpu_to_be32(req->seq_num =
28769 - atomic_add_return(1, &mdev->packet_seq));
28770 + atomic_add_return_unchecked(1, &mdev->packet_seq));
28771
28772 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
28773
28774 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
28775 atomic_set(&mdev->unacked_cnt, 0);
28776 atomic_set(&mdev->local_cnt, 0);
28777 atomic_set(&mdev->net_cnt, 0);
28778 - atomic_set(&mdev->packet_seq, 0);
28779 + atomic_set_unchecked(&mdev->packet_seq, 0);
28780 atomic_set(&mdev->pp_in_use, 0);
28781 atomic_set(&mdev->pp_in_use_by_net, 0);
28782 atomic_set(&mdev->rs_sect_in, 0);
28783 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
28784 mdev->receiver.t_state);
28785
28786 /* no need to lock it, I'm the only thread alive */
28787 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
28788 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
28789 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
28790 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
28791 mdev->al_writ_cnt =
28792 mdev->bm_writ_cnt =
28793 mdev->read_cnt =
28794 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
28795 index af2a250..219c74b 100644
28796 --- a/drivers/block/drbd/drbd_nl.c
28797 +++ b/drivers/block/drbd/drbd_nl.c
28798 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
28799 module_put(THIS_MODULE);
28800 }
28801
28802 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
28803 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
28804
28805 static unsigned short *
28806 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
28807 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
28808 cn_reply->id.idx = CN_IDX_DRBD;
28809 cn_reply->id.val = CN_VAL_DRBD;
28810
28811 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28812 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28813 cn_reply->ack = 0; /* not used here. */
28814 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28815 (int)((char *)tl - (char *)reply->tag_list);
28816 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
28817 cn_reply->id.idx = CN_IDX_DRBD;
28818 cn_reply->id.val = CN_VAL_DRBD;
28819
28820 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28821 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28822 cn_reply->ack = 0; /* not used here. */
28823 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28824 (int)((char *)tl - (char *)reply->tag_list);
28825 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
28826 cn_reply->id.idx = CN_IDX_DRBD;
28827 cn_reply->id.val = CN_VAL_DRBD;
28828
28829 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
28830 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
28831 cn_reply->ack = 0; // not used here.
28832 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28833 (int)((char*)tl - (char*)reply->tag_list);
28834 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
28835 cn_reply->id.idx = CN_IDX_DRBD;
28836 cn_reply->id.val = CN_VAL_DRBD;
28837
28838 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28839 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28840 cn_reply->ack = 0; /* not used here. */
28841 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28842 (int)((char *)tl - (char *)reply->tag_list);
28843 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
28844 index 43beaca..4a5b1dd 100644
28845 --- a/drivers/block/drbd/drbd_receiver.c
28846 +++ b/drivers/block/drbd/drbd_receiver.c
28847 @@ -894,7 +894,7 @@ retry:
28848 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
28849 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
28850
28851 - atomic_set(&mdev->packet_seq, 0);
28852 + atomic_set_unchecked(&mdev->packet_seq, 0);
28853 mdev->peer_seq = 0;
28854
28855 drbd_thread_start(&mdev->asender);
28856 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
28857 do {
28858 next_epoch = NULL;
28859
28860 - epoch_size = atomic_read(&epoch->epoch_size);
28861 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
28862
28863 switch (ev & ~EV_CLEANUP) {
28864 case EV_PUT:
28865 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
28866 rv = FE_DESTROYED;
28867 } else {
28868 epoch->flags = 0;
28869 - atomic_set(&epoch->epoch_size, 0);
28870 + atomic_set_unchecked(&epoch->epoch_size, 0);
28871 /* atomic_set(&epoch->active, 0); is already zero */
28872 if (rv == FE_STILL_LIVE)
28873 rv = FE_RECYCLED;
28874 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
28875 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
28876 drbd_flush(mdev);
28877
28878 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
28879 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
28880 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
28881 if (epoch)
28882 break;
28883 }
28884
28885 epoch = mdev->current_epoch;
28886 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
28887 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
28888
28889 D_ASSERT(atomic_read(&epoch->active) == 0);
28890 D_ASSERT(epoch->flags == 0);
28891 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
28892 }
28893
28894 epoch->flags = 0;
28895 - atomic_set(&epoch->epoch_size, 0);
28896 + atomic_set_unchecked(&epoch->epoch_size, 0);
28897 atomic_set(&epoch->active, 0);
28898
28899 spin_lock(&mdev->epoch_lock);
28900 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
28901 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
28902 list_add(&epoch->list, &mdev->current_epoch->list);
28903 mdev->current_epoch = epoch;
28904 mdev->epochs++;
28905 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
28906 spin_unlock(&mdev->peer_seq_lock);
28907
28908 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
28909 - atomic_inc(&mdev->current_epoch->epoch_size);
28910 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
28911 return drbd_drain_block(mdev, data_size);
28912 }
28913
28914 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
28915
28916 spin_lock(&mdev->epoch_lock);
28917 e->epoch = mdev->current_epoch;
28918 - atomic_inc(&e->epoch->epoch_size);
28919 + atomic_inc_unchecked(&e->epoch->epoch_size);
28920 atomic_inc(&e->epoch->active);
28921 spin_unlock(&mdev->epoch_lock);
28922
28923 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
28924 D_ASSERT(list_empty(&mdev->done_ee));
28925
28926 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
28927 - atomic_set(&mdev->current_epoch->epoch_size, 0);
28928 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
28929 D_ASSERT(list_empty(&mdev->current_epoch->list));
28930 }
28931
28932 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
28933 index cd50435..ba1ffb5 100644
28934 --- a/drivers/block/loop.c
28935 +++ b/drivers/block/loop.c
28936 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
28937 mm_segment_t old_fs = get_fs();
28938
28939 set_fs(get_ds());
28940 - bw = file->f_op->write(file, buf, len, &pos);
28941 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
28942 set_fs(old_fs);
28943 if (likely(bw == len))
28944 return 0;
28945 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
28946 index 4364303..9adf4ee 100644
28947 --- a/drivers/char/Kconfig
28948 +++ b/drivers/char/Kconfig
28949 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
28950
28951 config DEVKMEM
28952 bool "/dev/kmem virtual device support"
28953 - default y
28954 + default n
28955 + depends on !GRKERNSEC_KMEM
28956 help
28957 Say Y here if you want to support the /dev/kmem device. The
28958 /dev/kmem device is rarely used, but can be used for certain
28959 @@ -596,6 +597,7 @@ config DEVPORT
28960 bool
28961 depends on !M68K
28962 depends on ISA || PCI
28963 + depends on !GRKERNSEC_KMEM
28964 default y
28965
28966 source "drivers/s390/char/Kconfig"
28967 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
28968 index 2e04433..22afc64 100644
28969 --- a/drivers/char/agp/frontend.c
28970 +++ b/drivers/char/agp/frontend.c
28971 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
28972 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
28973 return -EFAULT;
28974
28975 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
28976 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
28977 return -EFAULT;
28978
28979 client = agp_find_client_by_pid(reserve.pid);
28980 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
28981 index 095ab90..afad0a4 100644
28982 --- a/drivers/char/briq_panel.c
28983 +++ b/drivers/char/briq_panel.c
28984 @@ -9,6 +9,7 @@
28985 #include <linux/types.h>
28986 #include <linux/errno.h>
28987 #include <linux/tty.h>
28988 +#include <linux/mutex.h>
28989 #include <linux/timer.h>
28990 #include <linux/kernel.h>
28991 #include <linux/wait.h>
28992 @@ -34,6 +35,7 @@ static int vfd_is_open;
28993 static unsigned char vfd[40];
28994 static int vfd_cursor;
28995 static unsigned char ledpb, led;
28996 +static DEFINE_MUTEX(vfd_mutex);
28997
28998 static void update_vfd(void)
28999 {
29000 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
29001 if (!vfd_is_open)
29002 return -EBUSY;
29003
29004 + mutex_lock(&vfd_mutex);
29005 for (;;) {
29006 char c;
29007 if (!indx)
29008 break;
29009 - if (get_user(c, buf))
29010 + if (get_user(c, buf)) {
29011 + mutex_unlock(&vfd_mutex);
29012 return -EFAULT;
29013 + }
29014 if (esc) {
29015 set_led(c);
29016 esc = 0;
29017 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
29018 buf++;
29019 }
29020 update_vfd();
29021 + mutex_unlock(&vfd_mutex);
29022
29023 return len;
29024 }
29025 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29026 index f773a9d..65cd683 100644
29027 --- a/drivers/char/genrtc.c
29028 +++ b/drivers/char/genrtc.c
29029 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
29030 switch (cmd) {
29031
29032 case RTC_PLL_GET:
29033 + memset(&pll, 0, sizeof(pll));
29034 if (get_rtc_pll(&pll))
29035 return -EINVAL;
29036 else
29037 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29038 index 0833896..cccce52 100644
29039 --- a/drivers/char/hpet.c
29040 +++ b/drivers/char/hpet.c
29041 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29042 }
29043
29044 static int
29045 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29046 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29047 struct hpet_info *info)
29048 {
29049 struct hpet_timer __iomem *timer;
29050 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29051 index 58c0e63..46c16bf 100644
29052 --- a/drivers/char/ipmi/ipmi_msghandler.c
29053 +++ b/drivers/char/ipmi/ipmi_msghandler.c
29054 @@ -415,7 +415,7 @@ struct ipmi_smi {
29055 struct proc_dir_entry *proc_dir;
29056 char proc_dir_name[10];
29057
29058 - atomic_t stats[IPMI_NUM_STATS];
29059 + atomic_unchecked_t stats[IPMI_NUM_STATS];
29060
29061 /*
29062 * run_to_completion duplicate of smb_info, smi_info
29063 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29064
29065
29066 #define ipmi_inc_stat(intf, stat) \
29067 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29068 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29069 #define ipmi_get_stat(intf, stat) \
29070 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29071 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29072
29073 static int is_lan_addr(struct ipmi_addr *addr)
29074 {
29075 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29076 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29077 init_waitqueue_head(&intf->waitq);
29078 for (i = 0; i < IPMI_NUM_STATS; i++)
29079 - atomic_set(&intf->stats[i], 0);
29080 + atomic_set_unchecked(&intf->stats[i], 0);
29081
29082 intf->proc_dir = NULL;
29083
29084 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29085 index 50fcf9c..91b5528 100644
29086 --- a/drivers/char/ipmi/ipmi_si_intf.c
29087 +++ b/drivers/char/ipmi/ipmi_si_intf.c
29088 @@ -277,7 +277,7 @@ struct smi_info {
29089 unsigned char slave_addr;
29090
29091 /* Counters and things for the proc filesystem. */
29092 - atomic_t stats[SI_NUM_STATS];
29093 + atomic_unchecked_t stats[SI_NUM_STATS];
29094
29095 struct task_struct *thread;
29096
29097 @@ -286,9 +286,9 @@ struct smi_info {
29098 };
29099
29100 #define smi_inc_stat(smi, stat) \
29101 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29102 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29103 #define smi_get_stat(smi, stat) \
29104 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29105 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29106
29107 #define SI_MAX_PARMS 4
29108
29109 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
29110 atomic_set(&new_smi->req_events, 0);
29111 new_smi->run_to_completion = 0;
29112 for (i = 0; i < SI_NUM_STATS; i++)
29113 - atomic_set(&new_smi->stats[i], 0);
29114 + atomic_set_unchecked(&new_smi->stats[i], 0);
29115
29116 new_smi->interrupt_disabled = 1;
29117 atomic_set(&new_smi->stop_operation, 0);
29118 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29119 index 1aeaaba..e018570 100644
29120 --- a/drivers/char/mbcs.c
29121 +++ b/drivers/char/mbcs.c
29122 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
29123 return 0;
29124 }
29125
29126 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29127 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29128 {
29129 .part_num = MBCS_PART_NUM,
29130 .mfg_num = MBCS_MFG_NUM,
29131 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29132 index d6e9d08..4493e89 100644
29133 --- a/drivers/char/mem.c
29134 +++ b/drivers/char/mem.c
29135 @@ -18,6 +18,7 @@
29136 #include <linux/raw.h>
29137 #include <linux/tty.h>
29138 #include <linux/capability.h>
29139 +#include <linux/security.h>
29140 #include <linux/ptrace.h>
29141 #include <linux/device.h>
29142 #include <linux/highmem.h>
29143 @@ -35,6 +36,10 @@
29144 # include <linux/efi.h>
29145 #endif
29146
29147 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29148 +extern const struct file_operations grsec_fops;
29149 +#endif
29150 +
29151 static inline unsigned long size_inside_page(unsigned long start,
29152 unsigned long size)
29153 {
29154 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29155
29156 while (cursor < to) {
29157 if (!devmem_is_allowed(pfn)) {
29158 +#ifdef CONFIG_GRKERNSEC_KMEM
29159 + gr_handle_mem_readwrite(from, to);
29160 +#else
29161 printk(KERN_INFO
29162 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29163 current->comm, from, to);
29164 +#endif
29165 return 0;
29166 }
29167 cursor += PAGE_SIZE;
29168 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29169 }
29170 return 1;
29171 }
29172 +#elif defined(CONFIG_GRKERNSEC_KMEM)
29173 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29174 +{
29175 + return 0;
29176 +}
29177 #else
29178 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29179 {
29180 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29181
29182 while (count > 0) {
29183 unsigned long remaining;
29184 + char *temp;
29185
29186 sz = size_inside_page(p, count);
29187
29188 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29189 if (!ptr)
29190 return -EFAULT;
29191
29192 - remaining = copy_to_user(buf, ptr, sz);
29193 +#ifdef CONFIG_PAX_USERCOPY
29194 + temp = kmalloc(sz, GFP_KERNEL);
29195 + if (!temp) {
29196 + unxlate_dev_mem_ptr(p, ptr);
29197 + return -ENOMEM;
29198 + }
29199 + memcpy(temp, ptr, sz);
29200 +#else
29201 + temp = ptr;
29202 +#endif
29203 +
29204 + remaining = copy_to_user(buf, temp, sz);
29205 +
29206 +#ifdef CONFIG_PAX_USERCOPY
29207 + kfree(temp);
29208 +#endif
29209 +
29210 unxlate_dev_mem_ptr(p, ptr);
29211 if (remaining)
29212 return -EFAULT;
29213 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29214 size_t count, loff_t *ppos)
29215 {
29216 unsigned long p = *ppos;
29217 - ssize_t low_count, read, sz;
29218 + ssize_t low_count, read, sz, err = 0;
29219 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29220 - int err = 0;
29221
29222 read = 0;
29223 if (p < (unsigned long) high_memory) {
29224 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29225 }
29226 #endif
29227 while (low_count > 0) {
29228 + char *temp;
29229 +
29230 sz = size_inside_page(p, low_count);
29231
29232 /*
29233 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29234 */
29235 kbuf = xlate_dev_kmem_ptr((char *)p);
29236
29237 - if (copy_to_user(buf, kbuf, sz))
29238 +#ifdef CONFIG_PAX_USERCOPY
29239 + temp = kmalloc(sz, GFP_KERNEL);
29240 + if (!temp)
29241 + return -ENOMEM;
29242 + memcpy(temp, kbuf, sz);
29243 +#else
29244 + temp = kbuf;
29245 +#endif
29246 +
29247 + err = copy_to_user(buf, temp, sz);
29248 +
29249 +#ifdef CONFIG_PAX_USERCOPY
29250 + kfree(temp);
29251 +#endif
29252 +
29253 + if (err)
29254 return -EFAULT;
29255 buf += sz;
29256 p += sz;
29257 @@ -867,6 +914,9 @@ static const struct memdev {
29258 #ifdef CONFIG_CRASH_DUMP
29259 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29260 #endif
29261 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29262 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29263 +#endif
29264 };
29265
29266 static int memory_open(struct inode *inode, struct file *filp)
29267 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29268 index da3cfee..a5a6606 100644
29269 --- a/drivers/char/nvram.c
29270 +++ b/drivers/char/nvram.c
29271 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29272
29273 spin_unlock_irq(&rtc_lock);
29274
29275 - if (copy_to_user(buf, contents, tmp - contents))
29276 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29277 return -EFAULT;
29278
29279 *ppos = i;
29280 diff --git a/drivers/char/random.c b/drivers/char/random.c
29281 index 54ca8b2..4a092ed 100644
29282 --- a/drivers/char/random.c
29283 +++ b/drivers/char/random.c
29284 @@ -261,8 +261,13 @@
29285 /*
29286 * Configuration information
29287 */
29288 +#ifdef CONFIG_GRKERNSEC_RANDNET
29289 +#define INPUT_POOL_WORDS 512
29290 +#define OUTPUT_POOL_WORDS 128
29291 +#else
29292 #define INPUT_POOL_WORDS 128
29293 #define OUTPUT_POOL_WORDS 32
29294 +#endif
29295 #define SEC_XFER_SIZE 512
29296 #define EXTRACT_SIZE 10
29297
29298 @@ -300,10 +305,17 @@ static struct poolinfo {
29299 int poolwords;
29300 int tap1, tap2, tap3, tap4, tap5;
29301 } poolinfo_table[] = {
29302 +#ifdef CONFIG_GRKERNSEC_RANDNET
29303 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29304 + { 512, 411, 308, 208, 104, 1 },
29305 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29306 + { 128, 103, 76, 51, 25, 1 },
29307 +#else
29308 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29309 { 128, 103, 76, 51, 25, 1 },
29310 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29311 { 32, 26, 20, 14, 7, 1 },
29312 +#endif
29313 #if 0
29314 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29315 { 2048, 1638, 1231, 819, 411, 1 },
29316 @@ -913,7 +925,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29317
29318 extract_buf(r, tmp);
29319 i = min_t(int, nbytes, EXTRACT_SIZE);
29320 - if (copy_to_user(buf, tmp, i)) {
29321 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29322 ret = -EFAULT;
29323 break;
29324 }
29325 @@ -1238,7 +1250,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29326 #include <linux/sysctl.h>
29327
29328 static int min_read_thresh = 8, min_write_thresh;
29329 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
29330 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29331 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29332 static char sysctl_bootid[16];
29333
29334 @@ -1260,10 +1272,15 @@ static int proc_do_uuid(ctl_table *table, int write,
29335 uuid = table->data;
29336 if (!uuid) {
29337 uuid = tmp_uuid;
29338 - uuid[8] = 0;
29339 - }
29340 - if (uuid[8] == 0)
29341 generate_random_uuid(uuid);
29342 + } else {
29343 + static DEFINE_SPINLOCK(bootid_spinlock);
29344 +
29345 + spin_lock(&bootid_spinlock);
29346 + if (!uuid[8])
29347 + generate_random_uuid(uuid);
29348 + spin_unlock(&bootid_spinlock);
29349 + }
29350
29351 sprintf(buf, "%pU", uuid);
29352
29353 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29354 index 1ee8ce7..b778bef 100644
29355 --- a/drivers/char/sonypi.c
29356 +++ b/drivers/char/sonypi.c
29357 @@ -55,6 +55,7 @@
29358 #include <asm/uaccess.h>
29359 #include <asm/io.h>
29360 #include <asm/system.h>
29361 +#include <asm/local.h>
29362
29363 #include <linux/sonypi.h>
29364
29365 @@ -491,7 +492,7 @@ static struct sonypi_device {
29366 spinlock_t fifo_lock;
29367 wait_queue_head_t fifo_proc_list;
29368 struct fasync_struct *fifo_async;
29369 - int open_count;
29370 + local_t open_count;
29371 int model;
29372 struct input_dev *input_jog_dev;
29373 struct input_dev *input_key_dev;
29374 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29375 static int sonypi_misc_release(struct inode *inode, struct file *file)
29376 {
29377 mutex_lock(&sonypi_device.lock);
29378 - sonypi_device.open_count--;
29379 + local_dec(&sonypi_device.open_count);
29380 mutex_unlock(&sonypi_device.lock);
29381 return 0;
29382 }
29383 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29384 {
29385 mutex_lock(&sonypi_device.lock);
29386 /* Flush input queue on first open */
29387 - if (!sonypi_device.open_count)
29388 + if (!local_read(&sonypi_device.open_count))
29389 kfifo_reset(&sonypi_device.fifo);
29390 - sonypi_device.open_count++;
29391 + local_inc(&sonypi_device.open_count);
29392 mutex_unlock(&sonypi_device.lock);
29393
29394 return 0;
29395 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29396 index ad7c732..5aa8054 100644
29397 --- a/drivers/char/tpm/tpm.c
29398 +++ b/drivers/char/tpm/tpm.c
29399 @@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29400 chip->vendor.req_complete_val)
29401 goto out_recv;
29402
29403 - if ((status == chip->vendor.req_canceled)) {
29404 + if (status == chip->vendor.req_canceled) {
29405 dev_err(chip->dev, "Operation Canceled\n");
29406 rc = -ECANCELED;
29407 goto out;
29408 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29409 index 0636520..169c1d0 100644
29410 --- a/drivers/char/tpm/tpm_bios.c
29411 +++ b/drivers/char/tpm/tpm_bios.c
29412 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29413 event = addr;
29414
29415 if ((event->event_type == 0 && event->event_size == 0) ||
29416 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29417 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29418 return NULL;
29419
29420 return addr;
29421 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29422 return NULL;
29423
29424 if ((event->event_type == 0 && event->event_size == 0) ||
29425 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29426 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29427 return NULL;
29428
29429 (*pos)++;
29430 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
29431 int i;
29432
29433 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29434 - seq_putc(m, data[i]);
29435 + if (!seq_putc(m, data[i]))
29436 + return -EFAULT;
29437
29438 return 0;
29439 }
29440 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
29441 log->bios_event_log_end = log->bios_event_log + len;
29442
29443 virt = acpi_os_map_memory(start, len);
29444 + if (!virt) {
29445 + kfree(log->bios_event_log);
29446 + log->bios_event_log = NULL;
29447 + return -EFAULT;
29448 + }
29449
29450 - memcpy(log->bios_event_log, virt, len);
29451 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
29452
29453 acpi_os_unmap_memory(virt, len);
29454 return 0;
29455 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
29456 index b58b561..c9088c8 100644
29457 --- a/drivers/char/virtio_console.c
29458 +++ b/drivers/char/virtio_console.c
29459 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
29460 if (to_user) {
29461 ssize_t ret;
29462
29463 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
29464 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
29465 if (ret)
29466 return -EFAULT;
29467 } else {
29468 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
29469 if (!port_has_data(port) && !port->host_connected)
29470 return 0;
29471
29472 - return fill_readbuf(port, ubuf, count, true);
29473 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
29474 }
29475
29476 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
29477 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
29478 index c9eee6d..f9d5280 100644
29479 --- a/drivers/edac/amd64_edac.c
29480 +++ b/drivers/edac/amd64_edac.c
29481 @@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
29482 * PCI core identifies what devices are on a system during boot, and then
29483 * inquiry this table to see if this driver is for a given device found.
29484 */
29485 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
29486 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
29487 {
29488 .vendor = PCI_VENDOR_ID_AMD,
29489 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
29490 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
29491 index e47e73b..348e0bd 100644
29492 --- a/drivers/edac/amd76x_edac.c
29493 +++ b/drivers/edac/amd76x_edac.c
29494 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
29495 edac_mc_free(mci);
29496 }
29497
29498 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
29499 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
29500 {
29501 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29502 AMD762},
29503 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
29504 index 1af531a..3a8ff27 100644
29505 --- a/drivers/edac/e752x_edac.c
29506 +++ b/drivers/edac/e752x_edac.c
29507 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
29508 edac_mc_free(mci);
29509 }
29510
29511 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
29512 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
29513 {
29514 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29515 E7520},
29516 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
29517 index 6ffb6d2..383d8d7 100644
29518 --- a/drivers/edac/e7xxx_edac.c
29519 +++ b/drivers/edac/e7xxx_edac.c
29520 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
29521 edac_mc_free(mci);
29522 }
29523
29524 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
29525 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
29526 {
29527 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29528 E7205},
29529 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
29530 index 97f5064..202b6e6 100644
29531 --- a/drivers/edac/edac_pci_sysfs.c
29532 +++ b/drivers/edac/edac_pci_sysfs.c
29533 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
29534 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29535 static int edac_pci_poll_msec = 1000; /* one second workq period */
29536
29537 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
29538 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29539 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29540 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29541
29542 static struct kobject *edac_pci_top_main_kobj;
29543 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29544 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29545 edac_printk(KERN_CRIT, EDAC_PCI,
29546 "Signaled System Error on %s\n",
29547 pci_name(dev));
29548 - atomic_inc(&pci_nonparity_count);
29549 + atomic_inc_unchecked(&pci_nonparity_count);
29550 }
29551
29552 if (status & (PCI_STATUS_PARITY)) {
29553 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29554 "Master Data Parity Error on %s\n",
29555 pci_name(dev));
29556
29557 - atomic_inc(&pci_parity_count);
29558 + atomic_inc_unchecked(&pci_parity_count);
29559 }
29560
29561 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29562 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29563 "Detected Parity Error on %s\n",
29564 pci_name(dev));
29565
29566 - atomic_inc(&pci_parity_count);
29567 + atomic_inc_unchecked(&pci_parity_count);
29568 }
29569 }
29570
29571 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29572 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29573 "Signaled System Error on %s\n",
29574 pci_name(dev));
29575 - atomic_inc(&pci_nonparity_count);
29576 + atomic_inc_unchecked(&pci_nonparity_count);
29577 }
29578
29579 if (status & (PCI_STATUS_PARITY)) {
29580 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29581 "Master Data Parity Error on "
29582 "%s\n", pci_name(dev));
29583
29584 - atomic_inc(&pci_parity_count);
29585 + atomic_inc_unchecked(&pci_parity_count);
29586 }
29587
29588 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29589 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29590 "Detected Parity Error on %s\n",
29591 pci_name(dev));
29592
29593 - atomic_inc(&pci_parity_count);
29594 + atomic_inc_unchecked(&pci_parity_count);
29595 }
29596 }
29597 }
29598 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29599 if (!check_pci_errors)
29600 return;
29601
29602 - before_count = atomic_read(&pci_parity_count);
29603 + before_count = atomic_read_unchecked(&pci_parity_count);
29604
29605 /* scan all PCI devices looking for a Parity Error on devices and
29606 * bridges.
29607 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29608 /* Only if operator has selected panic on PCI Error */
29609 if (edac_pci_get_panic_on_pe()) {
29610 /* If the count is different 'after' from 'before' */
29611 - if (before_count != atomic_read(&pci_parity_count))
29612 + if (before_count != atomic_read_unchecked(&pci_parity_count))
29613 panic("EDAC: PCI Parity Error");
29614 }
29615 }
29616 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
29617 index c0510b3..6e2a954 100644
29618 --- a/drivers/edac/i3000_edac.c
29619 +++ b/drivers/edac/i3000_edac.c
29620 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
29621 edac_mc_free(mci);
29622 }
29623
29624 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
29625 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
29626 {
29627 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29628 I3000},
29629 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
29630 index 73f55e200..5faaf59 100644
29631 --- a/drivers/edac/i3200_edac.c
29632 +++ b/drivers/edac/i3200_edac.c
29633 @@ -445,7 +445,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
29634 edac_mc_free(mci);
29635 }
29636
29637 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
29638 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
29639 {
29640 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29641 I3200},
29642 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
29643 index 4dc3ac2..67d05a6 100644
29644 --- a/drivers/edac/i5000_edac.c
29645 +++ b/drivers/edac/i5000_edac.c
29646 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
29647 *
29648 * The "E500P" device is the first device supported.
29649 */
29650 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
29651 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
29652 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
29653 .driver_data = I5000P},
29654
29655 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
29656 index bcbdeec..9886d16 100644
29657 --- a/drivers/edac/i5100_edac.c
29658 +++ b/drivers/edac/i5100_edac.c
29659 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
29660 edac_mc_free(mci);
29661 }
29662
29663 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
29664 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
29665 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
29666 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
29667 { 0, }
29668 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
29669 index 74d6ec34..baff517 100644
29670 --- a/drivers/edac/i5400_edac.c
29671 +++ b/drivers/edac/i5400_edac.c
29672 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
29673 *
29674 * The "E500P" device is the first device supported.
29675 */
29676 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
29677 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
29678 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
29679 {0,} /* 0 terminated list. */
29680 };
29681 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
29682 index 6104dba..e7ea8e1 100644
29683 --- a/drivers/edac/i7300_edac.c
29684 +++ b/drivers/edac/i7300_edac.c
29685 @@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
29686 *
29687 * Has only 8086:360c PCI ID
29688 */
29689 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
29690 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
29691 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
29692 {0,} /* 0 terminated list. */
29693 };
29694 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
29695 index 8568d9b..42b2fa8 100644
29696 --- a/drivers/edac/i7core_edac.c
29697 +++ b/drivers/edac/i7core_edac.c
29698 @@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
29699 /*
29700 * pci_device_id table for which devices we are looking for
29701 */
29702 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
29703 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
29704 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
29705 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
29706 {0,} /* 0 terminated list. */
29707 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
29708 index 4329d39..f3022ef 100644
29709 --- a/drivers/edac/i82443bxgx_edac.c
29710 +++ b/drivers/edac/i82443bxgx_edac.c
29711 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
29712
29713 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
29714
29715 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
29716 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
29717 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
29718 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
29719 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
29720 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
29721 index 931a057..fd28340 100644
29722 --- a/drivers/edac/i82860_edac.c
29723 +++ b/drivers/edac/i82860_edac.c
29724 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
29725 edac_mc_free(mci);
29726 }
29727
29728 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
29729 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
29730 {
29731 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29732 I82860},
29733 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
29734 index 33864c6..01edc61 100644
29735 --- a/drivers/edac/i82875p_edac.c
29736 +++ b/drivers/edac/i82875p_edac.c
29737 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
29738 edac_mc_free(mci);
29739 }
29740
29741 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
29742 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
29743 {
29744 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29745 I82875P},
29746 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
29747 index 4184e01..dcb2cd3 100644
29748 --- a/drivers/edac/i82975x_edac.c
29749 +++ b/drivers/edac/i82975x_edac.c
29750 @@ -612,7 +612,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
29751 edac_mc_free(mci);
29752 }
29753
29754 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
29755 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
29756 {
29757 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29758 I82975X
29759 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29760 index 0106747..0b40417 100644
29761 --- a/drivers/edac/mce_amd.h
29762 +++ b/drivers/edac/mce_amd.h
29763 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
29764 bool (*dc_mce)(u16, u8);
29765 bool (*ic_mce)(u16, u8);
29766 bool (*nb_mce)(u16, u8);
29767 -};
29768 +} __no_const;
29769
29770 void amd_report_gart_errors(bool);
29771 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29772 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
29773 index e294e1b..a41b05b 100644
29774 --- a/drivers/edac/r82600_edac.c
29775 +++ b/drivers/edac/r82600_edac.c
29776 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
29777 edac_mc_free(mci);
29778 }
29779
29780 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
29781 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
29782 {
29783 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
29784 },
29785 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
29786 index 1dc118d..8c68af9 100644
29787 --- a/drivers/edac/sb_edac.c
29788 +++ b/drivers/edac/sb_edac.c
29789 @@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
29790 /*
29791 * pci_device_id table for which devices we are looking for
29792 */
29793 -static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
29794 +static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
29795 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
29796 {0,} /* 0 terminated list. */
29797 };
29798 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
29799 index b6f47de..c5acf3a 100644
29800 --- a/drivers/edac/x38_edac.c
29801 +++ b/drivers/edac/x38_edac.c
29802 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
29803 edac_mc_free(mci);
29804 }
29805
29806 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
29807 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
29808 {
29809 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29810 X38},
29811 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29812 index 85661b0..c784559a 100644
29813 --- a/drivers/firewire/core-card.c
29814 +++ b/drivers/firewire/core-card.c
29815 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
29816
29817 void fw_core_remove_card(struct fw_card *card)
29818 {
29819 - struct fw_card_driver dummy_driver = dummy_driver_template;
29820 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
29821
29822 card->driver->update_phy_reg(card, 4,
29823 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29824 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29825 index 4799393..37bd3ab 100644
29826 --- a/drivers/firewire/core-cdev.c
29827 +++ b/drivers/firewire/core-cdev.c
29828 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
29829 int ret;
29830
29831 if ((request->channels == 0 && request->bandwidth == 0) ||
29832 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29833 - request->bandwidth < 0)
29834 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29835 return -EINVAL;
29836
29837 r = kmalloc(sizeof(*r), GFP_KERNEL);
29838 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29839 index 855ab3f..11f4bbd 100644
29840 --- a/drivers/firewire/core-transaction.c
29841 +++ b/drivers/firewire/core-transaction.c
29842 @@ -37,6 +37,7 @@
29843 #include <linux/timer.h>
29844 #include <linux/types.h>
29845 #include <linux/workqueue.h>
29846 +#include <linux/sched.h>
29847
29848 #include <asm/byteorder.h>
29849
29850 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
29851 index b45be57..5fad18b 100644
29852 --- a/drivers/firewire/core.h
29853 +++ b/drivers/firewire/core.h
29854 @@ -101,6 +101,7 @@ struct fw_card_driver {
29855
29856 int (*stop_iso)(struct fw_iso_context *ctx);
29857 };
29858 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29859
29860 void fw_card_initialize(struct fw_card *card,
29861 const struct fw_card_driver *driver, struct device *device);
29862 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
29863 index 153980b..4b4d046 100644
29864 --- a/drivers/firmware/dmi_scan.c
29865 +++ b/drivers/firmware/dmi_scan.c
29866 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
29867 }
29868 }
29869 else {
29870 - /*
29871 - * no iounmap() for that ioremap(); it would be a no-op, but
29872 - * it's so early in setup that sucker gets confused into doing
29873 - * what it shouldn't if we actually call it.
29874 - */
29875 p = dmi_ioremap(0xF0000, 0x10000);
29876 if (p == NULL)
29877 goto error;
29878 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
29879 if (buf == NULL)
29880 return -1;
29881
29882 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
29883 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
29884
29885 iounmap(buf);
29886 return 0;
29887 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
29888 index 82d5c20..44a7177 100644
29889 --- a/drivers/gpio/gpio-vr41xx.c
29890 +++ b/drivers/gpio/gpio-vr41xx.c
29891 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29892 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29893 maskl, pendl, maskh, pendh);
29894
29895 - atomic_inc(&irq_err_count);
29896 + atomic_inc_unchecked(&irq_err_count);
29897
29898 return -EINVAL;
29899 }
29900 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
29901 index 84a4a80..ce0306e 100644
29902 --- a/drivers/gpu/drm/drm_crtc_helper.c
29903 +++ b/drivers/gpu/drm/drm_crtc_helper.c
29904 @@ -280,7 +280,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
29905 struct drm_crtc *tmp;
29906 int crtc_mask = 1;
29907
29908 - WARN(!crtc, "checking null crtc?\n");
29909 + BUG_ON(!crtc);
29910
29911 dev = crtc->dev;
29912
29913 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
29914 index ebf7d3f..d64c436 100644
29915 --- a/drivers/gpu/drm/drm_drv.c
29916 +++ b/drivers/gpu/drm/drm_drv.c
29917 @@ -312,7 +312,7 @@ module_exit(drm_core_exit);
29918 /**
29919 * Copy and IOCTL return string to user space
29920 */
29921 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
29922 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
29923 {
29924 int len;
29925
29926 @@ -391,7 +391,7 @@ long drm_ioctl(struct file *filp,
29927
29928 dev = file_priv->minor->dev;
29929 atomic_inc(&dev->ioctl_count);
29930 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29931 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29932 ++file_priv->ioctl_count;
29933
29934 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29935 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
29936 index 6263b01..7987f55 100644
29937 --- a/drivers/gpu/drm/drm_fops.c
29938 +++ b/drivers/gpu/drm/drm_fops.c
29939 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
29940 }
29941
29942 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29943 - atomic_set(&dev->counts[i], 0);
29944 + atomic_set_unchecked(&dev->counts[i], 0);
29945
29946 dev->sigdata.lock = NULL;
29947
29948 @@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
29949
29950 retcode = drm_open_helper(inode, filp, dev);
29951 if (!retcode) {
29952 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29953 - if (!dev->open_count++)
29954 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29955 + if (local_inc_return(&dev->open_count) == 1)
29956 retcode = drm_setup(dev);
29957 }
29958 if (!retcode) {
29959 @@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
29960
29961 mutex_lock(&drm_global_mutex);
29962
29963 - DRM_DEBUG("open_count = %d\n", dev->open_count);
29964 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
29965
29966 if (dev->driver->preclose)
29967 dev->driver->preclose(dev, file_priv);
29968 @@ -482,10 +482,10 @@ int drm_release(struct inode *inode, struct file *filp)
29969 * Begin inline drm_release
29970 */
29971
29972 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29973 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
29974 task_pid_nr(current),
29975 (long)old_encode_dev(file_priv->minor->device),
29976 - dev->open_count);
29977 + local_read(&dev->open_count));
29978
29979 /* Release any auth tokens that might point to this file_priv,
29980 (do that under the drm_global_mutex) */
29981 @@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
29982 * End inline drm_release
29983 */
29984
29985 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29986 - if (!--dev->open_count) {
29987 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29988 + if (local_dec_and_test(&dev->open_count)) {
29989 if (atomic_read(&dev->ioctl_count)) {
29990 DRM_ERROR("Device busy: %d\n",
29991 atomic_read(&dev->ioctl_count));
29992 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
29993 index c87dc96..326055d 100644
29994 --- a/drivers/gpu/drm/drm_global.c
29995 +++ b/drivers/gpu/drm/drm_global.c
29996 @@ -36,7 +36,7 @@
29997 struct drm_global_item {
29998 struct mutex mutex;
29999 void *object;
30000 - int refcount;
30001 + atomic_t refcount;
30002 };
30003
30004 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30005 @@ -49,7 +49,7 @@ void drm_global_init(void)
30006 struct drm_global_item *item = &glob[i];
30007 mutex_init(&item->mutex);
30008 item->object = NULL;
30009 - item->refcount = 0;
30010 + atomic_set(&item->refcount, 0);
30011 }
30012 }
30013
30014 @@ -59,7 +59,7 @@ void drm_global_release(void)
30015 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30016 struct drm_global_item *item = &glob[i];
30017 BUG_ON(item->object != NULL);
30018 - BUG_ON(item->refcount != 0);
30019 + BUG_ON(atomic_read(&item->refcount) != 0);
30020 }
30021 }
30022
30023 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30024 void *object;
30025
30026 mutex_lock(&item->mutex);
30027 - if (item->refcount == 0) {
30028 + if (atomic_read(&item->refcount) == 0) {
30029 item->object = kzalloc(ref->size, GFP_KERNEL);
30030 if (unlikely(item->object == NULL)) {
30031 ret = -ENOMEM;
30032 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30033 goto out_err;
30034
30035 }
30036 - ++item->refcount;
30037 + atomic_inc(&item->refcount);
30038 ref->object = item->object;
30039 object = item->object;
30040 mutex_unlock(&item->mutex);
30041 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30042 struct drm_global_item *item = &glob[ref->global_type];
30043
30044 mutex_lock(&item->mutex);
30045 - BUG_ON(item->refcount == 0);
30046 + BUG_ON(atomic_read(&item->refcount) == 0);
30047 BUG_ON(ref->object != item->object);
30048 - if (--item->refcount == 0) {
30049 + if (atomic_dec_and_test(&item->refcount)) {
30050 ref->release(ref);
30051 item->object = NULL;
30052 }
30053 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30054 index ab1162d..42587b2 100644
30055 --- a/drivers/gpu/drm/drm_info.c
30056 +++ b/drivers/gpu/drm/drm_info.c
30057 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30058 struct drm_local_map *map;
30059 struct drm_map_list *r_list;
30060
30061 - /* Hardcoded from _DRM_FRAME_BUFFER,
30062 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30063 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30064 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30065 + static const char * const types[] = {
30066 + [_DRM_FRAME_BUFFER] = "FB",
30067 + [_DRM_REGISTERS] = "REG",
30068 + [_DRM_SHM] = "SHM",
30069 + [_DRM_AGP] = "AGP",
30070 + [_DRM_SCATTER_GATHER] = "SG",
30071 + [_DRM_CONSISTENT] = "PCI",
30072 + [_DRM_GEM] = "GEM" };
30073 const char *type;
30074 int i;
30075
30076 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30077 map = r_list->map;
30078 if (!map)
30079 continue;
30080 - if (map->type < 0 || map->type > 5)
30081 + if (map->type >= ARRAY_SIZE(types))
30082 type = "??";
30083 else
30084 type = types[map->type];
30085 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30086 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30087 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30088 vma->vm_flags & VM_IO ? 'i' : '-',
30089 +#ifdef CONFIG_GRKERNSEC_HIDESYM
30090 + 0);
30091 +#else
30092 vma->vm_pgoff);
30093 +#endif
30094
30095 #if defined(__i386__)
30096 pgprot = pgprot_val(vma->vm_page_prot);
30097 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30098 index 637fcc3..e890b33 100644
30099 --- a/drivers/gpu/drm/drm_ioc32.c
30100 +++ b/drivers/gpu/drm/drm_ioc32.c
30101 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30102 request = compat_alloc_user_space(nbytes);
30103 if (!access_ok(VERIFY_WRITE, request, nbytes))
30104 return -EFAULT;
30105 - list = (struct drm_buf_desc *) (request + 1);
30106 + list = (struct drm_buf_desc __user *) (request + 1);
30107
30108 if (__put_user(count, &request->count)
30109 || __put_user(list, &request->list))
30110 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30111 request = compat_alloc_user_space(nbytes);
30112 if (!access_ok(VERIFY_WRITE, request, nbytes))
30113 return -EFAULT;
30114 - list = (struct drm_buf_pub *) (request + 1);
30115 + list = (struct drm_buf_pub __user *) (request + 1);
30116
30117 if (__put_user(count, &request->count)
30118 || __put_user(list, &request->list))
30119 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30120 index 956fd38..e52167a 100644
30121 --- a/drivers/gpu/drm/drm_ioctl.c
30122 +++ b/drivers/gpu/drm/drm_ioctl.c
30123 @@ -251,7 +251,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30124 stats->data[i].value =
30125 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30126 else
30127 - stats->data[i].value = atomic_read(&dev->counts[i]);
30128 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30129 stats->data[i].type = dev->types[i];
30130 }
30131
30132 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30133 index c79c713..2048588 100644
30134 --- a/drivers/gpu/drm/drm_lock.c
30135 +++ b/drivers/gpu/drm/drm_lock.c
30136 @@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30137 if (drm_lock_take(&master->lock, lock->context)) {
30138 master->lock.file_priv = file_priv;
30139 master->lock.lock_time = jiffies;
30140 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30141 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30142 break; /* Got lock */
30143 }
30144
30145 @@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30146 return -EINVAL;
30147 }
30148
30149 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30150 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30151
30152 if (drm_lock_free(&master->lock, lock->context)) {
30153 /* FIXME: Should really bail out here. */
30154 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30155 index 7f4b4e1..bf4def2 100644
30156 --- a/drivers/gpu/drm/i810/i810_dma.c
30157 +++ b/drivers/gpu/drm/i810/i810_dma.c
30158 @@ -948,8 +948,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30159 dma->buflist[vertex->idx],
30160 vertex->discard, vertex->used);
30161
30162 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30163 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30164 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30165 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30166 sarea_priv->last_enqueue = dev_priv->counter - 1;
30167 sarea_priv->last_dispatch = (int)hw_status[5];
30168
30169 @@ -1109,8 +1109,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30170 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30171 mc->last_render);
30172
30173 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30174 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30175 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30176 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30177 sarea_priv->last_enqueue = dev_priv->counter - 1;
30178 sarea_priv->last_dispatch = (int)hw_status[5];
30179
30180 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30181 index c9339f4..f5e1b9d 100644
30182 --- a/drivers/gpu/drm/i810/i810_drv.h
30183 +++ b/drivers/gpu/drm/i810/i810_drv.h
30184 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30185 int page_flipping;
30186
30187 wait_queue_head_t irq_queue;
30188 - atomic_t irq_received;
30189 - atomic_t irq_emitted;
30190 + atomic_unchecked_t irq_received;
30191 + atomic_unchecked_t irq_emitted;
30192
30193 int front_offset;
30194 } drm_i810_private_t;
30195 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30196 index deaa657..e0fd296 100644
30197 --- a/drivers/gpu/drm/i915/i915_debugfs.c
30198 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
30199 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30200 I915_READ(GTIMR));
30201 }
30202 seq_printf(m, "Interrupts received: %d\n",
30203 - atomic_read(&dev_priv->irq_received));
30204 + atomic_read_unchecked(&dev_priv->irq_received));
30205 for (i = 0; i < I915_NUM_RINGS; i++) {
30206 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30207 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30208 @@ -1321,7 +1321,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
30209 return ret;
30210
30211 if (opregion->header)
30212 - seq_write(m, opregion->header, OPREGION_SIZE);
30213 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30214
30215 mutex_unlock(&dev->struct_mutex);
30216
30217 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30218 index ddfe3d9..f6e6b21 100644
30219 --- a/drivers/gpu/drm/i915/i915_dma.c
30220 +++ b/drivers/gpu/drm/i915/i915_dma.c
30221 @@ -1175,7 +1175,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30222 bool can_switch;
30223
30224 spin_lock(&dev->count_lock);
30225 - can_switch = (dev->open_count == 0);
30226 + can_switch = (local_read(&dev->open_count) == 0);
30227 spin_unlock(&dev->count_lock);
30228 return can_switch;
30229 }
30230 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30231 index 9689ca3..294f9c1 100644
30232 --- a/drivers/gpu/drm/i915/i915_drv.h
30233 +++ b/drivers/gpu/drm/i915/i915_drv.h
30234 @@ -231,7 +231,7 @@ struct drm_i915_display_funcs {
30235 /* render clock increase/decrease */
30236 /* display clock increase/decrease */
30237 /* pll clock increase/decrease */
30238 -};
30239 +} __no_const;
30240
30241 struct intel_device_info {
30242 u8 gen;
30243 @@ -320,7 +320,7 @@ typedef struct drm_i915_private {
30244 int current_page;
30245 int page_flipping;
30246
30247 - atomic_t irq_received;
30248 + atomic_unchecked_t irq_received;
30249
30250 /* protects the irq masks */
30251 spinlock_t irq_lock;
30252 @@ -896,7 +896,7 @@ struct drm_i915_gem_object {
30253 * will be page flipped away on the next vblank. When it
30254 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30255 */
30256 - atomic_t pending_flip;
30257 + atomic_unchecked_t pending_flip;
30258 };
30259
30260 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30261 @@ -1276,7 +1276,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
30262 extern void intel_teardown_gmbus(struct drm_device *dev);
30263 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30264 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30265 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30266 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30267 {
30268 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30269 }
30270 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30271 index 65e1f00..a30ef00 100644
30272 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30273 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30274 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30275 i915_gem_clflush_object(obj);
30276
30277 if (obj->base.pending_write_domain)
30278 - cd->flips |= atomic_read(&obj->pending_flip);
30279 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30280
30281 /* The actual obj->write_domain will be updated with
30282 * pending_write_domain after we emit the accumulated flush for all
30283 @@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30284
30285 static int
30286 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30287 - int count)
30288 + unsigned int count)
30289 {
30290 - int i;
30291 + unsigned int i;
30292
30293 for (i = 0; i < count; i++) {
30294 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30295 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30296 index 5bd4361..0241a42 100644
30297 --- a/drivers/gpu/drm/i915/i915_irq.c
30298 +++ b/drivers/gpu/drm/i915/i915_irq.c
30299 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30300 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30301 struct drm_i915_master_private *master_priv;
30302
30303 - atomic_inc(&dev_priv->irq_received);
30304 + atomic_inc_unchecked(&dev_priv->irq_received);
30305
30306 /* disable master interrupt before clearing iir */
30307 de_ier = I915_READ(DEIER);
30308 @@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30309 struct drm_i915_master_private *master_priv;
30310 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
30311
30312 - atomic_inc(&dev_priv->irq_received);
30313 + atomic_inc_unchecked(&dev_priv->irq_received);
30314
30315 if (IS_GEN6(dev))
30316 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
30317 @@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
30318 int ret = IRQ_NONE, pipe;
30319 bool blc_event = false;
30320
30321 - atomic_inc(&dev_priv->irq_received);
30322 + atomic_inc_unchecked(&dev_priv->irq_received);
30323
30324 iir = I915_READ(IIR);
30325
30326 @@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30327 {
30328 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30329
30330 - atomic_set(&dev_priv->irq_received, 0);
30331 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30332
30333 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30334 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30335 @@ -1932,7 +1932,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
30336 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30337 int pipe;
30338
30339 - atomic_set(&dev_priv->irq_received, 0);
30340 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30341
30342 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30343 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30344 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30345 index 2163818..cede019 100644
30346 --- a/drivers/gpu/drm/i915/intel_display.c
30347 +++ b/drivers/gpu/drm/i915/intel_display.c
30348 @@ -2238,7 +2238,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
30349
30350 wait_event(dev_priv->pending_flip_queue,
30351 atomic_read(&dev_priv->mm.wedged) ||
30352 - atomic_read(&obj->pending_flip) == 0);
30353 + atomic_read_unchecked(&obj->pending_flip) == 0);
30354
30355 /* Big Hammer, we also need to ensure that any pending
30356 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30357 @@ -2859,7 +2859,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
30358 obj = to_intel_framebuffer(crtc->fb)->obj;
30359 dev_priv = crtc->dev->dev_private;
30360 wait_event(dev_priv->pending_flip_queue,
30361 - atomic_read(&obj->pending_flip) == 0);
30362 + atomic_read_unchecked(&obj->pending_flip) == 0);
30363 }
30364
30365 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
30366 @@ -7171,7 +7171,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30367
30368 atomic_clear_mask(1 << intel_crtc->plane,
30369 &obj->pending_flip.counter);
30370 - if (atomic_read(&obj->pending_flip) == 0)
30371 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
30372 wake_up(&dev_priv->pending_flip_queue);
30373
30374 schedule_work(&work->work);
30375 @@ -7354,7 +7354,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
30376 OUT_RING(fb->pitches[0] | obj->tiling_mode);
30377 OUT_RING(obj->gtt_offset);
30378
30379 - pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
30380 + /* Contrary to the suggestions in the documentation,
30381 + * "Enable Panel Fitter" does not seem to be required when page
30382 + * flipping with a non-native mode, and worse causes a normal
30383 + * modeset to fail.
30384 + * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
30385 + */
30386 + pf = 0;
30387 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
30388 OUT_RING(pf | pipesrc);
30389 ADVANCE_LP_RING();
30390 @@ -7461,7 +7467,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30391 /* Block clients from rendering to the new back buffer until
30392 * the flip occurs and the object is no longer visible.
30393 */
30394 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30395 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30396
30397 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30398 if (ret)
30399 @@ -7475,7 +7481,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30400 return 0;
30401
30402 cleanup_pending:
30403 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30404 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30405 drm_gem_object_unreference(&work->old_fb_obj->base);
30406 drm_gem_object_unreference(&obj->base);
30407 mutex_unlock(&dev->struct_mutex);
30408 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30409 index 54558a0..2d97005 100644
30410 --- a/drivers/gpu/drm/mga/mga_drv.h
30411 +++ b/drivers/gpu/drm/mga/mga_drv.h
30412 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30413 u32 clear_cmd;
30414 u32 maccess;
30415
30416 - atomic_t vbl_received; /**< Number of vblanks received. */
30417 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30418 wait_queue_head_t fence_queue;
30419 - atomic_t last_fence_retired;
30420 + atomic_unchecked_t last_fence_retired;
30421 u32 next_fence_to_post;
30422
30423 unsigned int fb_cpp;
30424 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30425 index 2581202..f230a8d9 100644
30426 --- a/drivers/gpu/drm/mga/mga_irq.c
30427 +++ b/drivers/gpu/drm/mga/mga_irq.c
30428 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30429 if (crtc != 0)
30430 return 0;
30431
30432 - return atomic_read(&dev_priv->vbl_received);
30433 + return atomic_read_unchecked(&dev_priv->vbl_received);
30434 }
30435
30436
30437 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30438 /* VBLANK interrupt */
30439 if (status & MGA_VLINEPEN) {
30440 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30441 - atomic_inc(&dev_priv->vbl_received);
30442 + atomic_inc_unchecked(&dev_priv->vbl_received);
30443 drm_handle_vblank(dev, 0);
30444 handled = 1;
30445 }
30446 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30447 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30448 MGA_WRITE(MGA_PRIMEND, prim_end);
30449
30450 - atomic_inc(&dev_priv->last_fence_retired);
30451 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
30452 DRM_WAKEUP(&dev_priv->fence_queue);
30453 handled = 1;
30454 }
30455 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30456 * using fences.
30457 */
30458 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30459 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30460 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30461 - *sequence) <= (1 << 23)));
30462
30463 *sequence = cur_fence;
30464 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30465 index e5cbead..6c354a3 100644
30466 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30467 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30468 @@ -199,7 +199,7 @@ struct methods {
30469 const char desc[8];
30470 void (*loadbios)(struct drm_device *, uint8_t *);
30471 const bool rw;
30472 -};
30473 +} __do_const;
30474
30475 static struct methods shadow_methods[] = {
30476 { "PRAMIN", load_vbios_pramin, true },
30477 @@ -5290,7 +5290,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30478 struct bit_table {
30479 const char id;
30480 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30481 -};
30482 +} __no_const;
30483
30484 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30485
30486 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30487 index b827098..c31a797 100644
30488 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30489 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30490 @@ -242,7 +242,7 @@ struct nouveau_channel {
30491 struct list_head pending;
30492 uint32_t sequence;
30493 uint32_t sequence_ack;
30494 - atomic_t last_sequence_irq;
30495 + atomic_unchecked_t last_sequence_irq;
30496 struct nouveau_vma vma;
30497 } fence;
30498
30499 @@ -323,7 +323,7 @@ struct nouveau_exec_engine {
30500 u32 handle, u16 class);
30501 void (*set_tile_region)(struct drm_device *dev, int i);
30502 void (*tlb_flush)(struct drm_device *, int engine);
30503 -};
30504 +} __no_const;
30505
30506 struct nouveau_instmem_engine {
30507 void *priv;
30508 @@ -345,13 +345,13 @@ struct nouveau_instmem_engine {
30509 struct nouveau_mc_engine {
30510 int (*init)(struct drm_device *dev);
30511 void (*takedown)(struct drm_device *dev);
30512 -};
30513 +} __no_const;
30514
30515 struct nouveau_timer_engine {
30516 int (*init)(struct drm_device *dev);
30517 void (*takedown)(struct drm_device *dev);
30518 uint64_t (*read)(struct drm_device *dev);
30519 -};
30520 +} __no_const;
30521
30522 struct nouveau_fb_engine {
30523 int num_tiles;
30524 @@ -566,7 +566,7 @@ struct nouveau_vram_engine {
30525 void (*put)(struct drm_device *, struct nouveau_mem **);
30526
30527 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30528 -};
30529 +} __no_const;
30530
30531 struct nouveau_engine {
30532 struct nouveau_instmem_engine instmem;
30533 @@ -714,7 +714,7 @@ struct drm_nouveau_private {
30534 struct drm_global_reference mem_global_ref;
30535 struct ttm_bo_global_ref bo_global_ref;
30536 struct ttm_bo_device bdev;
30537 - atomic_t validate_sequence;
30538 + atomic_unchecked_t validate_sequence;
30539 } ttm;
30540
30541 struct {
30542 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
30543 index 2f6daae..c9d7b9e 100644
30544 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30545 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30546 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
30547 if (USE_REFCNT(dev))
30548 sequence = nvchan_rd32(chan, 0x48);
30549 else
30550 - sequence = atomic_read(&chan->fence.last_sequence_irq);
30551 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30552
30553 if (chan->fence.sequence_ack == sequence)
30554 goto out;
30555 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
30556 return ret;
30557 }
30558
30559 - atomic_set(&chan->fence.last_sequence_irq, 0);
30560 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
30561 return 0;
30562 }
30563
30564 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30565 index 7ce3fde..cb3ea04 100644
30566 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30567 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30568 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30569 int trycnt = 0;
30570 int ret, i;
30571
30572 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30573 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30574 retry:
30575 if (++trycnt > 100000) {
30576 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30577 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30578 index f80c5e0..936baa7 100644
30579 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
30580 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30581 @@ -543,7 +543,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30582 bool can_switch;
30583
30584 spin_lock(&dev->count_lock);
30585 - can_switch = (dev->open_count == 0);
30586 + can_switch = (local_read(&dev->open_count) == 0);
30587 spin_unlock(&dev->count_lock);
30588 return can_switch;
30589 }
30590 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30591 index dbdea8e..cd6eeeb 100644
30592 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
30593 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30594 @@ -554,7 +554,7 @@ static int
30595 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30596 u32 class, u32 mthd, u32 data)
30597 {
30598 - atomic_set(&chan->fence.last_sequence_irq, data);
30599 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30600 return 0;
30601 }
30602
30603 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30604 index bcac90b..53bfc76 100644
30605 --- a/drivers/gpu/drm/r128/r128_cce.c
30606 +++ b/drivers/gpu/drm/r128/r128_cce.c
30607 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30608
30609 /* GH: Simple idle check.
30610 */
30611 - atomic_set(&dev_priv->idle_count, 0);
30612 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30613
30614 /* We don't support anything other than bus-mastering ring mode,
30615 * but the ring can be in either AGP or PCI space for the ring
30616 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30617 index 930c71b..499aded 100644
30618 --- a/drivers/gpu/drm/r128/r128_drv.h
30619 +++ b/drivers/gpu/drm/r128/r128_drv.h
30620 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30621 int is_pci;
30622 unsigned long cce_buffers_offset;
30623
30624 - atomic_t idle_count;
30625 + atomic_unchecked_t idle_count;
30626
30627 int page_flipping;
30628 int current_page;
30629 u32 crtc_offset;
30630 u32 crtc_offset_cntl;
30631
30632 - atomic_t vbl_received;
30633 + atomic_unchecked_t vbl_received;
30634
30635 u32 color_fmt;
30636 unsigned int front_offset;
30637 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30638 index 429d5a0..7e899ed 100644
30639 --- a/drivers/gpu/drm/r128/r128_irq.c
30640 +++ b/drivers/gpu/drm/r128/r128_irq.c
30641 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30642 if (crtc != 0)
30643 return 0;
30644
30645 - return atomic_read(&dev_priv->vbl_received);
30646 + return atomic_read_unchecked(&dev_priv->vbl_received);
30647 }
30648
30649 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30650 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30651 /* VBLANK interrupt */
30652 if (status & R128_CRTC_VBLANK_INT) {
30653 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30654 - atomic_inc(&dev_priv->vbl_received);
30655 + atomic_inc_unchecked(&dev_priv->vbl_received);
30656 drm_handle_vblank(dev, 0);
30657 return IRQ_HANDLED;
30658 }
30659 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30660 index a9e33ce..09edd4b 100644
30661 --- a/drivers/gpu/drm/r128/r128_state.c
30662 +++ b/drivers/gpu/drm/r128/r128_state.c
30663 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30664
30665 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30666 {
30667 - if (atomic_read(&dev_priv->idle_count) == 0)
30668 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30669 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30670 else
30671 - atomic_set(&dev_priv->idle_count, 0);
30672 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30673 }
30674
30675 #endif
30676 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30677 index 5a82b6b..9e69c73 100644
30678 --- a/drivers/gpu/drm/radeon/mkregtable.c
30679 +++ b/drivers/gpu/drm/radeon/mkregtable.c
30680 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30681 regex_t mask_rex;
30682 regmatch_t match[4];
30683 char buf[1024];
30684 - size_t end;
30685 + long end;
30686 int len;
30687 int done = 0;
30688 int r;
30689 unsigned o;
30690 struct offset *offset;
30691 char last_reg_s[10];
30692 - int last_reg;
30693 + unsigned long last_reg;
30694
30695 if (regcomp
30696 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30697 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30698 index 1668ec1..30ebdab 100644
30699 --- a/drivers/gpu/drm/radeon/radeon.h
30700 +++ b/drivers/gpu/drm/radeon/radeon.h
30701 @@ -250,7 +250,7 @@ struct radeon_fence_driver {
30702 uint32_t scratch_reg;
30703 uint64_t gpu_addr;
30704 volatile uint32_t *cpu_addr;
30705 - atomic_t seq;
30706 + atomic_unchecked_t seq;
30707 uint32_t last_seq;
30708 unsigned long last_jiffies;
30709 unsigned long last_timeout;
30710 @@ -752,7 +752,7 @@ struct r600_blit_cp_primitives {
30711 int x2, int y2);
30712 void (*draw_auto)(struct radeon_device *rdev);
30713 void (*set_default_state)(struct radeon_device *rdev);
30714 -};
30715 +} __no_const;
30716
30717 struct r600_blit {
30718 struct mutex mutex;
30719 @@ -1201,7 +1201,7 @@ struct radeon_asic {
30720 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
30721 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30722 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30723 -};
30724 +} __no_const;
30725
30726 /*
30727 * Asic structures
30728 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30729 index 49f7cb7..2fcb48f 100644
30730 --- a/drivers/gpu/drm/radeon/radeon_device.c
30731 +++ b/drivers/gpu/drm/radeon/radeon_device.c
30732 @@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30733 bool can_switch;
30734
30735 spin_lock(&dev->count_lock);
30736 - can_switch = (dev->open_count == 0);
30737 + can_switch = (local_read(&dev->open_count) == 0);
30738 spin_unlock(&dev->count_lock);
30739 return can_switch;
30740 }
30741 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30742 index a1b59ca..86f2d44 100644
30743 --- a/drivers/gpu/drm/radeon/radeon_drv.h
30744 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
30745 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30746
30747 /* SW interrupt */
30748 wait_queue_head_t swi_queue;
30749 - atomic_t swi_emitted;
30750 + atomic_unchecked_t swi_emitted;
30751 int vblank_crtc;
30752 uint32_t irq_enable_reg;
30753 uint32_t r500_disp_irq_reg;
30754 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30755 index 4bd36a3..e66fe9c 100644
30756 --- a/drivers/gpu/drm/radeon/radeon_fence.c
30757 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
30758 @@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30759 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
30760 return 0;
30761 }
30762 - fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
30763 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
30764 if (!rdev->ring[fence->ring].ready)
30765 /* FIXME: cp is not running assume everythings is done right
30766 * away
30767 @@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
30768 }
30769 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
30770 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
30771 - radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
30772 + radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
30773 rdev->fence_drv[ring].initialized = true;
30774 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
30775 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
30776 @@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
30777 rdev->fence_drv[ring].scratch_reg = -1;
30778 rdev->fence_drv[ring].cpu_addr = NULL;
30779 rdev->fence_drv[ring].gpu_addr = 0;
30780 - atomic_set(&rdev->fence_drv[ring].seq, 0);
30781 + atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
30782 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
30783 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
30784 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
30785 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30786 index 48b7cea..342236f 100644
30787 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30788 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30789 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30790 request = compat_alloc_user_space(sizeof(*request));
30791 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30792 || __put_user(req32.param, &request->param)
30793 - || __put_user((void __user *)(unsigned long)req32.value,
30794 + || __put_user((unsigned long)req32.value,
30795 &request->value))
30796 return -EFAULT;
30797
30798 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30799 index 00da384..32f972d 100644
30800 --- a/drivers/gpu/drm/radeon/radeon_irq.c
30801 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
30802 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30803 unsigned int ret;
30804 RING_LOCALS;
30805
30806 - atomic_inc(&dev_priv->swi_emitted);
30807 - ret = atomic_read(&dev_priv->swi_emitted);
30808 + atomic_inc_unchecked(&dev_priv->swi_emitted);
30809 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30810
30811 BEGIN_RING(4);
30812 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30813 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30814 drm_radeon_private_t *dev_priv =
30815 (drm_radeon_private_t *) dev->dev_private;
30816
30817 - atomic_set(&dev_priv->swi_emitted, 0);
30818 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30819 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30820
30821 dev->max_vblank_count = 0x001fffff;
30822 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
30823 index e8422ae..d22d4a8 100644
30824 --- a/drivers/gpu/drm/radeon/radeon_state.c
30825 +++ b/drivers/gpu/drm/radeon/radeon_state.c
30826 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
30827 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
30828 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
30829
30830 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30831 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30832 sarea_priv->nbox * sizeof(depth_boxes[0])))
30833 return -EFAULT;
30834
30835 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
30836 {
30837 drm_radeon_private_t *dev_priv = dev->dev_private;
30838 drm_radeon_getparam_t *param = data;
30839 - int value;
30840 + int value = 0;
30841
30842 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30843
30844 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
30845 index c421e77..e6bf2e8 100644
30846 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
30847 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
30848 @@ -842,8 +842,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30849 }
30850 if (unlikely(ttm_vm_ops == NULL)) {
30851 ttm_vm_ops = vma->vm_ops;
30852 - radeon_ttm_vm_ops = *ttm_vm_ops;
30853 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30854 + pax_open_kernel();
30855 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
30856 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30857 + pax_close_kernel();
30858 }
30859 vma->vm_ops = &radeon_ttm_vm_ops;
30860 return 0;
30861 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
30862 index f68dff2..8df955c 100644
30863 --- a/drivers/gpu/drm/radeon/rs690.c
30864 +++ b/drivers/gpu/drm/radeon/rs690.c
30865 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
30866 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30867 rdev->pm.sideport_bandwidth.full)
30868 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30869 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
30870 + read_delay_latency.full = dfixed_const(800 * 1000);
30871 read_delay_latency.full = dfixed_div(read_delay_latency,
30872 rdev->pm.igp_sideport_mclk);
30873 + a.full = dfixed_const(370);
30874 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
30875 } else {
30876 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30877 rdev->pm.k8_bandwidth.full)
30878 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30879 index 499debd..66fce72 100644
30880 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
30881 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30882 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
30883 static int ttm_pool_mm_shrink(struct shrinker *shrink,
30884 struct shrink_control *sc)
30885 {
30886 - static atomic_t start_pool = ATOMIC_INIT(0);
30887 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
30888 unsigned i;
30889 - unsigned pool_offset = atomic_add_return(1, &start_pool);
30890 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
30891 struct ttm_page_pool *pool;
30892 int shrink_pages = sc->nr_to_scan;
30893
30894 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
30895 index 88edacc..1e5412b 100644
30896 --- a/drivers/gpu/drm/via/via_drv.h
30897 +++ b/drivers/gpu/drm/via/via_drv.h
30898 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30899 typedef uint32_t maskarray_t[5];
30900
30901 typedef struct drm_via_irq {
30902 - atomic_t irq_received;
30903 + atomic_unchecked_t irq_received;
30904 uint32_t pending_mask;
30905 uint32_t enable_mask;
30906 wait_queue_head_t irq_queue;
30907 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
30908 struct timeval last_vblank;
30909 int last_vblank_valid;
30910 unsigned usec_per_vblank;
30911 - atomic_t vbl_received;
30912 + atomic_unchecked_t vbl_received;
30913 drm_via_state_t hc_state;
30914 char pci_buf[VIA_PCI_BUF_SIZE];
30915 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30916 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
30917 index d391f48..10c8ca3 100644
30918 --- a/drivers/gpu/drm/via/via_irq.c
30919 +++ b/drivers/gpu/drm/via/via_irq.c
30920 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
30921 if (crtc != 0)
30922 return 0;
30923
30924 - return atomic_read(&dev_priv->vbl_received);
30925 + return atomic_read_unchecked(&dev_priv->vbl_received);
30926 }
30927
30928 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30929 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30930
30931 status = VIA_READ(VIA_REG_INTERRUPT);
30932 if (status & VIA_IRQ_VBLANK_PENDING) {
30933 - atomic_inc(&dev_priv->vbl_received);
30934 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30935 + atomic_inc_unchecked(&dev_priv->vbl_received);
30936 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30937 do_gettimeofday(&cur_vblank);
30938 if (dev_priv->last_vblank_valid) {
30939 dev_priv->usec_per_vblank =
30940 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30941 dev_priv->last_vblank = cur_vblank;
30942 dev_priv->last_vblank_valid = 1;
30943 }
30944 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30945 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30946 DRM_DEBUG("US per vblank is: %u\n",
30947 dev_priv->usec_per_vblank);
30948 }
30949 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30950
30951 for (i = 0; i < dev_priv->num_irqs; ++i) {
30952 if (status & cur_irq->pending_mask) {
30953 - atomic_inc(&cur_irq->irq_received);
30954 + atomic_inc_unchecked(&cur_irq->irq_received);
30955 DRM_WAKEUP(&cur_irq->irq_queue);
30956 handled = 1;
30957 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
30958 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
30959 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30960 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30961 masks[irq][4]));
30962 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30963 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30964 } else {
30965 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30966 (((cur_irq_sequence =
30967 - atomic_read(&cur_irq->irq_received)) -
30968 + atomic_read_unchecked(&cur_irq->irq_received)) -
30969 *sequence) <= (1 << 23)));
30970 }
30971 *sequence = cur_irq_sequence;
30972 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
30973 }
30974
30975 for (i = 0; i < dev_priv->num_irqs; ++i) {
30976 - atomic_set(&cur_irq->irq_received, 0);
30977 + atomic_set_unchecked(&cur_irq->irq_received, 0);
30978 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30979 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30980 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30981 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
30982 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30983 case VIA_IRQ_RELATIVE:
30984 irqwait->request.sequence +=
30985 - atomic_read(&cur_irq->irq_received);
30986 + atomic_read_unchecked(&cur_irq->irq_received);
30987 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30988 case VIA_IRQ_ABSOLUTE:
30989 break;
30990 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30991 index dc27970..f18b008 100644
30992 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30993 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30994 @@ -260,7 +260,7 @@ struct vmw_private {
30995 * Fencing and IRQs.
30996 */
30997
30998 - atomic_t marker_seq;
30999 + atomic_unchecked_t marker_seq;
31000 wait_queue_head_t fence_queue;
31001 wait_queue_head_t fifo_queue;
31002 int fence_queue_waiters; /* Protected by hw_mutex */
31003 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31004 index a0c2f12..68ae6cb 100644
31005 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31006 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31007 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31008 (unsigned int) min,
31009 (unsigned int) fifo->capabilities);
31010
31011 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31012 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31013 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31014 vmw_marker_queue_init(&fifo->marker_queue);
31015 return vmw_fifo_send_fence(dev_priv, &dummy);
31016 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31017 if (reserveable)
31018 iowrite32(bytes, fifo_mem +
31019 SVGA_FIFO_RESERVED);
31020 - return fifo_mem + (next_cmd >> 2);
31021 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31022 } else {
31023 need_bounce = true;
31024 }
31025 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31026
31027 fm = vmw_fifo_reserve(dev_priv, bytes);
31028 if (unlikely(fm == NULL)) {
31029 - *seqno = atomic_read(&dev_priv->marker_seq);
31030 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31031 ret = -ENOMEM;
31032 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31033 false, 3*HZ);
31034 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31035 }
31036
31037 do {
31038 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31039 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31040 } while (*seqno == 0);
31041
31042 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31043 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31044 index cabc95f..14b3d77 100644
31045 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31046 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31047 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31048 * emitted. Then the fence is stale and signaled.
31049 */
31050
31051 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31052 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31053 > VMW_FENCE_WRAP);
31054
31055 return ret;
31056 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31057
31058 if (fifo_idle)
31059 down_read(&fifo_state->rwsem);
31060 - signal_seq = atomic_read(&dev_priv->marker_seq);
31061 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31062 ret = 0;
31063
31064 for (;;) {
31065 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31066 index 8a8725c..afed796 100644
31067 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31068 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31069 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31070 while (!vmw_lag_lt(queue, us)) {
31071 spin_lock(&queue->lock);
31072 if (list_empty(&queue->head))
31073 - seqno = atomic_read(&dev_priv->marker_seq);
31074 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31075 else {
31076 marker = list_first_entry(&queue->head,
31077 struct vmw_marker, head);
31078 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31079 index 75dbe34..f9204a8 100644
31080 --- a/drivers/hid/hid-core.c
31081 +++ b/drivers/hid/hid-core.c
31082 @@ -2021,7 +2021,7 @@ static bool hid_ignore(struct hid_device *hdev)
31083
31084 int hid_add_device(struct hid_device *hdev)
31085 {
31086 - static atomic_t id = ATOMIC_INIT(0);
31087 + static atomic_unchecked_t id = ATOMIC_INIT(0);
31088 int ret;
31089
31090 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31091 @@ -2036,7 +2036,7 @@ int hid_add_device(struct hid_device *hdev)
31092 /* XXX hack, any other cleaner solution after the driver core
31093 * is converted to allow more than 20 bytes as the device name? */
31094 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31095 - hdev->vendor, hdev->product, atomic_inc_return(&id));
31096 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31097
31098 hid_debug_register(hdev, dev_name(&hdev->dev));
31099 ret = device_add(&hdev->dev);
31100 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31101 index b1ec0e2..c295a61 100644
31102 --- a/drivers/hid/usbhid/hiddev.c
31103 +++ b/drivers/hid/usbhid/hiddev.c
31104 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31105 break;
31106
31107 case HIDIOCAPPLICATION:
31108 - if (arg < 0 || arg >= hid->maxapplication)
31109 + if (arg >= hid->maxapplication)
31110 break;
31111
31112 for (i = 0; i < hid->maxcollection; i++)
31113 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31114 index 4065374..10ed7dc 100644
31115 --- a/drivers/hv/channel.c
31116 +++ b/drivers/hv/channel.c
31117 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31118 int ret = 0;
31119 int t;
31120
31121 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31122 - atomic_inc(&vmbus_connection.next_gpadl_handle);
31123 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31124 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31125
31126 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31127 if (ret)
31128 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31129 index 12aa97f..c0679f7 100644
31130 --- a/drivers/hv/hv.c
31131 +++ b/drivers/hv/hv.c
31132 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31133 u64 output_address = (output) ? virt_to_phys(output) : 0;
31134 u32 output_address_hi = output_address >> 32;
31135 u32 output_address_lo = output_address & 0xFFFFFFFF;
31136 - void *hypercall_page = hv_context.hypercall_page;
31137 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31138
31139 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31140 "=a"(hv_status_lo) : "d" (control_hi),
31141 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31142 index 6d7d286..92b0873 100644
31143 --- a/drivers/hv/hyperv_vmbus.h
31144 +++ b/drivers/hv/hyperv_vmbus.h
31145 @@ -556,7 +556,7 @@ enum vmbus_connect_state {
31146 struct vmbus_connection {
31147 enum vmbus_connect_state conn_state;
31148
31149 - atomic_t next_gpadl_handle;
31150 + atomic_unchecked_t next_gpadl_handle;
31151
31152 /*
31153 * Represents channel interrupts. Each bit position represents a
31154 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31155 index a220e57..428f54d 100644
31156 --- a/drivers/hv/vmbus_drv.c
31157 +++ b/drivers/hv/vmbus_drv.c
31158 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31159 {
31160 int ret = 0;
31161
31162 - static atomic_t device_num = ATOMIC_INIT(0);
31163 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31164
31165 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31166 - atomic_inc_return(&device_num));
31167 + atomic_inc_return_unchecked(&device_num));
31168
31169 child_device_obj->device.bus = &hv_bus;
31170 child_device_obj->device.parent = &hv_acpi_dev->dev;
31171 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31172 index 554f046..f8b4729 100644
31173 --- a/drivers/hwmon/acpi_power_meter.c
31174 +++ b/drivers/hwmon/acpi_power_meter.c
31175 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31176 return res;
31177
31178 temp /= 1000;
31179 - if (temp < 0)
31180 - return -EINVAL;
31181
31182 mutex_lock(&resource->lock);
31183 resource->trip[attr->index - 7] = temp;
31184 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31185 index 91fdd1f..b66a686 100644
31186 --- a/drivers/hwmon/sht15.c
31187 +++ b/drivers/hwmon/sht15.c
31188 @@ -166,7 +166,7 @@ struct sht15_data {
31189 int supply_uV;
31190 bool supply_uV_valid;
31191 struct work_struct update_supply_work;
31192 - atomic_t interrupt_handled;
31193 + atomic_unchecked_t interrupt_handled;
31194 };
31195
31196 /**
31197 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31198 return ret;
31199
31200 gpio_direction_input(data->pdata->gpio_data);
31201 - atomic_set(&data->interrupt_handled, 0);
31202 + atomic_set_unchecked(&data->interrupt_handled, 0);
31203
31204 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31205 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31206 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31207 /* Only relevant if the interrupt hasn't occurred. */
31208 - if (!atomic_read(&data->interrupt_handled))
31209 + if (!atomic_read_unchecked(&data->interrupt_handled))
31210 schedule_work(&data->read_work);
31211 }
31212 ret = wait_event_timeout(data->wait_queue,
31213 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31214
31215 /* First disable the interrupt */
31216 disable_irq_nosync(irq);
31217 - atomic_inc(&data->interrupt_handled);
31218 + atomic_inc_unchecked(&data->interrupt_handled);
31219 /* Then schedule a reading work struct */
31220 if (data->state != SHT15_READING_NOTHING)
31221 schedule_work(&data->read_work);
31222 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31223 * If not, then start the interrupt again - care here as could
31224 * have gone low in meantime so verify it hasn't!
31225 */
31226 - atomic_set(&data->interrupt_handled, 0);
31227 + atomic_set_unchecked(&data->interrupt_handled, 0);
31228 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31229 /* If still not occurred or another handler has been scheduled */
31230 if (gpio_get_value(data->pdata->gpio_data)
31231 - || atomic_read(&data->interrupt_handled))
31232 + || atomic_read_unchecked(&data->interrupt_handled))
31233 return;
31234 }
31235
31236 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31237 index 378fcb5..5e91fa8 100644
31238 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
31239 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31240 @@ -43,7 +43,7 @@
31241 extern struct i2c_adapter amd756_smbus;
31242
31243 static struct i2c_adapter *s4882_adapter;
31244 -static struct i2c_algorithm *s4882_algo;
31245 +static i2c_algorithm_no_const *s4882_algo;
31246
31247 /* Wrapper access functions for multiplexed SMBus */
31248 static DEFINE_MUTEX(amd756_lock);
31249 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31250 index 29015eb..af2d8e9 100644
31251 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31252 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31253 @@ -41,7 +41,7 @@
31254 extern struct i2c_adapter *nforce2_smbus;
31255
31256 static struct i2c_adapter *s4985_adapter;
31257 -static struct i2c_algorithm *s4985_algo;
31258 +static i2c_algorithm_no_const *s4985_algo;
31259
31260 /* Wrapper access functions for multiplexed SMBus */
31261 static DEFINE_MUTEX(nforce2_lock);
31262 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31263 index d7a4833..7fae376 100644
31264 --- a/drivers/i2c/i2c-mux.c
31265 +++ b/drivers/i2c/i2c-mux.c
31266 @@ -28,7 +28,7 @@
31267 /* multiplexer per channel data */
31268 struct i2c_mux_priv {
31269 struct i2c_adapter adap;
31270 - struct i2c_algorithm algo;
31271 + i2c_algorithm_no_const algo;
31272
31273 struct i2c_adapter *parent;
31274 void *mux_dev; /* the mux chip/device */
31275 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31276 index 57d00ca..0145194 100644
31277 --- a/drivers/ide/aec62xx.c
31278 +++ b/drivers/ide/aec62xx.c
31279 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31280 .cable_detect = atp86x_cable_detect,
31281 };
31282
31283 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31284 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31285 { /* 0: AEC6210 */
31286 .name = DRV_NAME,
31287 .init_chipset = init_chipset_aec62xx,
31288 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31289 index 2c8016a..911a27c 100644
31290 --- a/drivers/ide/alim15x3.c
31291 +++ b/drivers/ide/alim15x3.c
31292 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31293 .dma_sff_read_status = ide_dma_sff_read_status,
31294 };
31295
31296 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
31297 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
31298 .name = DRV_NAME,
31299 .init_chipset = init_chipset_ali15x3,
31300 .init_hwif = init_hwif_ali15x3,
31301 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31302 index 3747b25..56fc995 100644
31303 --- a/drivers/ide/amd74xx.c
31304 +++ b/drivers/ide/amd74xx.c
31305 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31306 .udma_mask = udma, \
31307 }
31308
31309 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31310 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31311 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31312 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31313 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31314 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31315 index 15f0ead..cb43480 100644
31316 --- a/drivers/ide/atiixp.c
31317 +++ b/drivers/ide/atiixp.c
31318 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31319 .cable_detect = atiixp_cable_detect,
31320 };
31321
31322 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31323 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31324 { /* 0: IXP200/300/400/700 */
31325 .name = DRV_NAME,
31326 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31327 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31328 index 5f80312..d1fc438 100644
31329 --- a/drivers/ide/cmd64x.c
31330 +++ b/drivers/ide/cmd64x.c
31331 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31332 .dma_sff_read_status = ide_dma_sff_read_status,
31333 };
31334
31335 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31336 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31337 { /* 0: CMD643 */
31338 .name = DRV_NAME,
31339 .init_chipset = init_chipset_cmd64x,
31340 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31341 index 2c1e5f7..1444762 100644
31342 --- a/drivers/ide/cs5520.c
31343 +++ b/drivers/ide/cs5520.c
31344 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31345 .set_dma_mode = cs5520_set_dma_mode,
31346 };
31347
31348 -static const struct ide_port_info cyrix_chipset __devinitdata = {
31349 +static const struct ide_port_info cyrix_chipset __devinitconst = {
31350 .name = DRV_NAME,
31351 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31352 .port_ops = &cs5520_port_ops,
31353 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31354 index 4dc4eb9..49b40ad 100644
31355 --- a/drivers/ide/cs5530.c
31356 +++ b/drivers/ide/cs5530.c
31357 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31358 .udma_filter = cs5530_udma_filter,
31359 };
31360
31361 -static const struct ide_port_info cs5530_chipset __devinitdata = {
31362 +static const struct ide_port_info cs5530_chipset __devinitconst = {
31363 .name = DRV_NAME,
31364 .init_chipset = init_chipset_cs5530,
31365 .init_hwif = init_hwif_cs5530,
31366 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31367 index 5059faf..18d4c85 100644
31368 --- a/drivers/ide/cs5535.c
31369 +++ b/drivers/ide/cs5535.c
31370 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31371 .cable_detect = cs5535_cable_detect,
31372 };
31373
31374 -static const struct ide_port_info cs5535_chipset __devinitdata = {
31375 +static const struct ide_port_info cs5535_chipset __devinitconst = {
31376 .name = DRV_NAME,
31377 .port_ops = &cs5535_port_ops,
31378 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31379 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31380 index 847553f..3ffb49d 100644
31381 --- a/drivers/ide/cy82c693.c
31382 +++ b/drivers/ide/cy82c693.c
31383 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31384 .set_dma_mode = cy82c693_set_dma_mode,
31385 };
31386
31387 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
31388 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
31389 .name = DRV_NAME,
31390 .init_iops = init_iops_cy82c693,
31391 .port_ops = &cy82c693_port_ops,
31392 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31393 index 58c51cd..4aec3b8 100644
31394 --- a/drivers/ide/hpt366.c
31395 +++ b/drivers/ide/hpt366.c
31396 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31397 }
31398 };
31399
31400 -static const struct hpt_info hpt36x __devinitdata = {
31401 +static const struct hpt_info hpt36x __devinitconst = {
31402 .chip_name = "HPT36x",
31403 .chip_type = HPT36x,
31404 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31405 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31406 .timings = &hpt36x_timings
31407 };
31408
31409 -static const struct hpt_info hpt370 __devinitdata = {
31410 +static const struct hpt_info hpt370 __devinitconst = {
31411 .chip_name = "HPT370",
31412 .chip_type = HPT370,
31413 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31414 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31415 .timings = &hpt37x_timings
31416 };
31417
31418 -static const struct hpt_info hpt370a __devinitdata = {
31419 +static const struct hpt_info hpt370a __devinitconst = {
31420 .chip_name = "HPT370A",
31421 .chip_type = HPT370A,
31422 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31423 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31424 .timings = &hpt37x_timings
31425 };
31426
31427 -static const struct hpt_info hpt374 __devinitdata = {
31428 +static const struct hpt_info hpt374 __devinitconst = {
31429 .chip_name = "HPT374",
31430 .chip_type = HPT374,
31431 .udma_mask = ATA_UDMA5,
31432 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31433 .timings = &hpt37x_timings
31434 };
31435
31436 -static const struct hpt_info hpt372 __devinitdata = {
31437 +static const struct hpt_info hpt372 __devinitconst = {
31438 .chip_name = "HPT372",
31439 .chip_type = HPT372,
31440 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31441 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31442 .timings = &hpt37x_timings
31443 };
31444
31445 -static const struct hpt_info hpt372a __devinitdata = {
31446 +static const struct hpt_info hpt372a __devinitconst = {
31447 .chip_name = "HPT372A",
31448 .chip_type = HPT372A,
31449 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31450 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31451 .timings = &hpt37x_timings
31452 };
31453
31454 -static const struct hpt_info hpt302 __devinitdata = {
31455 +static const struct hpt_info hpt302 __devinitconst = {
31456 .chip_name = "HPT302",
31457 .chip_type = HPT302,
31458 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31459 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31460 .timings = &hpt37x_timings
31461 };
31462
31463 -static const struct hpt_info hpt371 __devinitdata = {
31464 +static const struct hpt_info hpt371 __devinitconst = {
31465 .chip_name = "HPT371",
31466 .chip_type = HPT371,
31467 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31468 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31469 .timings = &hpt37x_timings
31470 };
31471
31472 -static const struct hpt_info hpt372n __devinitdata = {
31473 +static const struct hpt_info hpt372n __devinitconst = {
31474 .chip_name = "HPT372N",
31475 .chip_type = HPT372N,
31476 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31477 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31478 .timings = &hpt37x_timings
31479 };
31480
31481 -static const struct hpt_info hpt302n __devinitdata = {
31482 +static const struct hpt_info hpt302n __devinitconst = {
31483 .chip_name = "HPT302N",
31484 .chip_type = HPT302N,
31485 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31486 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31487 .timings = &hpt37x_timings
31488 };
31489
31490 -static const struct hpt_info hpt371n __devinitdata = {
31491 +static const struct hpt_info hpt371n __devinitconst = {
31492 .chip_name = "HPT371N",
31493 .chip_type = HPT371N,
31494 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31495 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31496 .dma_sff_read_status = ide_dma_sff_read_status,
31497 };
31498
31499 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31500 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31501 { /* 0: HPT36x */
31502 .name = DRV_NAME,
31503 .init_chipset = init_chipset_hpt366,
31504 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31505 index 8126824..55a2798 100644
31506 --- a/drivers/ide/ide-cd.c
31507 +++ b/drivers/ide/ide-cd.c
31508 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31509 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31510 if ((unsigned long)buf & alignment
31511 || blk_rq_bytes(rq) & q->dma_pad_mask
31512 - || object_is_on_stack(buf))
31513 + || object_starts_on_stack(buf))
31514 drive->dma = 0;
31515 }
31516 }
31517 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31518 index 7f56b73..dab5b67 100644
31519 --- a/drivers/ide/ide-pci-generic.c
31520 +++ b/drivers/ide/ide-pci-generic.c
31521 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31522 .udma_mask = ATA_UDMA6, \
31523 }
31524
31525 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
31526 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
31527 /* 0: Unknown */
31528 DECLARE_GENERIC_PCI_DEV(0),
31529
31530 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31531 index 560e66d..d5dd180 100644
31532 --- a/drivers/ide/it8172.c
31533 +++ b/drivers/ide/it8172.c
31534 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31535 .set_dma_mode = it8172_set_dma_mode,
31536 };
31537
31538 -static const struct ide_port_info it8172_port_info __devinitdata = {
31539 +static const struct ide_port_info it8172_port_info __devinitconst = {
31540 .name = DRV_NAME,
31541 .port_ops = &it8172_port_ops,
31542 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31543 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31544 index 46816ba..1847aeb 100644
31545 --- a/drivers/ide/it8213.c
31546 +++ b/drivers/ide/it8213.c
31547 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31548 .cable_detect = it8213_cable_detect,
31549 };
31550
31551 -static const struct ide_port_info it8213_chipset __devinitdata = {
31552 +static const struct ide_port_info it8213_chipset __devinitconst = {
31553 .name = DRV_NAME,
31554 .enablebits = { {0x41, 0x80, 0x80} },
31555 .port_ops = &it8213_port_ops,
31556 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31557 index 2e3169f..c5611db 100644
31558 --- a/drivers/ide/it821x.c
31559 +++ b/drivers/ide/it821x.c
31560 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31561 .cable_detect = it821x_cable_detect,
31562 };
31563
31564 -static const struct ide_port_info it821x_chipset __devinitdata = {
31565 +static const struct ide_port_info it821x_chipset __devinitconst = {
31566 .name = DRV_NAME,
31567 .init_chipset = init_chipset_it821x,
31568 .init_hwif = init_hwif_it821x,
31569 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31570 index 74c2c4a..efddd7d 100644
31571 --- a/drivers/ide/jmicron.c
31572 +++ b/drivers/ide/jmicron.c
31573 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31574 .cable_detect = jmicron_cable_detect,
31575 };
31576
31577 -static const struct ide_port_info jmicron_chipset __devinitdata = {
31578 +static const struct ide_port_info jmicron_chipset __devinitconst = {
31579 .name = DRV_NAME,
31580 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31581 .port_ops = &jmicron_port_ops,
31582 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31583 index 95327a2..73f78d8 100644
31584 --- a/drivers/ide/ns87415.c
31585 +++ b/drivers/ide/ns87415.c
31586 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31587 .dma_sff_read_status = superio_dma_sff_read_status,
31588 };
31589
31590 -static const struct ide_port_info ns87415_chipset __devinitdata = {
31591 +static const struct ide_port_info ns87415_chipset __devinitconst = {
31592 .name = DRV_NAME,
31593 .init_hwif = init_hwif_ns87415,
31594 .tp_ops = &ns87415_tp_ops,
31595 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31596 index 1a53a4c..39edc66 100644
31597 --- a/drivers/ide/opti621.c
31598 +++ b/drivers/ide/opti621.c
31599 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31600 .set_pio_mode = opti621_set_pio_mode,
31601 };
31602
31603 -static const struct ide_port_info opti621_chipset __devinitdata = {
31604 +static const struct ide_port_info opti621_chipset __devinitconst = {
31605 .name = DRV_NAME,
31606 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31607 .port_ops = &opti621_port_ops,
31608 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31609 index 9546fe2..2e5ceb6 100644
31610 --- a/drivers/ide/pdc202xx_new.c
31611 +++ b/drivers/ide/pdc202xx_new.c
31612 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31613 .udma_mask = udma, \
31614 }
31615
31616 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31617 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31618 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31619 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31620 };
31621 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31622 index 3a35ec6..5634510 100644
31623 --- a/drivers/ide/pdc202xx_old.c
31624 +++ b/drivers/ide/pdc202xx_old.c
31625 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31626 .max_sectors = sectors, \
31627 }
31628
31629 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31630 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31631 { /* 0: PDC20246 */
31632 .name = DRV_NAME,
31633 .init_chipset = init_chipset_pdc202xx,
31634 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31635 index 1892e81..fe0fd60 100644
31636 --- a/drivers/ide/piix.c
31637 +++ b/drivers/ide/piix.c
31638 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31639 .udma_mask = udma, \
31640 }
31641
31642 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
31643 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
31644 /* 0: MPIIX */
31645 { /*
31646 * MPIIX actually has only a single IDE channel mapped to
31647 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31648 index a6414a8..c04173e 100644
31649 --- a/drivers/ide/rz1000.c
31650 +++ b/drivers/ide/rz1000.c
31651 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31652 }
31653 }
31654
31655 -static const struct ide_port_info rz1000_chipset __devinitdata = {
31656 +static const struct ide_port_info rz1000_chipset __devinitconst = {
31657 .name = DRV_NAME,
31658 .host_flags = IDE_HFLAG_NO_DMA,
31659 };
31660 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31661 index 356b9b5..d4758eb 100644
31662 --- a/drivers/ide/sc1200.c
31663 +++ b/drivers/ide/sc1200.c
31664 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31665 .dma_sff_read_status = ide_dma_sff_read_status,
31666 };
31667
31668 -static const struct ide_port_info sc1200_chipset __devinitdata = {
31669 +static const struct ide_port_info sc1200_chipset __devinitconst = {
31670 .name = DRV_NAME,
31671 .port_ops = &sc1200_port_ops,
31672 .dma_ops = &sc1200_dma_ops,
31673 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31674 index b7f5b0c..9701038 100644
31675 --- a/drivers/ide/scc_pata.c
31676 +++ b/drivers/ide/scc_pata.c
31677 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31678 .dma_sff_read_status = scc_dma_sff_read_status,
31679 };
31680
31681 -static const struct ide_port_info scc_chipset __devinitdata = {
31682 +static const struct ide_port_info scc_chipset __devinitconst = {
31683 .name = "sccIDE",
31684 .init_iops = init_iops_scc,
31685 .init_dma = scc_init_dma,
31686 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31687 index 35fb8da..24d72ef 100644
31688 --- a/drivers/ide/serverworks.c
31689 +++ b/drivers/ide/serverworks.c
31690 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31691 .cable_detect = svwks_cable_detect,
31692 };
31693
31694 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31695 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31696 { /* 0: OSB4 */
31697 .name = DRV_NAME,
31698 .init_chipset = init_chipset_svwks,
31699 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31700 index ddeda44..46f7e30 100644
31701 --- a/drivers/ide/siimage.c
31702 +++ b/drivers/ide/siimage.c
31703 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31704 .udma_mask = ATA_UDMA6, \
31705 }
31706
31707 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31708 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31709 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31710 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31711 };
31712 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31713 index 4a00225..09e61b4 100644
31714 --- a/drivers/ide/sis5513.c
31715 +++ b/drivers/ide/sis5513.c
31716 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31717 .cable_detect = sis_cable_detect,
31718 };
31719
31720 -static const struct ide_port_info sis5513_chipset __devinitdata = {
31721 +static const struct ide_port_info sis5513_chipset __devinitconst = {
31722 .name = DRV_NAME,
31723 .init_chipset = init_chipset_sis5513,
31724 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31725 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31726 index f21dc2a..d051cd2 100644
31727 --- a/drivers/ide/sl82c105.c
31728 +++ b/drivers/ide/sl82c105.c
31729 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31730 .dma_sff_read_status = ide_dma_sff_read_status,
31731 };
31732
31733 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
31734 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
31735 .name = DRV_NAME,
31736 .init_chipset = init_chipset_sl82c105,
31737 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31738 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31739 index 864ffe0..863a5e9 100644
31740 --- a/drivers/ide/slc90e66.c
31741 +++ b/drivers/ide/slc90e66.c
31742 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31743 .cable_detect = slc90e66_cable_detect,
31744 };
31745
31746 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
31747 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
31748 .name = DRV_NAME,
31749 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31750 .port_ops = &slc90e66_port_ops,
31751 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31752 index 4799d5c..1794678 100644
31753 --- a/drivers/ide/tc86c001.c
31754 +++ b/drivers/ide/tc86c001.c
31755 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31756 .dma_sff_read_status = ide_dma_sff_read_status,
31757 };
31758
31759 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
31760 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
31761 .name = DRV_NAME,
31762 .init_hwif = init_hwif_tc86c001,
31763 .port_ops = &tc86c001_port_ops,
31764 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31765 index 281c914..55ce1b8 100644
31766 --- a/drivers/ide/triflex.c
31767 +++ b/drivers/ide/triflex.c
31768 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31769 .set_dma_mode = triflex_set_mode,
31770 };
31771
31772 -static const struct ide_port_info triflex_device __devinitdata = {
31773 +static const struct ide_port_info triflex_device __devinitconst = {
31774 .name = DRV_NAME,
31775 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31776 .port_ops = &triflex_port_ops,
31777 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31778 index 4b42ca0..e494a98 100644
31779 --- a/drivers/ide/trm290.c
31780 +++ b/drivers/ide/trm290.c
31781 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31782 .dma_check = trm290_dma_check,
31783 };
31784
31785 -static const struct ide_port_info trm290_chipset __devinitdata = {
31786 +static const struct ide_port_info trm290_chipset __devinitconst = {
31787 .name = DRV_NAME,
31788 .init_hwif = init_hwif_trm290,
31789 .tp_ops = &trm290_tp_ops,
31790 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31791 index f46f49c..eb77678 100644
31792 --- a/drivers/ide/via82cxxx.c
31793 +++ b/drivers/ide/via82cxxx.c
31794 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31795 .cable_detect = via82cxxx_cable_detect,
31796 };
31797
31798 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31799 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31800 .name = DRV_NAME,
31801 .init_chipset = init_chipset_via82cxxx,
31802 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31803 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31804 index 73d4531..c90cd2d 100644
31805 --- a/drivers/ieee802154/fakehard.c
31806 +++ b/drivers/ieee802154/fakehard.c
31807 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31808 phy->transmit_power = 0xbf;
31809
31810 dev->netdev_ops = &fake_ops;
31811 - dev->ml_priv = &fake_mlme;
31812 + dev->ml_priv = (void *)&fake_mlme;
31813
31814 priv = netdev_priv(dev);
31815 priv->phy = phy;
31816 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
31817 index c889aae..6cf5aa7 100644
31818 --- a/drivers/infiniband/core/cm.c
31819 +++ b/drivers/infiniband/core/cm.c
31820 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
31821
31822 struct cm_counter_group {
31823 struct kobject obj;
31824 - atomic_long_t counter[CM_ATTR_COUNT];
31825 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
31826 };
31827
31828 struct cm_counter_attribute {
31829 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
31830 struct ib_mad_send_buf *msg = NULL;
31831 int ret;
31832
31833 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31834 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31835 counter[CM_REQ_COUNTER]);
31836
31837 /* Quick state check to discard duplicate REQs. */
31838 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
31839 if (!cm_id_priv)
31840 return;
31841
31842 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31843 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31844 counter[CM_REP_COUNTER]);
31845 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
31846 if (ret)
31847 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
31848 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
31849 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
31850 spin_unlock_irq(&cm_id_priv->lock);
31851 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31852 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31853 counter[CM_RTU_COUNTER]);
31854 goto out;
31855 }
31856 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
31857 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
31858 dreq_msg->local_comm_id);
31859 if (!cm_id_priv) {
31860 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31861 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31862 counter[CM_DREQ_COUNTER]);
31863 cm_issue_drep(work->port, work->mad_recv_wc);
31864 return -EINVAL;
31865 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
31866 case IB_CM_MRA_REP_RCVD:
31867 break;
31868 case IB_CM_TIMEWAIT:
31869 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31870 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31871 counter[CM_DREQ_COUNTER]);
31872 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31873 goto unlock;
31874 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
31875 cm_free_msg(msg);
31876 goto deref;
31877 case IB_CM_DREQ_RCVD:
31878 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31879 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31880 counter[CM_DREQ_COUNTER]);
31881 goto unlock;
31882 default:
31883 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
31884 ib_modify_mad(cm_id_priv->av.port->mad_agent,
31885 cm_id_priv->msg, timeout)) {
31886 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
31887 - atomic_long_inc(&work->port->
31888 + atomic_long_inc_unchecked(&work->port->
31889 counter_group[CM_RECV_DUPLICATES].
31890 counter[CM_MRA_COUNTER]);
31891 goto out;
31892 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
31893 break;
31894 case IB_CM_MRA_REQ_RCVD:
31895 case IB_CM_MRA_REP_RCVD:
31896 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31897 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31898 counter[CM_MRA_COUNTER]);
31899 /* fall through */
31900 default:
31901 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
31902 case IB_CM_LAP_IDLE:
31903 break;
31904 case IB_CM_MRA_LAP_SENT:
31905 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31906 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31907 counter[CM_LAP_COUNTER]);
31908 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31909 goto unlock;
31910 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
31911 cm_free_msg(msg);
31912 goto deref;
31913 case IB_CM_LAP_RCVD:
31914 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31915 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31916 counter[CM_LAP_COUNTER]);
31917 goto unlock;
31918 default:
31919 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
31920 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
31921 if (cur_cm_id_priv) {
31922 spin_unlock_irq(&cm.lock);
31923 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31924 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31925 counter[CM_SIDR_REQ_COUNTER]);
31926 goto out; /* Duplicate message. */
31927 }
31928 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
31929 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
31930 msg->retries = 1;
31931
31932 - atomic_long_add(1 + msg->retries,
31933 + atomic_long_add_unchecked(1 + msg->retries,
31934 &port->counter_group[CM_XMIT].counter[attr_index]);
31935 if (msg->retries)
31936 - atomic_long_add(msg->retries,
31937 + atomic_long_add_unchecked(msg->retries,
31938 &port->counter_group[CM_XMIT_RETRIES].
31939 counter[attr_index]);
31940
31941 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
31942 }
31943
31944 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
31945 - atomic_long_inc(&port->counter_group[CM_RECV].
31946 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
31947 counter[attr_id - CM_ATTR_ID_OFFSET]);
31948
31949 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
31950 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
31951 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
31952
31953 return sprintf(buf, "%ld\n",
31954 - atomic_long_read(&group->counter[cm_attr->index]));
31955 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
31956 }
31957
31958 static const struct sysfs_ops cm_counter_ops = {
31959 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
31960 index 176c8f9..2627b62 100644
31961 --- a/drivers/infiniband/core/fmr_pool.c
31962 +++ b/drivers/infiniband/core/fmr_pool.c
31963 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
31964
31965 struct task_struct *thread;
31966
31967 - atomic_t req_ser;
31968 - atomic_t flush_ser;
31969 + atomic_unchecked_t req_ser;
31970 + atomic_unchecked_t flush_ser;
31971
31972 wait_queue_head_t force_wait;
31973 };
31974 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
31975 struct ib_fmr_pool *pool = pool_ptr;
31976
31977 do {
31978 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
31979 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
31980 ib_fmr_batch_release(pool);
31981
31982 - atomic_inc(&pool->flush_ser);
31983 + atomic_inc_unchecked(&pool->flush_ser);
31984 wake_up_interruptible(&pool->force_wait);
31985
31986 if (pool->flush_function)
31987 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
31988 }
31989
31990 set_current_state(TASK_INTERRUPTIBLE);
31991 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
31992 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
31993 !kthread_should_stop())
31994 schedule();
31995 __set_current_state(TASK_RUNNING);
31996 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
31997 pool->dirty_watermark = params->dirty_watermark;
31998 pool->dirty_len = 0;
31999 spin_lock_init(&pool->pool_lock);
32000 - atomic_set(&pool->req_ser, 0);
32001 - atomic_set(&pool->flush_ser, 0);
32002 + atomic_set_unchecked(&pool->req_ser, 0);
32003 + atomic_set_unchecked(&pool->flush_ser, 0);
32004 init_waitqueue_head(&pool->force_wait);
32005
32006 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32007 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32008 }
32009 spin_unlock_irq(&pool->pool_lock);
32010
32011 - serial = atomic_inc_return(&pool->req_ser);
32012 + serial = atomic_inc_return_unchecked(&pool->req_ser);
32013 wake_up_process(pool->thread);
32014
32015 if (wait_event_interruptible(pool->force_wait,
32016 - atomic_read(&pool->flush_ser) - serial >= 0))
32017 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32018 return -EINTR;
32019
32020 return 0;
32021 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32022 } else {
32023 list_add_tail(&fmr->list, &pool->dirty_list);
32024 if (++pool->dirty_len >= pool->dirty_watermark) {
32025 - atomic_inc(&pool->req_ser);
32026 + atomic_inc_unchecked(&pool->req_ser);
32027 wake_up_process(pool->thread);
32028 }
32029 }
32030 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32031 index 40c8353..946b0e4 100644
32032 --- a/drivers/infiniband/hw/cxgb4/mem.c
32033 +++ b/drivers/infiniband/hw/cxgb4/mem.c
32034 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32035 int err;
32036 struct fw_ri_tpte tpt;
32037 u32 stag_idx;
32038 - static atomic_t key;
32039 + static atomic_unchecked_t key;
32040
32041 if (c4iw_fatal_error(rdev))
32042 return -EIO;
32043 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32044 &rdev->resource.tpt_fifo_lock);
32045 if (!stag_idx)
32046 return -ENOMEM;
32047 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32048 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32049 }
32050 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32051 __func__, stag_state, type, pdid, stag_idx);
32052 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
32053 index a4de9d5..5fa20c3 100644
32054 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
32055 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
32056 @@ -126,6 +126,8 @@ static const struct file_operations atomic_counters_ops = {
32057 };
32058
32059 static ssize_t flash_read(struct file *file, char __user *buf,
32060 + size_t count, loff_t *ppos) __size_overflow(3);
32061 +static ssize_t flash_read(struct file *file, char __user *buf,
32062 size_t count, loff_t *ppos)
32063 {
32064 struct ipath_devdata *dd;
32065 @@ -177,6 +179,8 @@ bail:
32066 }
32067
32068 static ssize_t flash_write(struct file *file, const char __user *buf,
32069 + size_t count, loff_t *ppos) __size_overflow(3);
32070 +static ssize_t flash_write(struct file *file, const char __user *buf,
32071 size_t count, loff_t *ppos)
32072 {
32073 struct ipath_devdata *dd;
32074 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32075 index 79b3dbc..96e5fcc 100644
32076 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
32077 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32078 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32079 struct ib_atomic_eth *ateth;
32080 struct ipath_ack_entry *e;
32081 u64 vaddr;
32082 - atomic64_t *maddr;
32083 + atomic64_unchecked_t *maddr;
32084 u64 sdata;
32085 u32 rkey;
32086 u8 next;
32087 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32088 IB_ACCESS_REMOTE_ATOMIC)))
32089 goto nack_acc_unlck;
32090 /* Perform atomic OP and save result. */
32091 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32092 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32093 sdata = be64_to_cpu(ateth->swap_data);
32094 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32095 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32096 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32097 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32098 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32099 be64_to_cpu(ateth->compare_data),
32100 sdata);
32101 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32102 index 1f95bba..9530f87 100644
32103 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32104 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32105 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32106 unsigned long flags;
32107 struct ib_wc wc;
32108 u64 sdata;
32109 - atomic64_t *maddr;
32110 + atomic64_unchecked_t *maddr;
32111 enum ib_wc_status send_status;
32112
32113 /*
32114 @@ -382,11 +382,11 @@ again:
32115 IB_ACCESS_REMOTE_ATOMIC)))
32116 goto acc_err;
32117 /* Perform atomic OP and save result. */
32118 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32119 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32120 sdata = wqe->wr.wr.atomic.compare_add;
32121 *(u64 *) sqp->s_sge.sge.vaddr =
32122 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32123 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32124 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32125 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32126 sdata, wqe->wr.wr.atomic.swap);
32127 goto send_comp;
32128 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32129 index 7140199..da60063 100644
32130 --- a/drivers/infiniband/hw/nes/nes.c
32131 +++ b/drivers/infiniband/hw/nes/nes.c
32132 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32133 LIST_HEAD(nes_adapter_list);
32134 static LIST_HEAD(nes_dev_list);
32135
32136 -atomic_t qps_destroyed;
32137 +atomic_unchecked_t qps_destroyed;
32138
32139 static unsigned int ee_flsh_adapter;
32140 static unsigned int sysfs_nonidx_addr;
32141 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32142 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32143 struct nes_adapter *nesadapter = nesdev->nesadapter;
32144
32145 - atomic_inc(&qps_destroyed);
32146 + atomic_inc_unchecked(&qps_destroyed);
32147
32148 /* Free the control structures */
32149
32150 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32151 index c438e46..ca30356 100644
32152 --- a/drivers/infiniband/hw/nes/nes.h
32153 +++ b/drivers/infiniband/hw/nes/nes.h
32154 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32155 extern unsigned int wqm_quanta;
32156 extern struct list_head nes_adapter_list;
32157
32158 -extern atomic_t cm_connects;
32159 -extern atomic_t cm_accepts;
32160 -extern atomic_t cm_disconnects;
32161 -extern atomic_t cm_closes;
32162 -extern atomic_t cm_connecteds;
32163 -extern atomic_t cm_connect_reqs;
32164 -extern atomic_t cm_rejects;
32165 -extern atomic_t mod_qp_timouts;
32166 -extern atomic_t qps_created;
32167 -extern atomic_t qps_destroyed;
32168 -extern atomic_t sw_qps_destroyed;
32169 +extern atomic_unchecked_t cm_connects;
32170 +extern atomic_unchecked_t cm_accepts;
32171 +extern atomic_unchecked_t cm_disconnects;
32172 +extern atomic_unchecked_t cm_closes;
32173 +extern atomic_unchecked_t cm_connecteds;
32174 +extern atomic_unchecked_t cm_connect_reqs;
32175 +extern atomic_unchecked_t cm_rejects;
32176 +extern atomic_unchecked_t mod_qp_timouts;
32177 +extern atomic_unchecked_t qps_created;
32178 +extern atomic_unchecked_t qps_destroyed;
32179 +extern atomic_unchecked_t sw_qps_destroyed;
32180 extern u32 mh_detected;
32181 extern u32 mh_pauses_sent;
32182 extern u32 cm_packets_sent;
32183 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32184 extern u32 cm_packets_received;
32185 extern u32 cm_packets_dropped;
32186 extern u32 cm_packets_retrans;
32187 -extern atomic_t cm_listens_created;
32188 -extern atomic_t cm_listens_destroyed;
32189 +extern atomic_unchecked_t cm_listens_created;
32190 +extern atomic_unchecked_t cm_listens_destroyed;
32191 extern u32 cm_backlog_drops;
32192 -extern atomic_t cm_loopbacks;
32193 -extern atomic_t cm_nodes_created;
32194 -extern atomic_t cm_nodes_destroyed;
32195 -extern atomic_t cm_accel_dropped_pkts;
32196 -extern atomic_t cm_resets_recvd;
32197 -extern atomic_t pau_qps_created;
32198 -extern atomic_t pau_qps_destroyed;
32199 +extern atomic_unchecked_t cm_loopbacks;
32200 +extern atomic_unchecked_t cm_nodes_created;
32201 +extern atomic_unchecked_t cm_nodes_destroyed;
32202 +extern atomic_unchecked_t cm_accel_dropped_pkts;
32203 +extern atomic_unchecked_t cm_resets_recvd;
32204 +extern atomic_unchecked_t pau_qps_created;
32205 +extern atomic_unchecked_t pau_qps_destroyed;
32206
32207 extern u32 int_mod_timer_init;
32208 extern u32 int_mod_cq_depth_256;
32209 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32210 index a4972ab..1bcfc31 100644
32211 --- a/drivers/infiniband/hw/nes/nes_cm.c
32212 +++ b/drivers/infiniband/hw/nes/nes_cm.c
32213 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32214 u32 cm_packets_retrans;
32215 u32 cm_packets_created;
32216 u32 cm_packets_received;
32217 -atomic_t cm_listens_created;
32218 -atomic_t cm_listens_destroyed;
32219 +atomic_unchecked_t cm_listens_created;
32220 +atomic_unchecked_t cm_listens_destroyed;
32221 u32 cm_backlog_drops;
32222 -atomic_t cm_loopbacks;
32223 -atomic_t cm_nodes_created;
32224 -atomic_t cm_nodes_destroyed;
32225 -atomic_t cm_accel_dropped_pkts;
32226 -atomic_t cm_resets_recvd;
32227 +atomic_unchecked_t cm_loopbacks;
32228 +atomic_unchecked_t cm_nodes_created;
32229 +atomic_unchecked_t cm_nodes_destroyed;
32230 +atomic_unchecked_t cm_accel_dropped_pkts;
32231 +atomic_unchecked_t cm_resets_recvd;
32232
32233 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32234 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32235 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32236
32237 static struct nes_cm_core *g_cm_core;
32238
32239 -atomic_t cm_connects;
32240 -atomic_t cm_accepts;
32241 -atomic_t cm_disconnects;
32242 -atomic_t cm_closes;
32243 -atomic_t cm_connecteds;
32244 -atomic_t cm_connect_reqs;
32245 -atomic_t cm_rejects;
32246 +atomic_unchecked_t cm_connects;
32247 +atomic_unchecked_t cm_accepts;
32248 +atomic_unchecked_t cm_disconnects;
32249 +atomic_unchecked_t cm_closes;
32250 +atomic_unchecked_t cm_connecteds;
32251 +atomic_unchecked_t cm_connect_reqs;
32252 +atomic_unchecked_t cm_rejects;
32253
32254 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32255 {
32256 @@ -1274,7 +1274,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32257 kfree(listener);
32258 listener = NULL;
32259 ret = 0;
32260 - atomic_inc(&cm_listens_destroyed);
32261 + atomic_inc_unchecked(&cm_listens_destroyed);
32262 } else {
32263 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32264 }
32265 @@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32266 cm_node->rem_mac);
32267
32268 add_hte_node(cm_core, cm_node);
32269 - atomic_inc(&cm_nodes_created);
32270 + atomic_inc_unchecked(&cm_nodes_created);
32271
32272 return cm_node;
32273 }
32274 @@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32275 }
32276
32277 atomic_dec(&cm_core->node_cnt);
32278 - atomic_inc(&cm_nodes_destroyed);
32279 + atomic_inc_unchecked(&cm_nodes_destroyed);
32280 nesqp = cm_node->nesqp;
32281 if (nesqp) {
32282 nesqp->cm_node = NULL;
32283 @@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32284
32285 static void drop_packet(struct sk_buff *skb)
32286 {
32287 - atomic_inc(&cm_accel_dropped_pkts);
32288 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32289 dev_kfree_skb_any(skb);
32290 }
32291
32292 @@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32293 {
32294
32295 int reset = 0; /* whether to send reset in case of err.. */
32296 - atomic_inc(&cm_resets_recvd);
32297 + atomic_inc_unchecked(&cm_resets_recvd);
32298 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32299 " refcnt=%d\n", cm_node, cm_node->state,
32300 atomic_read(&cm_node->ref_count));
32301 @@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32302 rem_ref_cm_node(cm_node->cm_core, cm_node);
32303 return NULL;
32304 }
32305 - atomic_inc(&cm_loopbacks);
32306 + atomic_inc_unchecked(&cm_loopbacks);
32307 loopbackremotenode->loopbackpartner = cm_node;
32308 loopbackremotenode->tcp_cntxt.rcv_wscale =
32309 NES_CM_DEFAULT_RCV_WND_SCALE;
32310 @@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32311 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32312 else {
32313 rem_ref_cm_node(cm_core, cm_node);
32314 - atomic_inc(&cm_accel_dropped_pkts);
32315 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32316 dev_kfree_skb_any(skb);
32317 }
32318 break;
32319 @@ -2881,7 +2881,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32320
32321 if ((cm_id) && (cm_id->event_handler)) {
32322 if (issue_disconn) {
32323 - atomic_inc(&cm_disconnects);
32324 + atomic_inc_unchecked(&cm_disconnects);
32325 cm_event.event = IW_CM_EVENT_DISCONNECT;
32326 cm_event.status = disconn_status;
32327 cm_event.local_addr = cm_id->local_addr;
32328 @@ -2903,7 +2903,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32329 }
32330
32331 if (issue_close) {
32332 - atomic_inc(&cm_closes);
32333 + atomic_inc_unchecked(&cm_closes);
32334 nes_disconnect(nesqp, 1);
32335
32336 cm_id->provider_data = nesqp;
32337 @@ -3039,7 +3039,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32338
32339 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32340 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32341 - atomic_inc(&cm_accepts);
32342 + atomic_inc_unchecked(&cm_accepts);
32343
32344 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32345 netdev_refcnt_read(nesvnic->netdev));
32346 @@ -3241,7 +3241,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32347 struct nes_cm_core *cm_core;
32348 u8 *start_buff;
32349
32350 - atomic_inc(&cm_rejects);
32351 + atomic_inc_unchecked(&cm_rejects);
32352 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32353 loopback = cm_node->loopbackpartner;
32354 cm_core = cm_node->cm_core;
32355 @@ -3301,7 +3301,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32356 ntohl(cm_id->local_addr.sin_addr.s_addr),
32357 ntohs(cm_id->local_addr.sin_port));
32358
32359 - atomic_inc(&cm_connects);
32360 + atomic_inc_unchecked(&cm_connects);
32361 nesqp->active_conn = 1;
32362
32363 /* cache the cm_id in the qp */
32364 @@ -3407,7 +3407,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32365 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32366 return err;
32367 }
32368 - atomic_inc(&cm_listens_created);
32369 + atomic_inc_unchecked(&cm_listens_created);
32370 }
32371
32372 cm_id->add_ref(cm_id);
32373 @@ -3508,7 +3508,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32374
32375 if (nesqp->destroyed)
32376 return;
32377 - atomic_inc(&cm_connecteds);
32378 + atomic_inc_unchecked(&cm_connecteds);
32379 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32380 " local port 0x%04X. jiffies = %lu.\n",
32381 nesqp->hwqp.qp_id,
32382 @@ -3695,7 +3695,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32383
32384 cm_id->add_ref(cm_id);
32385 ret = cm_id->event_handler(cm_id, &cm_event);
32386 - atomic_inc(&cm_closes);
32387 + atomic_inc_unchecked(&cm_closes);
32388 cm_event.event = IW_CM_EVENT_CLOSE;
32389 cm_event.status = 0;
32390 cm_event.provider_data = cm_id->provider_data;
32391 @@ -3731,7 +3731,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32392 return;
32393 cm_id = cm_node->cm_id;
32394
32395 - atomic_inc(&cm_connect_reqs);
32396 + atomic_inc_unchecked(&cm_connect_reqs);
32397 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32398 cm_node, cm_id, jiffies);
32399
32400 @@ -3771,7 +3771,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32401 return;
32402 cm_id = cm_node->cm_id;
32403
32404 - atomic_inc(&cm_connect_reqs);
32405 + atomic_inc_unchecked(&cm_connect_reqs);
32406 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32407 cm_node, cm_id, jiffies);
32408
32409 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32410 index 3ba7be3..c81f6ff 100644
32411 --- a/drivers/infiniband/hw/nes/nes_mgt.c
32412 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
32413 @@ -40,8 +40,8 @@
32414 #include "nes.h"
32415 #include "nes_mgt.h"
32416
32417 -atomic_t pau_qps_created;
32418 -atomic_t pau_qps_destroyed;
32419 +atomic_unchecked_t pau_qps_created;
32420 +atomic_unchecked_t pau_qps_destroyed;
32421
32422 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32423 {
32424 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32425 {
32426 struct sk_buff *skb;
32427 unsigned long flags;
32428 - atomic_inc(&pau_qps_destroyed);
32429 + atomic_inc_unchecked(&pau_qps_destroyed);
32430
32431 /* Free packets that have not yet been forwarded */
32432 /* Lock is acquired by skb_dequeue when removing the skb */
32433 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32434 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32435 skb_queue_head_init(&nesqp->pau_list);
32436 spin_lock_init(&nesqp->pau_lock);
32437 - atomic_inc(&pau_qps_created);
32438 + atomic_inc_unchecked(&pau_qps_created);
32439 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32440 }
32441
32442 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32443 index f3a3ecf..57d311d 100644
32444 --- a/drivers/infiniband/hw/nes/nes_nic.c
32445 +++ b/drivers/infiniband/hw/nes/nes_nic.c
32446 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32447 target_stat_values[++index] = mh_detected;
32448 target_stat_values[++index] = mh_pauses_sent;
32449 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32450 - target_stat_values[++index] = atomic_read(&cm_connects);
32451 - target_stat_values[++index] = atomic_read(&cm_accepts);
32452 - target_stat_values[++index] = atomic_read(&cm_disconnects);
32453 - target_stat_values[++index] = atomic_read(&cm_connecteds);
32454 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32455 - target_stat_values[++index] = atomic_read(&cm_rejects);
32456 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32457 - target_stat_values[++index] = atomic_read(&qps_created);
32458 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32459 - target_stat_values[++index] = atomic_read(&qps_destroyed);
32460 - target_stat_values[++index] = atomic_read(&cm_closes);
32461 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32462 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32463 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32464 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32465 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32466 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32467 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32468 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32469 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32470 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32471 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32472 target_stat_values[++index] = cm_packets_sent;
32473 target_stat_values[++index] = cm_packets_bounced;
32474 target_stat_values[++index] = cm_packets_created;
32475 target_stat_values[++index] = cm_packets_received;
32476 target_stat_values[++index] = cm_packets_dropped;
32477 target_stat_values[++index] = cm_packets_retrans;
32478 - target_stat_values[++index] = atomic_read(&cm_listens_created);
32479 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32480 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32481 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32482 target_stat_values[++index] = cm_backlog_drops;
32483 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
32484 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
32485 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32486 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32487 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32488 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32489 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32490 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32491 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32492 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32493 target_stat_values[++index] = nesadapter->free_4kpbl;
32494 target_stat_values[++index] = nesadapter->free_256pbl;
32495 target_stat_values[++index] = int_mod_timer_init;
32496 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32497 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32498 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32499 - target_stat_values[++index] = atomic_read(&pau_qps_created);
32500 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32501 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32502 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32503 }
32504
32505 /**
32506 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32507 index 0927b5c..ed67986 100644
32508 --- a/drivers/infiniband/hw/nes/nes_verbs.c
32509 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
32510 @@ -46,9 +46,9 @@
32511
32512 #include <rdma/ib_umem.h>
32513
32514 -atomic_t mod_qp_timouts;
32515 -atomic_t qps_created;
32516 -atomic_t sw_qps_destroyed;
32517 +atomic_unchecked_t mod_qp_timouts;
32518 +atomic_unchecked_t qps_created;
32519 +atomic_unchecked_t sw_qps_destroyed;
32520
32521 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32522
32523 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32524 if (init_attr->create_flags)
32525 return ERR_PTR(-EINVAL);
32526
32527 - atomic_inc(&qps_created);
32528 + atomic_inc_unchecked(&qps_created);
32529 switch (init_attr->qp_type) {
32530 case IB_QPT_RC:
32531 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32532 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32533 struct iw_cm_event cm_event;
32534 int ret = 0;
32535
32536 - atomic_inc(&sw_qps_destroyed);
32537 + atomic_inc_unchecked(&sw_qps_destroyed);
32538 nesqp->destroyed = 1;
32539
32540 /* Blow away the connection if it exists. */
32541 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32542 index b881bdc..c2e360c 100644
32543 --- a/drivers/infiniband/hw/qib/qib.h
32544 +++ b/drivers/infiniband/hw/qib/qib.h
32545 @@ -51,6 +51,7 @@
32546 #include <linux/completion.h>
32547 #include <linux/kref.h>
32548 #include <linux/sched.h>
32549 +#include <linux/slab.h>
32550
32551 #include "qib_common.h"
32552 #include "qib_verbs.h"
32553 diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
32554 index 05e0f17..0275789 100644
32555 --- a/drivers/infiniband/hw/qib/qib_fs.c
32556 +++ b/drivers/infiniband/hw/qib/qib_fs.c
32557 @@ -267,6 +267,8 @@ static const struct file_operations qsfp_ops[] = {
32558 };
32559
32560 static ssize_t flash_read(struct file *file, char __user *buf,
32561 + size_t count, loff_t *ppos) __size_overflow(3);
32562 +static ssize_t flash_read(struct file *file, char __user *buf,
32563 size_t count, loff_t *ppos)
32564 {
32565 struct qib_devdata *dd;
32566 @@ -318,6 +320,8 @@ bail:
32567 }
32568
32569 static ssize_t flash_write(struct file *file, const char __user *buf,
32570 + size_t count, loff_t *ppos) __size_overflow(3);
32571 +static ssize_t flash_write(struct file *file, const char __user *buf,
32572 size_t count, loff_t *ppos)
32573 {
32574 struct qib_devdata *dd;
32575 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32576 index c351aa4..e6967c2 100644
32577 --- a/drivers/input/gameport/gameport.c
32578 +++ b/drivers/input/gameport/gameport.c
32579 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32580 */
32581 static void gameport_init_port(struct gameport *gameport)
32582 {
32583 - static atomic_t gameport_no = ATOMIC_INIT(0);
32584 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32585
32586 __module_get(THIS_MODULE);
32587
32588 mutex_init(&gameport->drv_mutex);
32589 device_initialize(&gameport->dev);
32590 dev_set_name(&gameport->dev, "gameport%lu",
32591 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
32592 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32593 gameport->dev.bus = &gameport_bus;
32594 gameport->dev.release = gameport_release_port;
32595 if (gameport->parent)
32596 diff --git a/drivers/input/input.c b/drivers/input/input.c
32597 index 1f78c95..3cddc6c 100644
32598 --- a/drivers/input/input.c
32599 +++ b/drivers/input/input.c
32600 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32601 */
32602 int input_register_device(struct input_dev *dev)
32603 {
32604 - static atomic_t input_no = ATOMIC_INIT(0);
32605 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32606 struct input_handler *handler;
32607 const char *path;
32608 int error;
32609 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32610 dev->setkeycode = input_default_setkeycode;
32611
32612 dev_set_name(&dev->dev, "input%ld",
32613 - (unsigned long) atomic_inc_return(&input_no) - 1);
32614 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32615
32616 error = device_add(&dev->dev);
32617 if (error)
32618 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32619 index b8d8611..7a4a04b 100644
32620 --- a/drivers/input/joystick/sidewinder.c
32621 +++ b/drivers/input/joystick/sidewinder.c
32622 @@ -30,6 +30,7 @@
32623 #include <linux/kernel.h>
32624 #include <linux/module.h>
32625 #include <linux/slab.h>
32626 +#include <linux/sched.h>
32627 #include <linux/init.h>
32628 #include <linux/input.h>
32629 #include <linux/gameport.h>
32630 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32631 index fd7a0d5..a4af10c 100644
32632 --- a/drivers/input/joystick/xpad.c
32633 +++ b/drivers/input/joystick/xpad.c
32634 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32635
32636 static int xpad_led_probe(struct usb_xpad *xpad)
32637 {
32638 - static atomic_t led_seq = ATOMIC_INIT(0);
32639 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32640 long led_no;
32641 struct xpad_led *led;
32642 struct led_classdev *led_cdev;
32643 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32644 if (!led)
32645 return -ENOMEM;
32646
32647 - led_no = (long)atomic_inc_return(&led_seq) - 1;
32648 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32649
32650 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32651 led->xpad = xpad;
32652 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32653 index 0110b5a..d3ad144 100644
32654 --- a/drivers/input/mousedev.c
32655 +++ b/drivers/input/mousedev.c
32656 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32657
32658 spin_unlock_irq(&client->packet_lock);
32659
32660 - if (copy_to_user(buffer, data, count))
32661 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
32662 return -EFAULT;
32663
32664 return count;
32665 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32666 index ba70058..571d25d 100644
32667 --- a/drivers/input/serio/serio.c
32668 +++ b/drivers/input/serio/serio.c
32669 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
32670 */
32671 static void serio_init_port(struct serio *serio)
32672 {
32673 - static atomic_t serio_no = ATOMIC_INIT(0);
32674 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32675
32676 __module_get(THIS_MODULE);
32677
32678 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
32679 mutex_init(&serio->drv_mutex);
32680 device_initialize(&serio->dev);
32681 dev_set_name(&serio->dev, "serio%ld",
32682 - (long)atomic_inc_return(&serio_no) - 1);
32683 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
32684 serio->dev.bus = &serio_bus;
32685 serio->dev.release = serio_release_port;
32686 serio->dev.groups = serio_device_attr_groups;
32687 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32688 index e44933d..9ba484a 100644
32689 --- a/drivers/isdn/capi/capi.c
32690 +++ b/drivers/isdn/capi/capi.c
32691 @@ -83,8 +83,8 @@ struct capiminor {
32692
32693 struct capi20_appl *ap;
32694 u32 ncci;
32695 - atomic_t datahandle;
32696 - atomic_t msgid;
32697 + atomic_unchecked_t datahandle;
32698 + atomic_unchecked_t msgid;
32699
32700 struct tty_port port;
32701 int ttyinstop;
32702 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32703 capimsg_setu16(s, 2, mp->ap->applid);
32704 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32705 capimsg_setu8 (s, 5, CAPI_RESP);
32706 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32707 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32708 capimsg_setu32(s, 8, mp->ncci);
32709 capimsg_setu16(s, 12, datahandle);
32710 }
32711 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32712 mp->outbytes -= len;
32713 spin_unlock_bh(&mp->outlock);
32714
32715 - datahandle = atomic_inc_return(&mp->datahandle);
32716 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32717 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32718 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32719 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32720 capimsg_setu16(skb->data, 2, mp->ap->applid);
32721 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32722 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32723 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32724 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32725 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32726 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32727 capimsg_setu16(skb->data, 16, len); /* Data length */
32728 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
32729 index db621db..825ea1a 100644
32730 --- a/drivers/isdn/gigaset/common.c
32731 +++ b/drivers/isdn/gigaset/common.c
32732 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
32733 cs->commands_pending = 0;
32734 cs->cur_at_seq = 0;
32735 cs->gotfwver = -1;
32736 - cs->open_count = 0;
32737 + local_set(&cs->open_count, 0);
32738 cs->dev = NULL;
32739 cs->tty = NULL;
32740 cs->tty_dev = NULL;
32741 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
32742 index 212efaf..f187c6b 100644
32743 --- a/drivers/isdn/gigaset/gigaset.h
32744 +++ b/drivers/isdn/gigaset/gigaset.h
32745 @@ -35,6 +35,7 @@
32746 #include <linux/tty_driver.h>
32747 #include <linux/list.h>
32748 #include <linux/atomic.h>
32749 +#include <asm/local.h>
32750
32751 #define GIG_VERSION {0, 5, 0, 0}
32752 #define GIG_COMPAT {0, 4, 0, 0}
32753 @@ -433,7 +434,7 @@ struct cardstate {
32754 spinlock_t cmdlock;
32755 unsigned curlen, cmdbytes;
32756
32757 - unsigned open_count;
32758 + local_t open_count;
32759 struct tty_struct *tty;
32760 struct tasklet_struct if_wake_tasklet;
32761 unsigned control_state;
32762 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
32763 index ee0a549..a7c9798 100644
32764 --- a/drivers/isdn/gigaset/interface.c
32765 +++ b/drivers/isdn/gigaset/interface.c
32766 @@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
32767 }
32768 tty->driver_data = cs;
32769
32770 - ++cs->open_count;
32771 -
32772 - if (cs->open_count == 1) {
32773 + if (local_inc_return(&cs->open_count) == 1) {
32774 spin_lock_irqsave(&cs->lock, flags);
32775 cs->tty = tty;
32776 spin_unlock_irqrestore(&cs->lock, flags);
32777 @@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
32778
32779 if (!cs->connected)
32780 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32781 - else if (!cs->open_count)
32782 + else if (!local_read(&cs->open_count))
32783 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32784 else {
32785 - if (!--cs->open_count) {
32786 + if (!local_dec_return(&cs->open_count)) {
32787 spin_lock_irqsave(&cs->lock, flags);
32788 cs->tty = NULL;
32789 spin_unlock_irqrestore(&cs->lock, flags);
32790 @@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
32791 if (!cs->connected) {
32792 gig_dbg(DEBUG_IF, "not connected");
32793 retval = -ENODEV;
32794 - } else if (!cs->open_count)
32795 + } else if (!local_read(&cs->open_count))
32796 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32797 else {
32798 retval = 0;
32799 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
32800 retval = -ENODEV;
32801 goto done;
32802 }
32803 - if (!cs->open_count) {
32804 + if (!local_read(&cs->open_count)) {
32805 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32806 retval = -ENODEV;
32807 goto done;
32808 @@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
32809 if (!cs->connected) {
32810 gig_dbg(DEBUG_IF, "not connected");
32811 retval = -ENODEV;
32812 - } else if (!cs->open_count)
32813 + } else if (!local_read(&cs->open_count))
32814 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32815 else if (cs->mstate != MS_LOCKED) {
32816 dev_warn(cs->dev, "can't write to unlocked device\n");
32817 @@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
32818
32819 if (!cs->connected)
32820 gig_dbg(DEBUG_IF, "not connected");
32821 - else if (!cs->open_count)
32822 + else if (!local_read(&cs->open_count))
32823 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32824 else if (cs->mstate != MS_LOCKED)
32825 dev_warn(cs->dev, "can't write to unlocked device\n");
32826 @@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
32827
32828 if (!cs->connected)
32829 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32830 - else if (!cs->open_count)
32831 + else if (!local_read(&cs->open_count))
32832 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32833 else
32834 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
32835 @@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
32836
32837 if (!cs->connected)
32838 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32839 - else if (!cs->open_count)
32840 + else if (!local_read(&cs->open_count))
32841 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32842 else
32843 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
32844 @@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
32845 goto out;
32846 }
32847
32848 - if (!cs->open_count) {
32849 + if (!local_read(&cs->open_count)) {
32850 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32851 goto out;
32852 }
32853 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32854 index 2a57da59..e7a12ed 100644
32855 --- a/drivers/isdn/hardware/avm/b1.c
32856 +++ b/drivers/isdn/hardware/avm/b1.c
32857 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
32858 }
32859 if (left) {
32860 if (t4file->user) {
32861 - if (copy_from_user(buf, dp, left))
32862 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32863 return -EFAULT;
32864 } else {
32865 memcpy(buf, dp, left);
32866 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
32867 }
32868 if (left) {
32869 if (config->user) {
32870 - if (copy_from_user(buf, dp, left))
32871 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32872 return -EFAULT;
32873 } else {
32874 memcpy(buf, dp, left);
32875 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32876 index 85784a7..a19ca98 100644
32877 --- a/drivers/isdn/hardware/eicon/divasync.h
32878 +++ b/drivers/isdn/hardware/eicon/divasync.h
32879 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32880 } diva_didd_add_adapter_t;
32881 typedef struct _diva_didd_remove_adapter {
32882 IDI_CALL p_request;
32883 -} diva_didd_remove_adapter_t;
32884 +} __no_const diva_didd_remove_adapter_t;
32885 typedef struct _diva_didd_read_adapter_array {
32886 void * buffer;
32887 dword length;
32888 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32889 index a3bd163..8956575 100644
32890 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32891 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32892 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32893 typedef struct _diva_os_idi_adapter_interface {
32894 diva_init_card_proc_t cleanup_adapter_proc;
32895 diva_cmd_card_proc_t cmd_proc;
32896 -} diva_os_idi_adapter_interface_t;
32897 +} __no_const diva_os_idi_adapter_interface_t;
32898
32899 typedef struct _diva_os_xdi_adapter {
32900 struct list_head link;
32901 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32902 index 1f355bb..43f1fea 100644
32903 --- a/drivers/isdn/icn/icn.c
32904 +++ b/drivers/isdn/icn/icn.c
32905 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
32906 if (count > len)
32907 count = len;
32908 if (user) {
32909 - if (copy_from_user(msg, buf, count))
32910 + if (count > sizeof msg || copy_from_user(msg, buf, count))
32911 return -EFAULT;
32912 } else
32913 memcpy(msg, buf, count);
32914 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32915 index b5fdcb7..5b6c59f 100644
32916 --- a/drivers/lguest/core.c
32917 +++ b/drivers/lguest/core.c
32918 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
32919 * it's worked so far. The end address needs +1 because __get_vm_area
32920 * allocates an extra guard page, so we need space for that.
32921 */
32922 +
32923 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32924 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32925 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32926 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32927 +#else
32928 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32929 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32930 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32931 +#endif
32932 +
32933 if (!switcher_vma) {
32934 err = -ENOMEM;
32935 printk("lguest: could not map switcher pages high\n");
32936 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
32937 * Now the Switcher is mapped at the right address, we can't fail!
32938 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32939 */
32940 - memcpy(switcher_vma->addr, start_switcher_text,
32941 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32942 end_switcher_text - start_switcher_text);
32943
32944 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32945 diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
32946 index ff4a0bc..f5fdd9c 100644
32947 --- a/drivers/lguest/lguest_user.c
32948 +++ b/drivers/lguest/lguest_user.c
32949 @@ -198,6 +198,7 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
32950 * Once our Guest is initialized, the Launcher makes it run by reading
32951 * from /dev/lguest.
32952 */
32953 +static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) __size_overflow(3);
32954 static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
32955 {
32956 struct lguest *lg = file->private_data;
32957 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32958 index 3980903..ce25c5e 100644
32959 --- a/drivers/lguest/x86/core.c
32960 +++ b/drivers/lguest/x86/core.c
32961 @@ -59,7 +59,7 @@ static struct {
32962 /* Offset from where switcher.S was compiled to where we've copied it */
32963 static unsigned long switcher_offset(void)
32964 {
32965 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32966 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32967 }
32968
32969 /* This cpu's struct lguest_pages. */
32970 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32971 * These copies are pretty cheap, so we do them unconditionally: */
32972 /* Save the current Host top-level page directory.
32973 */
32974 +
32975 +#ifdef CONFIG_PAX_PER_CPU_PGD
32976 + pages->state.host_cr3 = read_cr3();
32977 +#else
32978 pages->state.host_cr3 = __pa(current->mm->pgd);
32979 +#endif
32980 +
32981 /*
32982 * Set up the Guest's page tables to see this CPU's pages (and no
32983 * other CPU's pages).
32984 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
32985 * compiled-in switcher code and the high-mapped copy we just made.
32986 */
32987 for (i = 0; i < IDT_ENTRIES; i++)
32988 - default_idt_entries[i] += switcher_offset();
32989 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
32990
32991 /*
32992 * Set up the Switcher's per-cpu areas.
32993 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
32994 * it will be undisturbed when we switch. To change %cs and jump we
32995 * need this structure to feed to Intel's "lcall" instruction.
32996 */
32997 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
32998 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
32999 lguest_entry.segment = LGUEST_CS;
33000
33001 /*
33002 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
33003 index 40634b0..4f5855e 100644
33004 --- a/drivers/lguest/x86/switcher_32.S
33005 +++ b/drivers/lguest/x86/switcher_32.S
33006 @@ -87,6 +87,7 @@
33007 #include <asm/page.h>
33008 #include <asm/segment.h>
33009 #include <asm/lguest.h>
33010 +#include <asm/processor-flags.h>
33011
33012 // We mark the start of the code to copy
33013 // It's placed in .text tho it's never run here
33014 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
33015 // Changes type when we load it: damn Intel!
33016 // For after we switch over our page tables
33017 // That entry will be read-only: we'd crash.
33018 +
33019 +#ifdef CONFIG_PAX_KERNEXEC
33020 + mov %cr0, %edx
33021 + xor $X86_CR0_WP, %edx
33022 + mov %edx, %cr0
33023 +#endif
33024 +
33025 movl $(GDT_ENTRY_TSS*8), %edx
33026 ltr %dx
33027
33028 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
33029 // Let's clear it again for our return.
33030 // The GDT descriptor of the Host
33031 // Points to the table after two "size" bytes
33032 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
33033 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
33034 // Clear "used" from type field (byte 5, bit 2)
33035 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
33036 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
33037 +
33038 +#ifdef CONFIG_PAX_KERNEXEC
33039 + mov %cr0, %eax
33040 + xor $X86_CR0_WP, %eax
33041 + mov %eax, %cr0
33042 +#endif
33043
33044 // Once our page table's switched, the Guest is live!
33045 // The Host fades as we run this final step.
33046 @@ -295,13 +309,12 @@ deliver_to_host:
33047 // I consulted gcc, and it gave
33048 // These instructions, which I gladly credit:
33049 leal (%edx,%ebx,8), %eax
33050 - movzwl (%eax),%edx
33051 - movl 4(%eax), %eax
33052 - xorw %ax, %ax
33053 - orl %eax, %edx
33054 + movl 4(%eax), %edx
33055 + movw (%eax), %dx
33056 // Now the address of the handler's in %edx
33057 // We call it now: its "iret" drops us home.
33058 - jmp *%edx
33059 + ljmp $__KERNEL_CS, $1f
33060 +1: jmp *%edx
33061
33062 // Every interrupt can come to us here
33063 // But we must truly tell each apart.
33064 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
33065 index 4daf9e5..b8d1d0f 100644
33066 --- a/drivers/macintosh/macio_asic.c
33067 +++ b/drivers/macintosh/macio_asic.c
33068 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
33069 * MacIO is matched against any Apple ID, it's probe() function
33070 * will then decide wether it applies or not
33071 */
33072 -static const struct pci_device_id __devinitdata pci_ids [] = { {
33073 +static const struct pci_device_id __devinitconst pci_ids [] = { {
33074 .vendor = PCI_VENDOR_ID_APPLE,
33075 .device = PCI_ANY_ID,
33076 .subvendor = PCI_ANY_ID,
33077 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
33078 index 1ce84ed..0fdd40a 100644
33079 --- a/drivers/md/dm-ioctl.c
33080 +++ b/drivers/md/dm-ioctl.c
33081 @@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
33082 cmd == DM_LIST_VERSIONS_CMD)
33083 return 0;
33084
33085 - if ((cmd == DM_DEV_CREATE_CMD)) {
33086 + if (cmd == DM_DEV_CREATE_CMD) {
33087 if (!*param->name) {
33088 DMWARN("name not supplied when creating device");
33089 return -EINVAL;
33090 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
33091 index 9bfd057..5373ff3 100644
33092 --- a/drivers/md/dm-raid1.c
33093 +++ b/drivers/md/dm-raid1.c
33094 @@ -40,7 +40,7 @@ enum dm_raid1_error {
33095
33096 struct mirror {
33097 struct mirror_set *ms;
33098 - atomic_t error_count;
33099 + atomic_unchecked_t error_count;
33100 unsigned long error_type;
33101 struct dm_dev *dev;
33102 sector_t offset;
33103 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
33104 struct mirror *m;
33105
33106 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33107 - if (!atomic_read(&m->error_count))
33108 + if (!atomic_read_unchecked(&m->error_count))
33109 return m;
33110
33111 return NULL;
33112 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33113 * simple way to tell if a device has encountered
33114 * errors.
33115 */
33116 - atomic_inc(&m->error_count);
33117 + atomic_inc_unchecked(&m->error_count);
33118
33119 if (test_and_set_bit(error_type, &m->error_type))
33120 return;
33121 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33122 struct mirror *m = get_default_mirror(ms);
33123
33124 do {
33125 - if (likely(!atomic_read(&m->error_count)))
33126 + if (likely(!atomic_read_unchecked(&m->error_count)))
33127 return m;
33128
33129 if (m-- == ms->mirror)
33130 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33131 {
33132 struct mirror *default_mirror = get_default_mirror(m->ms);
33133
33134 - return !atomic_read(&default_mirror->error_count);
33135 + return !atomic_read_unchecked(&default_mirror->error_count);
33136 }
33137
33138 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33139 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33140 */
33141 if (likely(region_in_sync(ms, region, 1)))
33142 m = choose_mirror(ms, bio->bi_sector);
33143 - else if (m && atomic_read(&m->error_count))
33144 + else if (m && atomic_read_unchecked(&m->error_count))
33145 m = NULL;
33146
33147 if (likely(m))
33148 @@ -848,6 +848,10 @@ static void do_mirror(struct work_struct *work)
33149 static struct mirror_set *alloc_context(unsigned int nr_mirrors,
33150 uint32_t region_size,
33151 struct dm_target *ti,
33152 + struct dm_dirty_log *dl) __size_overflow(1);
33153 +static struct mirror_set *alloc_context(unsigned int nr_mirrors,
33154 + uint32_t region_size,
33155 + struct dm_target *ti,
33156 struct dm_dirty_log *dl)
33157 {
33158 size_t len;
33159 @@ -937,7 +941,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33160 }
33161
33162 ms->mirror[mirror].ms = ms;
33163 - atomic_set(&(ms->mirror[mirror].error_count), 0);
33164 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33165 ms->mirror[mirror].error_type = 0;
33166 ms->mirror[mirror].offset = offset;
33167
33168 @@ -1347,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
33169 */
33170 static char device_status_char(struct mirror *m)
33171 {
33172 - if (!atomic_read(&(m->error_count)))
33173 + if (!atomic_read_unchecked(&(m->error_count)))
33174 return 'A';
33175
33176 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33177 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33178 index 3d80cf0..7d98e1a 100644
33179 --- a/drivers/md/dm-stripe.c
33180 +++ b/drivers/md/dm-stripe.c
33181 @@ -20,7 +20,7 @@ struct stripe {
33182 struct dm_dev *dev;
33183 sector_t physical_start;
33184
33185 - atomic_t error_count;
33186 + atomic_unchecked_t error_count;
33187 };
33188
33189 struct stripe_c {
33190 @@ -55,6 +55,7 @@ static void trigger_event(struct work_struct *work)
33191 dm_table_event(sc->ti->table);
33192 }
33193
33194 +static inline struct stripe_c *alloc_context(unsigned int stripes) __size_overflow(1);
33195 static inline struct stripe_c *alloc_context(unsigned int stripes)
33196 {
33197 size_t len;
33198 @@ -192,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33199 kfree(sc);
33200 return r;
33201 }
33202 - atomic_set(&(sc->stripe[i].error_count), 0);
33203 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33204 }
33205
33206 ti->private = sc;
33207 @@ -314,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33208 DMEMIT("%d ", sc->stripes);
33209 for (i = 0; i < sc->stripes; i++) {
33210 DMEMIT("%s ", sc->stripe[i].dev->name);
33211 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33212 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33213 'D' : 'A';
33214 }
33215 buffer[i] = '\0';
33216 @@ -361,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33217 */
33218 for (i = 0; i < sc->stripes; i++)
33219 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33220 - atomic_inc(&(sc->stripe[i].error_count));
33221 - if (atomic_read(&(sc->stripe[i].error_count)) <
33222 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
33223 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33224 DM_IO_ERROR_THRESHOLD)
33225 schedule_work(&sc->trigger_event);
33226 }
33227 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33228 index 63cc542..8d45caf3 100644
33229 --- a/drivers/md/dm-table.c
33230 +++ b/drivers/md/dm-table.c
33231 @@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33232 if (!dev_size)
33233 return 0;
33234
33235 - if ((start >= dev_size) || (start + len > dev_size)) {
33236 + if ((start >= dev_size) || (len > dev_size - start)) {
33237 DMWARN("%s: %s too small for target: "
33238 "start=%llu, len=%llu, dev_size=%llu",
33239 dm_device_name(ti->table->md), bdevname(bdev, b),
33240 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33241 index 237571a..fb6d19b 100644
33242 --- a/drivers/md/dm-thin-metadata.c
33243 +++ b/drivers/md/dm-thin-metadata.c
33244 @@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33245
33246 pmd->info.tm = tm;
33247 pmd->info.levels = 2;
33248 - pmd->info.value_type.context = pmd->data_sm;
33249 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33250 pmd->info.value_type.size = sizeof(__le64);
33251 pmd->info.value_type.inc = data_block_inc;
33252 pmd->info.value_type.dec = data_block_dec;
33253 @@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33254
33255 pmd->bl_info.tm = tm;
33256 pmd->bl_info.levels = 1;
33257 - pmd->bl_info.value_type.context = pmd->data_sm;
33258 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33259 pmd->bl_info.value_type.size = sizeof(__le64);
33260 pmd->bl_info.value_type.inc = data_block_inc;
33261 pmd->bl_info.value_type.dec = data_block_dec;
33262 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33263 index b89c548..2af3ce4 100644
33264 --- a/drivers/md/dm.c
33265 +++ b/drivers/md/dm.c
33266 @@ -176,9 +176,9 @@ struct mapped_device {
33267 /*
33268 * Event handling.
33269 */
33270 - atomic_t event_nr;
33271 + atomic_unchecked_t event_nr;
33272 wait_queue_head_t eventq;
33273 - atomic_t uevent_seq;
33274 + atomic_unchecked_t uevent_seq;
33275 struct list_head uevent_list;
33276 spinlock_t uevent_lock; /* Protect access to uevent_list */
33277
33278 @@ -1844,8 +1844,8 @@ static struct mapped_device *alloc_dev(int minor)
33279 rwlock_init(&md->map_lock);
33280 atomic_set(&md->holders, 1);
33281 atomic_set(&md->open_count, 0);
33282 - atomic_set(&md->event_nr, 0);
33283 - atomic_set(&md->uevent_seq, 0);
33284 + atomic_set_unchecked(&md->event_nr, 0);
33285 + atomic_set_unchecked(&md->uevent_seq, 0);
33286 INIT_LIST_HEAD(&md->uevent_list);
33287 spin_lock_init(&md->uevent_lock);
33288
33289 @@ -1979,7 +1979,7 @@ static void event_callback(void *context)
33290
33291 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33292
33293 - atomic_inc(&md->event_nr);
33294 + atomic_inc_unchecked(&md->event_nr);
33295 wake_up(&md->eventq);
33296 }
33297
33298 @@ -2621,18 +2621,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33299
33300 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33301 {
33302 - return atomic_add_return(1, &md->uevent_seq);
33303 + return atomic_add_return_unchecked(1, &md->uevent_seq);
33304 }
33305
33306 uint32_t dm_get_event_nr(struct mapped_device *md)
33307 {
33308 - return atomic_read(&md->event_nr);
33309 + return atomic_read_unchecked(&md->event_nr);
33310 }
33311
33312 int dm_wait_event(struct mapped_device *md, int event_nr)
33313 {
33314 return wait_event_interruptible(md->eventq,
33315 - (event_nr != atomic_read(&md->event_nr)));
33316 + (event_nr != atomic_read_unchecked(&md->event_nr)));
33317 }
33318
33319 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33320 diff --git a/drivers/md/md.c b/drivers/md/md.c
33321 index 6acc846..80a6b96 100644
33322 --- a/drivers/md/md.c
33323 +++ b/drivers/md/md.c
33324 @@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33325 * start build, activate spare
33326 */
33327 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33328 -static atomic_t md_event_count;
33329 +static atomic_unchecked_t md_event_count;
33330 void md_new_event(struct mddev *mddev)
33331 {
33332 - atomic_inc(&md_event_count);
33333 + atomic_inc_unchecked(&md_event_count);
33334 wake_up(&md_event_waiters);
33335 }
33336 EXPORT_SYMBOL_GPL(md_new_event);
33337 @@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33338 */
33339 static void md_new_event_inintr(struct mddev *mddev)
33340 {
33341 - atomic_inc(&md_event_count);
33342 + atomic_inc_unchecked(&md_event_count);
33343 wake_up(&md_event_waiters);
33344 }
33345
33346 @@ -1524,7 +1524,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33347
33348 rdev->preferred_minor = 0xffff;
33349 rdev->data_offset = le64_to_cpu(sb->data_offset);
33350 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33351 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33352
33353 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33354 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33355 @@ -1743,7 +1743,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33356 else
33357 sb->resync_offset = cpu_to_le64(0);
33358
33359 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33360 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33361
33362 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33363 sb->size = cpu_to_le64(mddev->dev_sectors);
33364 @@ -2689,7 +2689,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33365 static ssize_t
33366 errors_show(struct md_rdev *rdev, char *page)
33367 {
33368 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33369 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33370 }
33371
33372 static ssize_t
33373 @@ -2698,7 +2698,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33374 char *e;
33375 unsigned long n = simple_strtoul(buf, &e, 10);
33376 if (*buf && (*e == 0 || *e == '\n')) {
33377 - atomic_set(&rdev->corrected_errors, n);
33378 + atomic_set_unchecked(&rdev->corrected_errors, n);
33379 return len;
33380 }
33381 return -EINVAL;
33382 @@ -3084,8 +3084,8 @@ int md_rdev_init(struct md_rdev *rdev)
33383 rdev->sb_loaded = 0;
33384 rdev->bb_page = NULL;
33385 atomic_set(&rdev->nr_pending, 0);
33386 - atomic_set(&rdev->read_errors, 0);
33387 - atomic_set(&rdev->corrected_errors, 0);
33388 + atomic_set_unchecked(&rdev->read_errors, 0);
33389 + atomic_set_unchecked(&rdev->corrected_errors, 0);
33390
33391 INIT_LIST_HEAD(&rdev->same_set);
33392 init_waitqueue_head(&rdev->blocked_wait);
33393 @@ -6736,7 +6736,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33394
33395 spin_unlock(&pers_lock);
33396 seq_printf(seq, "\n");
33397 - seq->poll_event = atomic_read(&md_event_count);
33398 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33399 return 0;
33400 }
33401 if (v == (void*)2) {
33402 @@ -6828,7 +6828,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33403 chunk_kb ? "KB" : "B");
33404 if (bitmap->file) {
33405 seq_printf(seq, ", file: ");
33406 - seq_path(seq, &bitmap->file->f_path, " \t\n");
33407 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
33408 }
33409
33410 seq_printf(seq, "\n");
33411 @@ -6859,7 +6859,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33412 return error;
33413
33414 seq = file->private_data;
33415 - seq->poll_event = atomic_read(&md_event_count);
33416 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33417 return error;
33418 }
33419
33420 @@ -6873,7 +6873,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33421 /* always allow read */
33422 mask = POLLIN | POLLRDNORM;
33423
33424 - if (seq->poll_event != atomic_read(&md_event_count))
33425 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33426 mask |= POLLERR | POLLPRI;
33427 return mask;
33428 }
33429 @@ -6917,7 +6917,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33430 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33431 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33432 (int)part_stat_read(&disk->part0, sectors[1]) -
33433 - atomic_read(&disk->sync_io);
33434 + atomic_read_unchecked(&disk->sync_io);
33435 /* sync IO will cause sync_io to increase before the disk_stats
33436 * as sync_io is counted when a request starts, and
33437 * disk_stats is counted when it completes.
33438 diff --git a/drivers/md/md.h b/drivers/md/md.h
33439 index 44c63df..b795d1a 100644
33440 --- a/drivers/md/md.h
33441 +++ b/drivers/md/md.h
33442 @@ -93,13 +93,13 @@ struct md_rdev {
33443 * only maintained for arrays that
33444 * support hot removal
33445 */
33446 - atomic_t read_errors; /* number of consecutive read errors that
33447 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
33448 * we have tried to ignore.
33449 */
33450 struct timespec last_read_error; /* monotonic time since our
33451 * last read error
33452 */
33453 - atomic_t corrected_errors; /* number of corrected read errors,
33454 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33455 * for reporting to userspace and storing
33456 * in superblock.
33457 */
33458 @@ -421,7 +421,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33459
33460 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33461 {
33462 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33463 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33464 }
33465
33466 struct md_personality
33467 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33468 index 50ed53b..4f29d7d 100644
33469 --- a/drivers/md/persistent-data/dm-space-map-checker.c
33470 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
33471 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
33472 /*----------------------------------------------------------------*/
33473
33474 struct sm_checker {
33475 - struct dm_space_map sm;
33476 + dm_space_map_no_const sm;
33477
33478 struct count_array old_counts;
33479 struct count_array counts;
33480 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33481 index fc469ba..2d91555 100644
33482 --- a/drivers/md/persistent-data/dm-space-map-disk.c
33483 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
33484 @@ -23,7 +23,7 @@
33485 * Space map interface.
33486 */
33487 struct sm_disk {
33488 - struct dm_space_map sm;
33489 + dm_space_map_no_const sm;
33490
33491 struct ll_disk ll;
33492 struct ll_disk old_ll;
33493 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33494 index e89ae5e..062e4c2 100644
33495 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
33496 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33497 @@ -43,7 +43,7 @@ struct block_op {
33498 };
33499
33500 struct sm_metadata {
33501 - struct dm_space_map sm;
33502 + dm_space_map_no_const sm;
33503
33504 struct ll_disk ll;
33505 struct ll_disk old_ll;
33506 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33507 index 1cbfc6b..56e1dbb 100644
33508 --- a/drivers/md/persistent-data/dm-space-map.h
33509 +++ b/drivers/md/persistent-data/dm-space-map.h
33510 @@ -60,6 +60,7 @@ struct dm_space_map {
33511 int (*root_size)(struct dm_space_map *sm, size_t *result);
33512 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33513 };
33514 +typedef struct dm_space_map __no_const dm_space_map_no_const;
33515
33516 /*----------------------------------------------------------------*/
33517
33518 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33519 index edc735a..e9b97f1 100644
33520 --- a/drivers/md/raid1.c
33521 +++ b/drivers/md/raid1.c
33522 @@ -1645,7 +1645,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33523 if (r1_sync_page_io(rdev, sect, s,
33524 bio->bi_io_vec[idx].bv_page,
33525 READ) != 0)
33526 - atomic_add(s, &rdev->corrected_errors);
33527 + atomic_add_unchecked(s, &rdev->corrected_errors);
33528 }
33529 sectors -= s;
33530 sect += s;
33531 @@ -1859,7 +1859,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33532 test_bit(In_sync, &rdev->flags)) {
33533 if (r1_sync_page_io(rdev, sect, s,
33534 conf->tmppage, READ)) {
33535 - atomic_add(s, &rdev->corrected_errors);
33536 + atomic_add_unchecked(s, &rdev->corrected_errors);
33537 printk(KERN_INFO
33538 "md/raid1:%s: read error corrected "
33539 "(%d sectors at %llu on %s)\n",
33540 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33541 index 1898389..a3aa617 100644
33542 --- a/drivers/md/raid10.c
33543 +++ b/drivers/md/raid10.c
33544 @@ -1636,7 +1636,7 @@ static void end_sync_read(struct bio *bio, int error)
33545 /* The write handler will notice the lack of
33546 * R10BIO_Uptodate and record any errors etc
33547 */
33548 - atomic_add(r10_bio->sectors,
33549 + atomic_add_unchecked(r10_bio->sectors,
33550 &conf->mirrors[d].rdev->corrected_errors);
33551
33552 /* for reconstruct, we always reschedule after a read.
33553 @@ -1987,7 +1987,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33554 {
33555 struct timespec cur_time_mon;
33556 unsigned long hours_since_last;
33557 - unsigned int read_errors = atomic_read(&rdev->read_errors);
33558 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33559
33560 ktime_get_ts(&cur_time_mon);
33561
33562 @@ -2009,9 +2009,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33563 * overflowing the shift of read_errors by hours_since_last.
33564 */
33565 if (hours_since_last >= 8 * sizeof(read_errors))
33566 - atomic_set(&rdev->read_errors, 0);
33567 + atomic_set_unchecked(&rdev->read_errors, 0);
33568 else
33569 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33570 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33571 }
33572
33573 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33574 @@ -2065,8 +2065,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33575 return;
33576
33577 check_decay_read_errors(mddev, rdev);
33578 - atomic_inc(&rdev->read_errors);
33579 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
33580 + atomic_inc_unchecked(&rdev->read_errors);
33581 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33582 char b[BDEVNAME_SIZE];
33583 bdevname(rdev->bdev, b);
33584
33585 @@ -2074,7 +2074,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33586 "md/raid10:%s: %s: Raid device exceeded "
33587 "read_error threshold [cur %d:max %d]\n",
33588 mdname(mddev), b,
33589 - atomic_read(&rdev->read_errors), max_read_errors);
33590 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33591 printk(KERN_NOTICE
33592 "md/raid10:%s: %s: Failing raid device\n",
33593 mdname(mddev), b);
33594 @@ -2223,7 +2223,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33595 (unsigned long long)(
33596 sect + rdev->data_offset),
33597 bdevname(rdev->bdev, b));
33598 - atomic_add(s, &rdev->corrected_errors);
33599 + atomic_add_unchecked(s, &rdev->corrected_errors);
33600 }
33601
33602 rdev_dec_pending(rdev, mddev);
33603 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33604 index 360f2b9..08b5382 100644
33605 --- a/drivers/md/raid5.c
33606 +++ b/drivers/md/raid5.c
33607 @@ -1687,18 +1687,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
33608 (unsigned long long)(sh->sector
33609 + rdev->data_offset),
33610 bdevname(rdev->bdev, b));
33611 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33612 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33613 clear_bit(R5_ReadError, &sh->dev[i].flags);
33614 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33615 }
33616 - if (atomic_read(&rdev->read_errors))
33617 - atomic_set(&rdev->read_errors, 0);
33618 + if (atomic_read_unchecked(&rdev->read_errors))
33619 + atomic_set_unchecked(&rdev->read_errors, 0);
33620 } else {
33621 const char *bdn = bdevname(rdev->bdev, b);
33622 int retry = 0;
33623
33624 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33625 - atomic_inc(&rdev->read_errors);
33626 + atomic_inc_unchecked(&rdev->read_errors);
33627 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33628 printk_ratelimited(
33629 KERN_WARNING
33630 @@ -1727,7 +1727,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33631 (unsigned long long)(sh->sector
33632 + rdev->data_offset),
33633 bdn);
33634 - else if (atomic_read(&rdev->read_errors)
33635 + else if (atomic_read_unchecked(&rdev->read_errors)
33636 > conf->max_nr_stripes)
33637 printk(KERN_WARNING
33638 "md/raid:%s: Too many read errors, failing device %s.\n",
33639 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33640 index ce4f858..7bcfb46 100644
33641 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33642 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33643 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
33644 .subvendor = _subvend, .subdevice = _subdev, \
33645 .driver_data = (unsigned long)&_driverdata }
33646
33647 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33648 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33649 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33650 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33651 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33652 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33653 index a7d876f..8c21b61 100644
33654 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
33655 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33656 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
33657 union {
33658 dmx_ts_cb ts;
33659 dmx_section_cb sec;
33660 - } cb;
33661 + } __no_const cb;
33662
33663 struct dvb_demux *demux;
33664 void *priv;
33665 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33666 index 00a6732..70a682e 100644
33667 --- a/drivers/media/dvb/dvb-core/dvbdev.c
33668 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
33669 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33670 const struct dvb_device *template, void *priv, int type)
33671 {
33672 struct dvb_device *dvbdev;
33673 - struct file_operations *dvbdevfops;
33674 + file_operations_no_const *dvbdevfops;
33675 struct device *clsdev;
33676 int minor;
33677 int id;
33678 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33679 index 3940bb0..fb3952a 100644
33680 --- a/drivers/media/dvb/dvb-usb/cxusb.c
33681 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
33682 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33683
33684 struct dib0700_adapter_state {
33685 int (*set_param_save) (struct dvb_frontend *);
33686 -};
33687 +} __no_const;
33688
33689 static int dib7070_set_param_override(struct dvb_frontend *fe)
33690 {
33691 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33692 index 451c5a7..649f711 100644
33693 --- a/drivers/media/dvb/dvb-usb/dw2102.c
33694 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
33695 @@ -95,7 +95,7 @@ struct su3000_state {
33696
33697 struct s6x0_state {
33698 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33699 -};
33700 +} __no_const;
33701
33702 /* debug */
33703 static int dvb_usb_dw2102_debug;
33704 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33705 index 404f63a..4796533 100644
33706 --- a/drivers/media/dvb/frontends/dib3000.h
33707 +++ b/drivers/media/dvb/frontends/dib3000.h
33708 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33709 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33710 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33711 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33712 -};
33713 +} __no_const;
33714
33715 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33716 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33717 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33718 index 8418c02..8555013 100644
33719 --- a/drivers/media/dvb/ngene/ngene-cards.c
33720 +++ b/drivers/media/dvb/ngene/ngene-cards.c
33721 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
33722
33723 /****************************************************************************/
33724
33725 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33726 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33727 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33728 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33729 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33730 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33731 index 16a089f..ab1667d 100644
33732 --- a/drivers/media/radio/radio-cadet.c
33733 +++ b/drivers/media/radio/radio-cadet.c
33734 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33735 unsigned char readbuf[RDS_BUFFER];
33736 int i = 0;
33737
33738 + if (count > RDS_BUFFER)
33739 + return -EFAULT;
33740 mutex_lock(&dev->lock);
33741 if (dev->rdsstat == 0) {
33742 dev->rdsstat = 1;
33743 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33744 index 9cde353..8c6a1c3 100644
33745 --- a/drivers/media/video/au0828/au0828.h
33746 +++ b/drivers/media/video/au0828/au0828.h
33747 @@ -191,7 +191,7 @@ struct au0828_dev {
33748
33749 /* I2C */
33750 struct i2c_adapter i2c_adap;
33751 - struct i2c_algorithm i2c_algo;
33752 + i2c_algorithm_no_const i2c_algo;
33753 struct i2c_client i2c_client;
33754 u32 i2c_rc;
33755
33756 diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
33757 index ee91e295..04ad048 100644
33758 --- a/drivers/media/video/cpia2/cpia2_core.c
33759 +++ b/drivers/media/video/cpia2/cpia2_core.c
33760 @@ -86,6 +86,7 @@ static inline unsigned long kvirt_to_pa(unsigned long adr)
33761 return ret;
33762 }
33763
33764 +static void *rvmalloc(unsigned long size) __size_overflow(1);
33765 static void *rvmalloc(unsigned long size)
33766 {
33767 void *mem;
33768 diff --git a/drivers/media/video/cx18/cx18-alsa-pcm.c b/drivers/media/video/cx18/cx18-alsa-pcm.c
33769 index 82d195b..181103c 100644
33770 --- a/drivers/media/video/cx18/cx18-alsa-pcm.c
33771 +++ b/drivers/media/video/cx18/cx18-alsa-pcm.c
33772 @@ -229,6 +229,8 @@ static int snd_cx18_pcm_ioctl(struct snd_pcm_substream *substream,
33773
33774
33775 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
33776 + size_t size) __size_overflow(2);
33777 +static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
33778 size_t size)
33779 {
33780 struct snd_pcm_runtime *runtime = subs->runtime;
33781 diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
33782 index a2c2b7d..8f1bec7 100644
33783 --- a/drivers/media/video/cx231xx/cx231xx-audio.c
33784 +++ b/drivers/media/video/cx231xx/cx231xx-audio.c
33785 @@ -389,6 +389,8 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
33786 }
33787
33788 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
33789 + size_t size) __size_overflow(2);
33790 +static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
33791 size_t size)
33792 {
33793 struct snd_pcm_runtime *runtime = subs->runtime;
33794 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33795 index 04bf662..e0ac026 100644
33796 --- a/drivers/media/video/cx88/cx88-alsa.c
33797 +++ b/drivers/media/video/cx88/cx88-alsa.c
33798 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33799 * Only boards with eeprom and byte 1 at eeprom=1 have it
33800 */
33801
33802 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33803 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33804 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33805 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33806 {0, }
33807 diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
33808 index e2a7b77..753d0ee 100644
33809 --- a/drivers/media/video/em28xx/em28xx-audio.c
33810 +++ b/drivers/media/video/em28xx/em28xx-audio.c
33811 @@ -225,6 +225,8 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
33812 }
33813
33814 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
33815 + size_t size) __size_overflow(2);
33816 +static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
33817 size_t size)
33818 {
33819 struct snd_pcm_runtime *runtime = subs->runtime;
33820 diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
33821 index b09a3c8..6dcba0a 100644
33822 --- a/drivers/media/video/meye.c
33823 +++ b/drivers/media/video/meye.c
33824 @@ -72,6 +72,7 @@ static struct meye meye;
33825 /****************************************************************************/
33826 /* Memory allocation routines (stolen from bttv-driver.c) */
33827 /****************************************************************************/
33828 +static void *rvmalloc(unsigned long size) __size_overflow(1);
33829 static void *rvmalloc(unsigned long size)
33830 {
33831 void *mem;
33832 diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33833 index 1fb7d5b..3901e77 100644
33834 --- a/drivers/media/video/omap/omap_vout.c
33835 +++ b/drivers/media/video/omap/omap_vout.c
33836 @@ -64,7 +64,6 @@ enum omap_vout_channels {
33837 OMAP_VIDEO2,
33838 };
33839
33840 -static struct videobuf_queue_ops video_vbq_ops;
33841 /* Variables configurable through module params*/
33842 static u32 video1_numbuffers = 3;
33843 static u32 video2_numbuffers = 3;
33844 @@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
33845 {
33846 struct videobuf_queue *q;
33847 struct omap_vout_device *vout = NULL;
33848 + static struct videobuf_queue_ops video_vbq_ops = {
33849 + .buf_setup = omap_vout_buffer_setup,
33850 + .buf_prepare = omap_vout_buffer_prepare,
33851 + .buf_release = omap_vout_buffer_release,
33852 + .buf_queue = omap_vout_buffer_queue,
33853 + };
33854
33855 vout = video_drvdata(file);
33856 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
33857 @@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
33858 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33859
33860 q = &vout->vbq;
33861 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33862 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33863 - video_vbq_ops.buf_release = omap_vout_buffer_release;
33864 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33865 spin_lock_init(&vout->vbq_lock);
33866
33867 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
33868 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33869 index 305e6aa..0143317 100644
33870 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33871 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33872 @@ -196,7 +196,7 @@ struct pvr2_hdw {
33873
33874 /* I2C stuff */
33875 struct i2c_adapter i2c_adap;
33876 - struct i2c_algorithm i2c_algo;
33877 + i2c_algorithm_no_const i2c_algo;
33878 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33879 int i2c_cx25840_hack_state;
33880 int i2c_linked;
33881 diff --git a/drivers/media/video/saa7164/saa7164-encoder.c b/drivers/media/video/saa7164/saa7164-encoder.c
33882 index 2fd38a0..ddec3c4 100644
33883 --- a/drivers/media/video/saa7164/saa7164-encoder.c
33884 +++ b/drivers/media/video/saa7164/saa7164-encoder.c
33885 @@ -1136,6 +1136,8 @@ struct saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
33886 }
33887
33888 static ssize_t fops_read(struct file *file, char __user *buffer,
33889 + size_t count, loff_t *pos) __size_overflow(3);
33890 +static ssize_t fops_read(struct file *file, char __user *buffer,
33891 size_t count, loff_t *pos)
33892 {
33893 struct saa7164_encoder_fh *fh = file->private_data;
33894 diff --git a/drivers/media/video/saa7164/saa7164-vbi.c b/drivers/media/video/saa7164/saa7164-vbi.c
33895 index e2e0341..b80056c 100644
33896 --- a/drivers/media/video/saa7164/saa7164-vbi.c
33897 +++ b/drivers/media/video/saa7164/saa7164-vbi.c
33898 @@ -1081,6 +1081,8 @@ struct saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
33899 }
33900
33901 static ssize_t fops_read(struct file *file, char __user *buffer,
33902 + size_t count, loff_t *pos) __size_overflow(3);
33903 +static ssize_t fops_read(struct file *file, char __user *buffer,
33904 size_t count, loff_t *pos)
33905 {
33906 struct saa7164_vbi_fh *fh = file->private_data;
33907 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33908 index 4ed1c7c2..8f15e13 100644
33909 --- a/drivers/media/video/timblogiw.c
33910 +++ b/drivers/media/video/timblogiw.c
33911 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33912
33913 /* Platform device functions */
33914
33915 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33916 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33917 .vidioc_querycap = timblogiw_querycap,
33918 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33919 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33920 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33921 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33922 };
33923
33924 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33925 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33926 .owner = THIS_MODULE,
33927 .open = timblogiw_open,
33928 .release = timblogiw_close,
33929 diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
33930 index c969111..a7910f4 100644
33931 --- a/drivers/media/video/videobuf-dma-contig.c
33932 +++ b/drivers/media/video/videobuf-dma-contig.c
33933 @@ -184,6 +184,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
33934 return ret;
33935 }
33936
33937 +static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
33938 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
33939 {
33940 struct videobuf_dma_contig_memory *mem;
33941 diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
33942 index f300dea..5fc9c4a 100644
33943 --- a/drivers/media/video/videobuf-dma-sg.c
33944 +++ b/drivers/media/video/videobuf-dma-sg.c
33945 @@ -419,6 +419,7 @@ static const struct vm_operations_struct videobuf_vm_ops = {
33946 struct videobuf_dma_sg_memory
33947 */
33948
33949 +static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
33950 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
33951 {
33952 struct videobuf_dma_sg_memory *mem;
33953 diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
33954 index df14258..12cc7a3 100644
33955 --- a/drivers/media/video/videobuf-vmalloc.c
33956 +++ b/drivers/media/video/videobuf-vmalloc.c
33957 @@ -135,6 +135,7 @@ static const struct vm_operations_struct videobuf_vm_ops = {
33958 struct videobuf_dma_sg_memory
33959 */
33960
33961 +static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
33962 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
33963 {
33964 struct videobuf_vmalloc_memory *mem;
33965 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
33966 index a7dc467..a55c423 100644
33967 --- a/drivers/message/fusion/mptbase.c
33968 +++ b/drivers/message/fusion/mptbase.c
33969 @@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
33970 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33971 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33972
33973 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33974 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
33975 +#else
33976 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33977 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33978 +#endif
33979 +
33980 /*
33981 * Rounding UP to nearest 4-kB boundary here...
33982 */
33983 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
33984 index 551262e..7551198 100644
33985 --- a/drivers/message/fusion/mptsas.c
33986 +++ b/drivers/message/fusion/mptsas.c
33987 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
33988 return 0;
33989 }
33990
33991 +static inline void
33992 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33993 +{
33994 + if (phy_info->port_details) {
33995 + phy_info->port_details->rphy = rphy;
33996 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33997 + ioc->name, rphy));
33998 + }
33999 +
34000 + if (rphy) {
34001 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34002 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34003 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34004 + ioc->name, rphy, rphy->dev.release));
34005 + }
34006 +}
34007 +
34008 /* no mutex */
34009 static void
34010 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
34011 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
34012 return NULL;
34013 }
34014
34015 -static inline void
34016 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34017 -{
34018 - if (phy_info->port_details) {
34019 - phy_info->port_details->rphy = rphy;
34020 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34021 - ioc->name, rphy));
34022 - }
34023 -
34024 - if (rphy) {
34025 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34026 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34027 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34028 - ioc->name, rphy, rphy->dev.release));
34029 - }
34030 -}
34031 -
34032 static inline struct sas_port *
34033 mptsas_get_port(struct mptsas_phyinfo *phy_info)
34034 {
34035 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
34036 index 0c3ced7..1fe34ec 100644
34037 --- a/drivers/message/fusion/mptscsih.c
34038 +++ b/drivers/message/fusion/mptscsih.c
34039 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
34040
34041 h = shost_priv(SChost);
34042
34043 - if (h) {
34044 - if (h->info_kbuf == NULL)
34045 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34046 - return h->info_kbuf;
34047 - h->info_kbuf[0] = '\0';
34048 + if (!h)
34049 + return NULL;
34050
34051 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34052 - h->info_kbuf[size-1] = '\0';
34053 - }
34054 + if (h->info_kbuf == NULL)
34055 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34056 + return h->info_kbuf;
34057 + h->info_kbuf[0] = '\0';
34058 +
34059 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34060 + h->info_kbuf[size-1] = '\0';
34061
34062 return h->info_kbuf;
34063 }
34064 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
34065 index 6d115c7..58ff7fd 100644
34066 --- a/drivers/message/i2o/i2o_proc.c
34067 +++ b/drivers/message/i2o/i2o_proc.c
34068 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
34069 "Array Controller Device"
34070 };
34071
34072 -static char *chtostr(u8 * chars, int n)
34073 -{
34074 - char tmp[256];
34075 - tmp[0] = 0;
34076 - return strncat(tmp, (char *)chars, n);
34077 -}
34078 -
34079 static int i2o_report_query_status(struct seq_file *seq, int block_status,
34080 char *group)
34081 {
34082 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
34083
34084 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
34085 seq_printf(seq, "%-#8x", ddm_table.module_id);
34086 - seq_printf(seq, "%-29s",
34087 - chtostr(ddm_table.module_name_version, 28));
34088 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
34089 seq_printf(seq, "%9d ", ddm_table.data_size);
34090 seq_printf(seq, "%8d", ddm_table.code_size);
34091
34092 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
34093
34094 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
34095 seq_printf(seq, "%-#8x", dst->module_id);
34096 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
34097 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
34098 + seq_printf(seq, "%-.28s", dst->module_name_version);
34099 + seq_printf(seq, "%-.8s", dst->date);
34100 seq_printf(seq, "%8d ", dst->module_size);
34101 seq_printf(seq, "%8d ", dst->mpb_size);
34102 seq_printf(seq, "0x%04x", dst->module_flags);
34103 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
34104 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
34105 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
34106 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
34107 - seq_printf(seq, "Vendor info : %s\n",
34108 - chtostr((u8 *) (work32 + 2), 16));
34109 - seq_printf(seq, "Product info : %s\n",
34110 - chtostr((u8 *) (work32 + 6), 16));
34111 - seq_printf(seq, "Description : %s\n",
34112 - chtostr((u8 *) (work32 + 10), 16));
34113 - seq_printf(seq, "Product rev. : %s\n",
34114 - chtostr((u8 *) (work32 + 14), 8));
34115 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
34116 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
34117 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
34118 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
34119
34120 seq_printf(seq, "Serial number : ");
34121 print_serial_number(seq, (u8 *) (work32 + 16),
34122 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
34123 }
34124
34125 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
34126 - seq_printf(seq, "Module name : %s\n",
34127 - chtostr(result.module_name, 24));
34128 - seq_printf(seq, "Module revision : %s\n",
34129 - chtostr(result.module_rev, 8));
34130 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
34131 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
34132
34133 seq_printf(seq, "Serial number : ");
34134 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
34135 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
34136 return 0;
34137 }
34138
34139 - seq_printf(seq, "Device name : %s\n",
34140 - chtostr(result.device_name, 64));
34141 - seq_printf(seq, "Service name : %s\n",
34142 - chtostr(result.service_name, 64));
34143 - seq_printf(seq, "Physical name : %s\n",
34144 - chtostr(result.physical_location, 64));
34145 - seq_printf(seq, "Instance number : %s\n",
34146 - chtostr(result.instance_number, 4));
34147 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
34148 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
34149 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
34150 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
34151
34152 return 0;
34153 }
34154 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
34155 index a8c08f3..155fe3d 100644
34156 --- a/drivers/message/i2o/iop.c
34157 +++ b/drivers/message/i2o/iop.c
34158 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
34159
34160 spin_lock_irqsave(&c->context_list_lock, flags);
34161
34162 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
34163 - atomic_inc(&c->context_list_counter);
34164 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
34165 + atomic_inc_unchecked(&c->context_list_counter);
34166
34167 - entry->context = atomic_read(&c->context_list_counter);
34168 + entry->context = atomic_read_unchecked(&c->context_list_counter);
34169
34170 list_add(&entry->list, &c->context_list);
34171
34172 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
34173
34174 #if BITS_PER_LONG == 64
34175 spin_lock_init(&c->context_list_lock);
34176 - atomic_set(&c->context_list_counter, 0);
34177 + atomic_set_unchecked(&c->context_list_counter, 0);
34178 INIT_LIST_HEAD(&c->context_list);
34179 #endif
34180
34181 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
34182 index 7ce65f4..e66e9bc 100644
34183 --- a/drivers/mfd/abx500-core.c
34184 +++ b/drivers/mfd/abx500-core.c
34185 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
34186
34187 struct abx500_device_entry {
34188 struct list_head list;
34189 - struct abx500_ops ops;
34190 + abx500_ops_no_const ops;
34191 struct device *dev;
34192 };
34193
34194 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
34195 index a9223ed..4127b13 100644
34196 --- a/drivers/mfd/janz-cmodio.c
34197 +++ b/drivers/mfd/janz-cmodio.c
34198 @@ -13,6 +13,7 @@
34199
34200 #include <linux/kernel.h>
34201 #include <linux/module.h>
34202 +#include <linux/slab.h>
34203 #include <linux/init.h>
34204 #include <linux/pci.h>
34205 #include <linux/interrupt.h>
34206 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
34207 index a981e2a..5ca0c8b 100644
34208 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
34209 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
34210 @@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
34211 * the lid is closed. This leads to interrupts as soon as a little move
34212 * is done.
34213 */
34214 - atomic_inc(&lis3->count);
34215 + atomic_inc_unchecked(&lis3->count);
34216
34217 wake_up_interruptible(&lis3->misc_wait);
34218 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
34219 @@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34220 if (lis3->pm_dev)
34221 pm_runtime_get_sync(lis3->pm_dev);
34222
34223 - atomic_set(&lis3->count, 0);
34224 + atomic_set_unchecked(&lis3->count, 0);
34225 return 0;
34226 }
34227
34228 @@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34229 add_wait_queue(&lis3->misc_wait, &wait);
34230 while (true) {
34231 set_current_state(TASK_INTERRUPTIBLE);
34232 - data = atomic_xchg(&lis3->count, 0);
34233 + data = atomic_xchg_unchecked(&lis3->count, 0);
34234 if (data)
34235 break;
34236
34237 @@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34238 struct lis3lv02d, miscdev);
34239
34240 poll_wait(file, &lis3->misc_wait, wait);
34241 - if (atomic_read(&lis3->count))
34242 + if (atomic_read_unchecked(&lis3->count))
34243 return POLLIN | POLLRDNORM;
34244 return 0;
34245 }
34246 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
34247 index 2b1482a..5d33616 100644
34248 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
34249 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
34250 @@ -266,7 +266,7 @@ struct lis3lv02d {
34251 struct input_polled_dev *idev; /* input device */
34252 struct platform_device *pdev; /* platform device */
34253 struct regulator_bulk_data regulators[2];
34254 - atomic_t count; /* interrupt count after last read */
34255 + atomic_unchecked_t count; /* interrupt count after last read */
34256 union axis_conversion ac; /* hw -> logical axis */
34257 int mapped_btns[3];
34258
34259 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34260 index 2f30bad..c4c13d0 100644
34261 --- a/drivers/misc/sgi-gru/gruhandles.c
34262 +++ b/drivers/misc/sgi-gru/gruhandles.c
34263 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34264 unsigned long nsec;
34265
34266 nsec = CLKS2NSEC(clks);
34267 - atomic_long_inc(&mcs_op_statistics[op].count);
34268 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
34269 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34270 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34271 if (mcs_op_statistics[op].max < nsec)
34272 mcs_op_statistics[op].max = nsec;
34273 }
34274 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34275 index 950dbe9..eeef0f8 100644
34276 --- a/drivers/misc/sgi-gru/gruprocfs.c
34277 +++ b/drivers/misc/sgi-gru/gruprocfs.c
34278 @@ -32,9 +32,9 @@
34279
34280 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34281
34282 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34283 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34284 {
34285 - unsigned long val = atomic_long_read(v);
34286 + unsigned long val = atomic_long_read_unchecked(v);
34287
34288 seq_printf(s, "%16lu %s\n", val, id);
34289 }
34290 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34291
34292 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34293 for (op = 0; op < mcsop_last; op++) {
34294 - count = atomic_long_read(&mcs_op_statistics[op].count);
34295 - total = atomic_long_read(&mcs_op_statistics[op].total);
34296 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34297 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34298 max = mcs_op_statistics[op].max;
34299 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34300 count ? total / count : 0, max);
34301 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34302 index 5c3ce24..4915ccb 100644
34303 --- a/drivers/misc/sgi-gru/grutables.h
34304 +++ b/drivers/misc/sgi-gru/grutables.h
34305 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34306 * GRU statistics.
34307 */
34308 struct gru_stats_s {
34309 - atomic_long_t vdata_alloc;
34310 - atomic_long_t vdata_free;
34311 - atomic_long_t gts_alloc;
34312 - atomic_long_t gts_free;
34313 - atomic_long_t gms_alloc;
34314 - atomic_long_t gms_free;
34315 - atomic_long_t gts_double_allocate;
34316 - atomic_long_t assign_context;
34317 - atomic_long_t assign_context_failed;
34318 - atomic_long_t free_context;
34319 - atomic_long_t load_user_context;
34320 - atomic_long_t load_kernel_context;
34321 - atomic_long_t lock_kernel_context;
34322 - atomic_long_t unlock_kernel_context;
34323 - atomic_long_t steal_user_context;
34324 - atomic_long_t steal_kernel_context;
34325 - atomic_long_t steal_context_failed;
34326 - atomic_long_t nopfn;
34327 - atomic_long_t asid_new;
34328 - atomic_long_t asid_next;
34329 - atomic_long_t asid_wrap;
34330 - atomic_long_t asid_reuse;
34331 - atomic_long_t intr;
34332 - atomic_long_t intr_cbr;
34333 - atomic_long_t intr_tfh;
34334 - atomic_long_t intr_spurious;
34335 - atomic_long_t intr_mm_lock_failed;
34336 - atomic_long_t call_os;
34337 - atomic_long_t call_os_wait_queue;
34338 - atomic_long_t user_flush_tlb;
34339 - atomic_long_t user_unload_context;
34340 - atomic_long_t user_exception;
34341 - atomic_long_t set_context_option;
34342 - atomic_long_t check_context_retarget_intr;
34343 - atomic_long_t check_context_unload;
34344 - atomic_long_t tlb_dropin;
34345 - atomic_long_t tlb_preload_page;
34346 - atomic_long_t tlb_dropin_fail_no_asid;
34347 - atomic_long_t tlb_dropin_fail_upm;
34348 - atomic_long_t tlb_dropin_fail_invalid;
34349 - atomic_long_t tlb_dropin_fail_range_active;
34350 - atomic_long_t tlb_dropin_fail_idle;
34351 - atomic_long_t tlb_dropin_fail_fmm;
34352 - atomic_long_t tlb_dropin_fail_no_exception;
34353 - atomic_long_t tfh_stale_on_fault;
34354 - atomic_long_t mmu_invalidate_range;
34355 - atomic_long_t mmu_invalidate_page;
34356 - atomic_long_t flush_tlb;
34357 - atomic_long_t flush_tlb_gru;
34358 - atomic_long_t flush_tlb_gru_tgh;
34359 - atomic_long_t flush_tlb_gru_zero_asid;
34360 + atomic_long_unchecked_t vdata_alloc;
34361 + atomic_long_unchecked_t vdata_free;
34362 + atomic_long_unchecked_t gts_alloc;
34363 + atomic_long_unchecked_t gts_free;
34364 + atomic_long_unchecked_t gms_alloc;
34365 + atomic_long_unchecked_t gms_free;
34366 + atomic_long_unchecked_t gts_double_allocate;
34367 + atomic_long_unchecked_t assign_context;
34368 + atomic_long_unchecked_t assign_context_failed;
34369 + atomic_long_unchecked_t free_context;
34370 + atomic_long_unchecked_t load_user_context;
34371 + atomic_long_unchecked_t load_kernel_context;
34372 + atomic_long_unchecked_t lock_kernel_context;
34373 + atomic_long_unchecked_t unlock_kernel_context;
34374 + atomic_long_unchecked_t steal_user_context;
34375 + atomic_long_unchecked_t steal_kernel_context;
34376 + atomic_long_unchecked_t steal_context_failed;
34377 + atomic_long_unchecked_t nopfn;
34378 + atomic_long_unchecked_t asid_new;
34379 + atomic_long_unchecked_t asid_next;
34380 + atomic_long_unchecked_t asid_wrap;
34381 + atomic_long_unchecked_t asid_reuse;
34382 + atomic_long_unchecked_t intr;
34383 + atomic_long_unchecked_t intr_cbr;
34384 + atomic_long_unchecked_t intr_tfh;
34385 + atomic_long_unchecked_t intr_spurious;
34386 + atomic_long_unchecked_t intr_mm_lock_failed;
34387 + atomic_long_unchecked_t call_os;
34388 + atomic_long_unchecked_t call_os_wait_queue;
34389 + atomic_long_unchecked_t user_flush_tlb;
34390 + atomic_long_unchecked_t user_unload_context;
34391 + atomic_long_unchecked_t user_exception;
34392 + atomic_long_unchecked_t set_context_option;
34393 + atomic_long_unchecked_t check_context_retarget_intr;
34394 + atomic_long_unchecked_t check_context_unload;
34395 + atomic_long_unchecked_t tlb_dropin;
34396 + atomic_long_unchecked_t tlb_preload_page;
34397 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34398 + atomic_long_unchecked_t tlb_dropin_fail_upm;
34399 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
34400 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
34401 + atomic_long_unchecked_t tlb_dropin_fail_idle;
34402 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
34403 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34404 + atomic_long_unchecked_t tfh_stale_on_fault;
34405 + atomic_long_unchecked_t mmu_invalidate_range;
34406 + atomic_long_unchecked_t mmu_invalidate_page;
34407 + atomic_long_unchecked_t flush_tlb;
34408 + atomic_long_unchecked_t flush_tlb_gru;
34409 + atomic_long_unchecked_t flush_tlb_gru_tgh;
34410 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34411
34412 - atomic_long_t copy_gpa;
34413 - atomic_long_t read_gpa;
34414 + atomic_long_unchecked_t copy_gpa;
34415 + atomic_long_unchecked_t read_gpa;
34416
34417 - atomic_long_t mesq_receive;
34418 - atomic_long_t mesq_receive_none;
34419 - atomic_long_t mesq_send;
34420 - atomic_long_t mesq_send_failed;
34421 - atomic_long_t mesq_noop;
34422 - atomic_long_t mesq_send_unexpected_error;
34423 - atomic_long_t mesq_send_lb_overflow;
34424 - atomic_long_t mesq_send_qlimit_reached;
34425 - atomic_long_t mesq_send_amo_nacked;
34426 - atomic_long_t mesq_send_put_nacked;
34427 - atomic_long_t mesq_page_overflow;
34428 - atomic_long_t mesq_qf_locked;
34429 - atomic_long_t mesq_qf_noop_not_full;
34430 - atomic_long_t mesq_qf_switch_head_failed;
34431 - atomic_long_t mesq_qf_unexpected_error;
34432 - atomic_long_t mesq_noop_unexpected_error;
34433 - atomic_long_t mesq_noop_lb_overflow;
34434 - atomic_long_t mesq_noop_qlimit_reached;
34435 - atomic_long_t mesq_noop_amo_nacked;
34436 - atomic_long_t mesq_noop_put_nacked;
34437 - atomic_long_t mesq_noop_page_overflow;
34438 + atomic_long_unchecked_t mesq_receive;
34439 + atomic_long_unchecked_t mesq_receive_none;
34440 + atomic_long_unchecked_t mesq_send;
34441 + atomic_long_unchecked_t mesq_send_failed;
34442 + atomic_long_unchecked_t mesq_noop;
34443 + atomic_long_unchecked_t mesq_send_unexpected_error;
34444 + atomic_long_unchecked_t mesq_send_lb_overflow;
34445 + atomic_long_unchecked_t mesq_send_qlimit_reached;
34446 + atomic_long_unchecked_t mesq_send_amo_nacked;
34447 + atomic_long_unchecked_t mesq_send_put_nacked;
34448 + atomic_long_unchecked_t mesq_page_overflow;
34449 + atomic_long_unchecked_t mesq_qf_locked;
34450 + atomic_long_unchecked_t mesq_qf_noop_not_full;
34451 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
34452 + atomic_long_unchecked_t mesq_qf_unexpected_error;
34453 + atomic_long_unchecked_t mesq_noop_unexpected_error;
34454 + atomic_long_unchecked_t mesq_noop_lb_overflow;
34455 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
34456 + atomic_long_unchecked_t mesq_noop_amo_nacked;
34457 + atomic_long_unchecked_t mesq_noop_put_nacked;
34458 + atomic_long_unchecked_t mesq_noop_page_overflow;
34459
34460 };
34461
34462 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34463 tghop_invalidate, mcsop_last};
34464
34465 struct mcs_op_statistic {
34466 - atomic_long_t count;
34467 - atomic_long_t total;
34468 + atomic_long_unchecked_t count;
34469 + atomic_long_unchecked_t total;
34470 unsigned long max;
34471 };
34472
34473 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34474
34475 #define STAT(id) do { \
34476 if (gru_options & OPT_STATS) \
34477 - atomic_long_inc(&gru_stats.id); \
34478 + atomic_long_inc_unchecked(&gru_stats.id); \
34479 } while (0)
34480
34481 #ifdef CONFIG_SGI_GRU_DEBUG
34482 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34483 index 851b2f2..a4ec097 100644
34484 --- a/drivers/misc/sgi-xp/xp.h
34485 +++ b/drivers/misc/sgi-xp/xp.h
34486 @@ -289,7 +289,7 @@ struct xpc_interface {
34487 xpc_notify_func, void *);
34488 void (*received) (short, int, void *);
34489 enum xp_retval (*partid_to_nasids) (short, void *);
34490 -};
34491 +} __no_const;
34492
34493 extern struct xpc_interface xpc_interface;
34494
34495 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34496 index b94d5f7..7f494c5 100644
34497 --- a/drivers/misc/sgi-xp/xpc.h
34498 +++ b/drivers/misc/sgi-xp/xpc.h
34499 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
34500 void (*received_payload) (struct xpc_channel *, void *);
34501 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34502 };
34503 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34504
34505 /* struct xpc_partition act_state values (for XPC HB) */
34506
34507 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34508 /* found in xpc_main.c */
34509 extern struct device *xpc_part;
34510 extern struct device *xpc_chan;
34511 -extern struct xpc_arch_operations xpc_arch_ops;
34512 +extern xpc_arch_operations_no_const xpc_arch_ops;
34513 extern int xpc_disengage_timelimit;
34514 extern int xpc_disengage_timedout;
34515 extern int xpc_activate_IRQ_rcvd;
34516 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34517 index 8d082b4..aa749ae 100644
34518 --- a/drivers/misc/sgi-xp/xpc_main.c
34519 +++ b/drivers/misc/sgi-xp/xpc_main.c
34520 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34521 .notifier_call = xpc_system_die,
34522 };
34523
34524 -struct xpc_arch_operations xpc_arch_ops;
34525 +xpc_arch_operations_no_const xpc_arch_ops;
34526
34527 /*
34528 * Timer function to enforce the timelimit on the partition disengage.
34529 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34530 index 6ebdc40..9edf5d8 100644
34531 --- a/drivers/mmc/host/sdhci-pci.c
34532 +++ b/drivers/mmc/host/sdhci-pci.c
34533 @@ -631,7 +631,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34534 .probe = via_probe,
34535 };
34536
34537 -static const struct pci_device_id pci_ids[] __devinitdata = {
34538 +static const struct pci_device_id pci_ids[] __devinitconst = {
34539 {
34540 .vendor = PCI_VENDOR_ID_RICOH,
34541 .device = PCI_DEVICE_ID_RICOH_R5C822,
34542 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34543 index 87a431c..4959b43 100644
34544 --- a/drivers/mtd/devices/doc2000.c
34545 +++ b/drivers/mtd/devices/doc2000.c
34546 @@ -764,7 +764,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34547
34548 /* The ECC will not be calculated correctly if less than 512 is written */
34549 /* DBB-
34550 - if (len != 0x200 && eccbuf)
34551 + if (len != 0x200)
34552 printk(KERN_WARNING
34553 "ECC needs a full sector write (adr: %lx size %lx)\n",
34554 (long) to, (long) len);
34555 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
34556 index 9eacf67..4534b5b 100644
34557 --- a/drivers/mtd/devices/doc2001.c
34558 +++ b/drivers/mtd/devices/doc2001.c
34559 @@ -384,7 +384,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
34560 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
34561
34562 /* Don't allow read past end of device */
34563 - if (from >= this->totlen)
34564 + if (from >= this->totlen || !len)
34565 return -EINVAL;
34566
34567 /* Don't allow a single read to cross a 512-byte block boundary */
34568 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34569 index 3984d48..28aa897 100644
34570 --- a/drivers/mtd/nand/denali.c
34571 +++ b/drivers/mtd/nand/denali.c
34572 @@ -26,6 +26,7 @@
34573 #include <linux/pci.h>
34574 #include <linux/mtd/mtd.h>
34575 #include <linux/module.h>
34576 +#include <linux/slab.h>
34577
34578 #include "denali.h"
34579
34580 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34581 index 51b9d6a..52af9a7 100644
34582 --- a/drivers/mtd/nftlmount.c
34583 +++ b/drivers/mtd/nftlmount.c
34584 @@ -24,6 +24,7 @@
34585 #include <asm/errno.h>
34586 #include <linux/delay.h>
34587 #include <linux/slab.h>
34588 +#include <linux/sched.h>
34589 #include <linux/mtd/mtd.h>
34590 #include <linux/mtd/nand.h>
34591 #include <linux/mtd/nftl.h>
34592 diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
34593 index e2cdebf..d48183a 100644
34594 --- a/drivers/mtd/ubi/debug.c
34595 +++ b/drivers/mtd/ubi/debug.c
34596 @@ -338,6 +338,8 @@ out:
34597
34598 /* Write an UBI debugfs file */
34599 static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
34600 + size_t count, loff_t *ppos) __size_overflow(3);
34601 +static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
34602 size_t count, loff_t *ppos)
34603 {
34604 unsigned long ubi_num = (unsigned long)file->private_data;
34605 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34606 index 071f4c8..440862e 100644
34607 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
34608 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34609 @@ -2862,7 +2862,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34610 */
34611
34612 #define ATL2_PARAM(X, desc) \
34613 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34614 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34615 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34616 MODULE_PARM_DESC(X, desc);
34617 #else
34618 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34619 index 66da39f..5dc436d 100644
34620 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34621 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34622 @@ -473,7 +473,7 @@ struct bnx2x_rx_mode_obj {
34623
34624 int (*wait_comp)(struct bnx2x *bp,
34625 struct bnx2x_rx_mode_ramrod_params *p);
34626 -};
34627 +} __no_const;
34628
34629 /********************** Set multicast group ***********************************/
34630
34631 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34632 index aea8f72..fcebf75 100644
34633 --- a/drivers/net/ethernet/broadcom/tg3.h
34634 +++ b/drivers/net/ethernet/broadcom/tg3.h
34635 @@ -140,6 +140,7 @@
34636 #define CHIPREV_ID_5750_A0 0x4000
34637 #define CHIPREV_ID_5750_A1 0x4001
34638 #define CHIPREV_ID_5750_A3 0x4003
34639 +#define CHIPREV_ID_5750_C1 0x4201
34640 #define CHIPREV_ID_5750_C2 0x4202
34641 #define CHIPREV_ID_5752_A0_HW 0x5000
34642 #define CHIPREV_ID_5752_A0 0x6000
34643 diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
34644 index 47a8435..248e4b3 100644
34645 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c
34646 +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
34647 @@ -1052,6 +1052,8 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
34648 * be copied but there is no memory for the copy.
34649 */
34650 static inline struct sk_buff *get_packet(struct pci_dev *pdev,
34651 + struct freelQ *fl, unsigned int len) __size_overflow(3);
34652 +static inline struct sk_buff *get_packet(struct pci_dev *pdev,
34653 struct freelQ *fl, unsigned int len)
34654 {
34655 struct sk_buff *skb;
34656 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34657 index c4e8643..0979484 100644
34658 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34659 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34660 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34661 */
34662 struct l2t_skb_cb {
34663 arp_failure_handler_func arp_failure_handler;
34664 -};
34665 +} __no_const;
34666
34667 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34668
34669 diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
34670 index cfb60e1..94af340 100644
34671 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
34672 +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
34673 @@ -611,6 +611,8 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
34674 * of the SW ring.
34675 */
34676 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
34677 + size_t sw_size, dma_addr_t * phys, void *metadata) __size_overflow(2,4);
34678 +static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
34679 size_t sw_size, dma_addr_t * phys, void *metadata)
34680 {
34681 size_t len = nelem * elem_size;
34682 @@ -777,6 +779,8 @@ static inline unsigned int flits_to_desc(unsigned int n)
34683 * be copied but there is no memory for the copy.
34684 */
34685 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
34686 + unsigned int len, unsigned int drop_thres) __size_overflow(3);
34687 +static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
34688 unsigned int len, unsigned int drop_thres)
34689 {
34690 struct sk_buff *skb = NULL;
34691 diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
34692 index 2dae795..73037d2 100644
34693 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
34694 +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
34695 @@ -593,6 +593,9 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
34696 */
34697 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
34698 size_t sw_size, dma_addr_t *phys, void *metadata,
34699 + size_t stat_size, int node) __size_overflow(2,4);
34700 +static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
34701 + size_t sw_size, dma_addr_t *phys, void *metadata,
34702 size_t stat_size, int node)
34703 {
34704 size_t len = nelem * elem_size + stat_size;
34705 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
34706 index 0bd585b..d954ca5 100644
34707 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
34708 +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
34709 @@ -729,6 +729,9 @@ static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
34710 */
34711 static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
34712 size_t swsize, dma_addr_t *busaddrp, void *swringp,
34713 + size_t stat_size) __size_overflow(2,4);
34714 +static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
34715 + size_t swsize, dma_addr_t *busaddrp, void *swringp,
34716 size_t stat_size)
34717 {
34718 /*
34719 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34720 index 4d71f5a..8004440 100644
34721 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
34722 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34723 @@ -5392,7 +5392,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34724 for (i=0; i<ETH_ALEN; i++) {
34725 tmp.addr[i] = dev->dev_addr[i];
34726 }
34727 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34728 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34729 break;
34730
34731 case DE4X5_SET_HWADDR: /* Set the hardware address */
34732 @@ -5432,7 +5432,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34733 spin_lock_irqsave(&lp->lock, flags);
34734 memcpy(&statbuf, &lp->pktStats, ioc->len);
34735 spin_unlock_irqrestore(&lp->lock, flags);
34736 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34737 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34738 return -EFAULT;
34739 break;
34740 }
34741 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34742 index 14d5b61..1398636 100644
34743 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
34744 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34745 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34746 {NULL}};
34747
34748
34749 -static const char *block_name[] __devinitdata = {
34750 +static const char *block_name[] __devinitconst = {
34751 "21140 non-MII",
34752 "21140 MII PHY",
34753 "21142 Serial PHY",
34754 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34755 index 52da7b2..4ddfe1c 100644
34756 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34757 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34758 @@ -236,7 +236,7 @@ struct pci_id_info {
34759 int drv_flags; /* Driver use, intended as capability flags. */
34760 };
34761
34762 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34763 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34764 { /* Sometime a Level-One switch card. */
34765 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34766 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34767 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34768 index 28a3a9b..d96cb63 100644
34769 --- a/drivers/net/ethernet/dlink/sundance.c
34770 +++ b/drivers/net/ethernet/dlink/sundance.c
34771 @@ -218,7 +218,7 @@ enum {
34772 struct pci_id_info {
34773 const char *name;
34774 };
34775 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34776 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34777 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34778 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34779 {"D-Link DFE-580TX 4 port Server Adapter"},
34780 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34781 index e703d64..d62ecf9 100644
34782 --- a/drivers/net/ethernet/emulex/benet/be_main.c
34783 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
34784 @@ -402,7 +402,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34785
34786 if (wrapped)
34787 newacc += 65536;
34788 - ACCESS_ONCE(*acc) = newacc;
34789 + ACCESS_ONCE_RW(*acc) = newacc;
34790 }
34791
34792 void be_parse_stats(struct be_adapter *adapter)
34793 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34794 index 47f85c3..82ab6c4 100644
34795 --- a/drivers/net/ethernet/faraday/ftgmac100.c
34796 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
34797 @@ -31,6 +31,8 @@
34798 #include <linux/netdevice.h>
34799 #include <linux/phy.h>
34800 #include <linux/platform_device.h>
34801 +#include <linux/interrupt.h>
34802 +#include <linux/irqreturn.h>
34803 #include <net/ip.h>
34804
34805 #include "ftgmac100.h"
34806 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34807 index bb336a0..4b472da 100644
34808 --- a/drivers/net/ethernet/faraday/ftmac100.c
34809 +++ b/drivers/net/ethernet/faraday/ftmac100.c
34810 @@ -31,6 +31,8 @@
34811 #include <linux/module.h>
34812 #include <linux/netdevice.h>
34813 #include <linux/platform_device.h>
34814 +#include <linux/interrupt.h>
34815 +#include <linux/irqreturn.h>
34816
34817 #include "ftmac100.h"
34818
34819 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34820 index c82d444..0007fb4 100644
34821 --- a/drivers/net/ethernet/fealnx.c
34822 +++ b/drivers/net/ethernet/fealnx.c
34823 @@ -150,7 +150,7 @@ struct chip_info {
34824 int flags;
34825 };
34826
34827 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34828 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34829 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34830 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34831 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34832 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34833 index e1159e5..e18684d 100644
34834 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34835 +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34836 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
34837 {
34838 struct e1000_hw *hw = &adapter->hw;
34839 struct e1000_mac_info *mac = &hw->mac;
34840 - struct e1000_mac_operations *func = &mac->ops;
34841 + e1000_mac_operations_no_const *func = &mac->ops;
34842
34843 /* Set media type */
34844 switch (adapter->pdev->device) {
34845 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
34846 index a3e65fd..f451444 100644
34847 --- a/drivers/net/ethernet/intel/e1000e/82571.c
34848 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
34849 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
34850 {
34851 struct e1000_hw *hw = &adapter->hw;
34852 struct e1000_mac_info *mac = &hw->mac;
34853 - struct e1000_mac_operations *func = &mac->ops;
34854 + e1000_mac_operations_no_const *func = &mac->ops;
34855 u32 swsm = 0;
34856 u32 swsm2 = 0;
34857 bool force_clear_smbi = false;
34858 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34859 index 2967039..ca8c40c 100644
34860 --- a/drivers/net/ethernet/intel/e1000e/hw.h
34861 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
34862 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
34863 void (*write_vfta)(struct e1000_hw *, u32, u32);
34864 s32 (*read_mac_addr)(struct e1000_hw *);
34865 };
34866 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34867
34868 /*
34869 * When to use various PHY register access functions:
34870 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
34871 void (*power_up)(struct e1000_hw *);
34872 void (*power_down)(struct e1000_hw *);
34873 };
34874 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34875
34876 /* Function pointers for the NVM. */
34877 struct e1000_nvm_operations {
34878 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
34879 s32 (*validate)(struct e1000_hw *);
34880 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34881 };
34882 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34883
34884 struct e1000_mac_info {
34885 - struct e1000_mac_operations ops;
34886 + e1000_mac_operations_no_const ops;
34887 u8 addr[ETH_ALEN];
34888 u8 perm_addr[ETH_ALEN];
34889
34890 @@ -872,7 +875,7 @@ struct e1000_mac_info {
34891 };
34892
34893 struct e1000_phy_info {
34894 - struct e1000_phy_operations ops;
34895 + e1000_phy_operations_no_const ops;
34896
34897 enum e1000_phy_type type;
34898
34899 @@ -906,7 +909,7 @@ struct e1000_phy_info {
34900 };
34901
34902 struct e1000_nvm_info {
34903 - struct e1000_nvm_operations ops;
34904 + e1000_nvm_operations_no_const ops;
34905
34906 enum e1000_nvm_type type;
34907 enum e1000_nvm_override override;
34908 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34909 index f67cbd3..cef9e3d 100644
34910 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34911 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34912 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
34913 s32 (*read_mac_addr)(struct e1000_hw *);
34914 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34915 };
34916 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34917
34918 struct e1000_phy_operations {
34919 s32 (*acquire)(struct e1000_hw *);
34920 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
34921 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34922 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34923 };
34924 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34925
34926 struct e1000_nvm_operations {
34927 s32 (*acquire)(struct e1000_hw *);
34928 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34929 s32 (*update)(struct e1000_hw *);
34930 s32 (*validate)(struct e1000_hw *);
34931 };
34932 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34933
34934 struct e1000_info {
34935 s32 (*get_invariants)(struct e1000_hw *);
34936 @@ -350,7 +353,7 @@ struct e1000_info {
34937 extern const struct e1000_info e1000_82575_info;
34938
34939 struct e1000_mac_info {
34940 - struct e1000_mac_operations ops;
34941 + e1000_mac_operations_no_const ops;
34942
34943 u8 addr[6];
34944 u8 perm_addr[6];
34945 @@ -388,7 +391,7 @@ struct e1000_mac_info {
34946 };
34947
34948 struct e1000_phy_info {
34949 - struct e1000_phy_operations ops;
34950 + e1000_phy_operations_no_const ops;
34951
34952 enum e1000_phy_type type;
34953
34954 @@ -423,7 +426,7 @@ struct e1000_phy_info {
34955 };
34956
34957 struct e1000_nvm_info {
34958 - struct e1000_nvm_operations ops;
34959 + e1000_nvm_operations_no_const ops;
34960 enum e1000_nvm_type type;
34961 enum e1000_nvm_override override;
34962
34963 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34964 s32 (*check_for_ack)(struct e1000_hw *, u16);
34965 s32 (*check_for_rst)(struct e1000_hw *, u16);
34966 };
34967 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34968
34969 struct e1000_mbx_stats {
34970 u32 msgs_tx;
34971 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34972 };
34973
34974 struct e1000_mbx_info {
34975 - struct e1000_mbx_operations ops;
34976 + e1000_mbx_operations_no_const ops;
34977 struct e1000_mbx_stats stats;
34978 u32 timeout;
34979 u32 usec_delay;
34980 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34981 index 57db3c6..aa825fc 100644
34982 --- a/drivers/net/ethernet/intel/igbvf/vf.h
34983 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
34984 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
34985 s32 (*read_mac_addr)(struct e1000_hw *);
34986 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34987 };
34988 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34989
34990 struct e1000_mac_info {
34991 - struct e1000_mac_operations ops;
34992 + e1000_mac_operations_no_const ops;
34993 u8 addr[6];
34994 u8 perm_addr[6];
34995
34996 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34997 s32 (*check_for_ack)(struct e1000_hw *);
34998 s32 (*check_for_rst)(struct e1000_hw *);
34999 };
35000 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
35001
35002 struct e1000_mbx_stats {
35003 u32 msgs_tx;
35004 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
35005 };
35006
35007 struct e1000_mbx_info {
35008 - struct e1000_mbx_operations ops;
35009 + e1000_mbx_operations_no_const ops;
35010 struct e1000_mbx_stats stats;
35011 u32 timeout;
35012 u32 usec_delay;
35013 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35014 index 9b95bef..7e254ee 100644
35015 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35016 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35017 @@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
35018 s32 (*update_checksum)(struct ixgbe_hw *);
35019 u16 (*calc_checksum)(struct ixgbe_hw *);
35020 };
35021 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
35022
35023 struct ixgbe_mac_operations {
35024 s32 (*init_hw)(struct ixgbe_hw *);
35025 @@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
35026 /* Manageability interface */
35027 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
35028 };
35029 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35030
35031 struct ixgbe_phy_operations {
35032 s32 (*identify)(struct ixgbe_hw *);
35033 @@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
35034 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
35035 s32 (*check_overtemp)(struct ixgbe_hw *);
35036 };
35037 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
35038
35039 struct ixgbe_eeprom_info {
35040 - struct ixgbe_eeprom_operations ops;
35041 + ixgbe_eeprom_operations_no_const ops;
35042 enum ixgbe_eeprom_type type;
35043 u32 semaphore_delay;
35044 u16 word_size;
35045 @@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
35046
35047 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
35048 struct ixgbe_mac_info {
35049 - struct ixgbe_mac_operations ops;
35050 + ixgbe_mac_operations_no_const ops;
35051 enum ixgbe_mac_type type;
35052 u8 addr[ETH_ALEN];
35053 u8 perm_addr[ETH_ALEN];
35054 @@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
35055 };
35056
35057 struct ixgbe_phy_info {
35058 - struct ixgbe_phy_operations ops;
35059 + ixgbe_phy_operations_no_const ops;
35060 struct mdio_if_info mdio;
35061 enum ixgbe_phy_type type;
35062 u32 id;
35063 @@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
35064 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
35065 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
35066 };
35067 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35068
35069 struct ixgbe_mbx_stats {
35070 u32 msgs_tx;
35071 @@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
35072 };
35073
35074 struct ixgbe_mbx_info {
35075 - struct ixgbe_mbx_operations ops;
35076 + ixgbe_mbx_operations_no_const ops;
35077 struct ixgbe_mbx_stats stats;
35078 u32 timeout;
35079 u32 usec_delay;
35080 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
35081 index 25c951d..cc7cf33 100644
35082 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
35083 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
35084 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
35085 s32 (*clear_vfta)(struct ixgbe_hw *);
35086 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
35087 };
35088 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35089
35090 enum ixgbe_mac_type {
35091 ixgbe_mac_unknown = 0,
35092 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
35093 };
35094
35095 struct ixgbe_mac_info {
35096 - struct ixgbe_mac_operations ops;
35097 + ixgbe_mac_operations_no_const ops;
35098 u8 addr[6];
35099 u8 perm_addr[6];
35100
35101 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
35102 s32 (*check_for_ack)(struct ixgbe_hw *);
35103 s32 (*check_for_rst)(struct ixgbe_hw *);
35104 };
35105 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35106
35107 struct ixgbe_mbx_stats {
35108 u32 msgs_tx;
35109 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
35110 };
35111
35112 struct ixgbe_mbx_info {
35113 - struct ixgbe_mbx_operations ops;
35114 + ixgbe_mbx_operations_no_const ops;
35115 struct ixgbe_mbx_stats stats;
35116 u32 timeout;
35117 u32 udelay;
35118 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
35119 index 8bf22b6..7f5baaa 100644
35120 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
35121 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
35122 @@ -41,6 +41,7 @@
35123 #include <linux/slab.h>
35124 #include <linux/io-mapping.h>
35125 #include <linux/delay.h>
35126 +#include <linux/sched.h>
35127
35128 #include <linux/mlx4/device.h>
35129 #include <linux/mlx4/doorbell.h>
35130 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35131 index 5046a64..71ca936 100644
35132 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
35133 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35134 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
35135 void (*link_down)(struct __vxge_hw_device *devh);
35136 void (*crit_err)(struct __vxge_hw_device *devh,
35137 enum vxge_hw_event type, u64 ext_data);
35138 -};
35139 +} __no_const;
35140
35141 /*
35142 * struct __vxge_hw_blockpool_entry - Block private data structure
35143 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35144 index 4a518a3..936b334 100644
35145 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35146 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35147 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
35148 struct vxge_hw_mempool_dma *dma_object,
35149 u32 index,
35150 u32 is_last);
35151 -};
35152 +} __no_const;
35153
35154 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
35155 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
35156 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
35157 index bbacb37..d60887d 100644
35158 --- a/drivers/net/ethernet/realtek/r8169.c
35159 +++ b/drivers/net/ethernet/realtek/r8169.c
35160 @@ -695,17 +695,17 @@ struct rtl8169_private {
35161 struct mdio_ops {
35162 void (*write)(void __iomem *, int, int);
35163 int (*read)(void __iomem *, int);
35164 - } mdio_ops;
35165 + } __no_const mdio_ops;
35166
35167 struct pll_power_ops {
35168 void (*down)(struct rtl8169_private *);
35169 void (*up)(struct rtl8169_private *);
35170 - } pll_power_ops;
35171 + } __no_const pll_power_ops;
35172
35173 struct jumbo_ops {
35174 void (*enable)(struct rtl8169_private *);
35175 void (*disable)(struct rtl8169_private *);
35176 - } jumbo_ops;
35177 + } __no_const jumbo_ops;
35178
35179 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
35180 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
35181 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
35182 index 5b118cd..858b523 100644
35183 --- a/drivers/net/ethernet/sis/sis190.c
35184 +++ b/drivers/net/ethernet/sis/sis190.c
35185 @@ -1622,7 +1622,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
35186 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
35187 struct net_device *dev)
35188 {
35189 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
35190 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
35191 struct sis190_private *tp = netdev_priv(dev);
35192 struct pci_dev *isa_bridge;
35193 u8 reg, tmp8;
35194 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35195 index c07cfe9..81cbf7e 100644
35196 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35197 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35198 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
35199
35200 writel(value, ioaddr + MMC_CNTRL);
35201
35202 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35203 - MMC_CNTRL, value);
35204 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35205 +// MMC_CNTRL, value);
35206 }
35207
35208 /* To mask all all interrupts.*/
35209 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
35210 index dec5836..6d4db7d 100644
35211 --- a/drivers/net/hyperv/hyperv_net.h
35212 +++ b/drivers/net/hyperv/hyperv_net.h
35213 @@ -97,7 +97,7 @@ struct rndis_device {
35214
35215 enum rndis_device_state state;
35216 bool link_state;
35217 - atomic_t new_req_id;
35218 + atomic_unchecked_t new_req_id;
35219
35220 spinlock_t request_lock;
35221 struct list_head req_list;
35222 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
35223 index 133b7fb..d58c559 100644
35224 --- a/drivers/net/hyperv/rndis_filter.c
35225 +++ b/drivers/net/hyperv/rndis_filter.c
35226 @@ -96,7 +96,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35227 * template
35228 */
35229 set = &rndis_msg->msg.set_req;
35230 - set->req_id = atomic_inc_return(&dev->new_req_id);
35231 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35232
35233 /* Add to the request list */
35234 spin_lock_irqsave(&dev->request_lock, flags);
35235 @@ -627,7 +627,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35236
35237 /* Setup the rndis set */
35238 halt = &request->request_msg.msg.halt_req;
35239 - halt->req_id = atomic_inc_return(&dev->new_req_id);
35240 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35241
35242 /* Ignore return since this msg is optional. */
35243 rndis_filter_send_request(dev, request);
35244 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
35245 index 58dc117..f140c77 100644
35246 --- a/drivers/net/macvtap.c
35247 +++ b/drivers/net/macvtap.c
35248 @@ -526,6 +526,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
35249 }
35250 base = (unsigned long)from->iov_base + offset1;
35251 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
35252 + if (i + size >= MAX_SKB_FRAGS)
35253 + return -EFAULT;
35254 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
35255 if ((num_pages != size) ||
35256 (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
35257 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
35258 index 486b404..0d6677d 100644
35259 --- a/drivers/net/ppp/ppp_generic.c
35260 +++ b/drivers/net/ppp/ppp_generic.c
35261 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35262 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
35263 struct ppp_stats stats;
35264 struct ppp_comp_stats cstats;
35265 - char *vers;
35266
35267 switch (cmd) {
35268 case SIOCGPPPSTATS:
35269 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35270 break;
35271
35272 case SIOCGPPPVER:
35273 - vers = PPP_VERSION;
35274 - if (copy_to_user(addr, vers, strlen(vers) + 1))
35275 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
35276 break;
35277 err = 0;
35278 break;
35279 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
35280 index 515f122..41dd273 100644
35281 --- a/drivers/net/tokenring/abyss.c
35282 +++ b/drivers/net/tokenring/abyss.c
35283 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
35284
35285 static int __init abyss_init (void)
35286 {
35287 - abyss_netdev_ops = tms380tr_netdev_ops;
35288 + pax_open_kernel();
35289 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35290
35291 - abyss_netdev_ops.ndo_open = abyss_open;
35292 - abyss_netdev_ops.ndo_stop = abyss_close;
35293 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
35294 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
35295 + pax_close_kernel();
35296
35297 return pci_register_driver(&abyss_driver);
35298 }
35299 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
35300 index 6153cfd..cf69c1c 100644
35301 --- a/drivers/net/tokenring/madgemc.c
35302 +++ b/drivers/net/tokenring/madgemc.c
35303 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
35304
35305 static int __init madgemc_init (void)
35306 {
35307 - madgemc_netdev_ops = tms380tr_netdev_ops;
35308 - madgemc_netdev_ops.ndo_open = madgemc_open;
35309 - madgemc_netdev_ops.ndo_stop = madgemc_close;
35310 + pax_open_kernel();
35311 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35312 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
35313 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
35314 + pax_close_kernel();
35315
35316 return mca_register_driver (&madgemc_driver);
35317 }
35318 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
35319 index 8d362e6..f91cc52 100644
35320 --- a/drivers/net/tokenring/proteon.c
35321 +++ b/drivers/net/tokenring/proteon.c
35322 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
35323 struct platform_device *pdev;
35324 int i, num = 0, err = 0;
35325
35326 - proteon_netdev_ops = tms380tr_netdev_ops;
35327 - proteon_netdev_ops.ndo_open = proteon_open;
35328 - proteon_netdev_ops.ndo_stop = tms380tr_close;
35329 + pax_open_kernel();
35330 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35331 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35332 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35333 + pax_close_kernel();
35334
35335 err = platform_driver_register(&proteon_driver);
35336 if (err)
35337 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
35338 index 46db5c5..37c1536 100644
35339 --- a/drivers/net/tokenring/skisa.c
35340 +++ b/drivers/net/tokenring/skisa.c
35341 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
35342 struct platform_device *pdev;
35343 int i, num = 0, err = 0;
35344
35345 - sk_isa_netdev_ops = tms380tr_netdev_ops;
35346 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
35347 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35348 + pax_open_kernel();
35349 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35350 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35351 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35352 + pax_close_kernel();
35353
35354 err = platform_driver_register(&sk_isa_driver);
35355 if (err)
35356 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
35357 index e1324b4..e1b0041 100644
35358 --- a/drivers/net/usb/hso.c
35359 +++ b/drivers/net/usb/hso.c
35360 @@ -71,7 +71,7 @@
35361 #include <asm/byteorder.h>
35362 #include <linux/serial_core.h>
35363 #include <linux/serial.h>
35364 -
35365 +#include <asm/local.h>
35366
35367 #define MOD_AUTHOR "Option Wireless"
35368 #define MOD_DESCRIPTION "USB High Speed Option driver"
35369 @@ -257,7 +257,7 @@ struct hso_serial {
35370
35371 /* from usb_serial_port */
35372 struct tty_struct *tty;
35373 - int open_count;
35374 + local_t open_count;
35375 spinlock_t serial_lock;
35376
35377 int (*write_data) (struct hso_serial *serial);
35378 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35379 struct urb *urb;
35380
35381 urb = serial->rx_urb[0];
35382 - if (serial->open_count > 0) {
35383 + if (local_read(&serial->open_count) > 0) {
35384 count = put_rxbuf_data(urb, serial);
35385 if (count == -1)
35386 return;
35387 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35388 DUMP1(urb->transfer_buffer, urb->actual_length);
35389
35390 /* Anyone listening? */
35391 - if (serial->open_count == 0)
35392 + if (local_read(&serial->open_count) == 0)
35393 return;
35394
35395 if (status == 0) {
35396 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35397 spin_unlock_irq(&serial->serial_lock);
35398
35399 /* check for port already opened, if not set the termios */
35400 - serial->open_count++;
35401 - if (serial->open_count == 1) {
35402 + if (local_inc_return(&serial->open_count) == 1) {
35403 serial->rx_state = RX_IDLE;
35404 /* Force default termio settings */
35405 _hso_serial_set_termios(tty, NULL);
35406 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35407 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35408 if (result) {
35409 hso_stop_serial_device(serial->parent);
35410 - serial->open_count--;
35411 + local_dec(&serial->open_count);
35412 kref_put(&serial->parent->ref, hso_serial_ref_free);
35413 }
35414 } else {
35415 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35416
35417 /* reset the rts and dtr */
35418 /* do the actual close */
35419 - serial->open_count--;
35420 + local_dec(&serial->open_count);
35421
35422 - if (serial->open_count <= 0) {
35423 - serial->open_count = 0;
35424 + if (local_read(&serial->open_count) <= 0) {
35425 + local_set(&serial->open_count, 0);
35426 spin_lock_irq(&serial->serial_lock);
35427 if (serial->tty == tty) {
35428 serial->tty->driver_data = NULL;
35429 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35430
35431 /* the actual setup */
35432 spin_lock_irqsave(&serial->serial_lock, flags);
35433 - if (serial->open_count)
35434 + if (local_read(&serial->open_count))
35435 _hso_serial_set_termios(tty, old);
35436 else
35437 tty->termios = old;
35438 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
35439 D1("Pending read interrupt on port %d\n", i);
35440 spin_lock(&serial->serial_lock);
35441 if (serial->rx_state == RX_IDLE &&
35442 - serial->open_count > 0) {
35443 + local_read(&serial->open_count) > 0) {
35444 /* Setup and send a ctrl req read on
35445 * port i */
35446 if (!serial->rx_urb_filled[0]) {
35447 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
35448 /* Start all serial ports */
35449 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35450 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35451 - if (dev2ser(serial_table[i])->open_count) {
35452 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
35453 result =
35454 hso_start_serial_device(serial_table[i], GFP_NOIO);
35455 hso_kick_transmit(dev2ser(serial_table[i]));
35456 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35457 index efc0111..79c8f5b 100644
35458 --- a/drivers/net/wireless/ath/ath.h
35459 +++ b/drivers/net/wireless/ath/ath.h
35460 @@ -119,6 +119,7 @@ struct ath_ops {
35461 void (*write_flush) (void *);
35462 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35463 };
35464 +typedef struct ath_ops __no_const ath_ops_no_const;
35465
35466 struct ath_common;
35467 struct ath_bus_ops;
35468 diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
35469 index 8c5ce8b..abf101b 100644
35470 --- a/drivers/net/wireless/ath/ath5k/debug.c
35471 +++ b/drivers/net/wireless/ath/ath5k/debug.c
35472 @@ -343,6 +343,9 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
35473
35474 static ssize_t write_file_debug(struct file *file,
35475 const char __user *userbuf,
35476 + size_t count, loff_t *ppos) __size_overflow(3);
35477 +static ssize_t write_file_debug(struct file *file,
35478 + const char __user *userbuf,
35479 size_t count, loff_t *ppos)
35480 {
35481 struct ath5k_hw *ah = file->private_data;
35482 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35483 index 7b6417b..ab5db98 100644
35484 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35485 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35486 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35487 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35488 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35489
35490 - ACCESS_ONCE(ads->ds_link) = i->link;
35491 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35492 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
35493 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35494
35495 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35496 ctl6 = SM(i->keytype, AR_EncrType);
35497 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35498
35499 if ((i->is_first || i->is_last) &&
35500 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35501 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35502 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35503 | set11nTries(i->rates, 1)
35504 | set11nTries(i->rates, 2)
35505 | set11nTries(i->rates, 3)
35506 | (i->dur_update ? AR_DurUpdateEna : 0)
35507 | SM(0, AR_BurstDur);
35508
35509 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35510 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35511 | set11nRate(i->rates, 1)
35512 | set11nRate(i->rates, 2)
35513 | set11nRate(i->rates, 3);
35514 } else {
35515 - ACCESS_ONCE(ads->ds_ctl2) = 0;
35516 - ACCESS_ONCE(ads->ds_ctl3) = 0;
35517 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35518 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35519 }
35520
35521 if (!i->is_first) {
35522 - ACCESS_ONCE(ads->ds_ctl0) = 0;
35523 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35524 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35525 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35526 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35527 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35528 return;
35529 }
35530
35531 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35532 break;
35533 }
35534
35535 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35536 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35537 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35538 | SM(i->txpower, AR_XmitPower)
35539 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35540 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35541 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35542 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35543
35544 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35545 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35546 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35547 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35548
35549 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35550 return;
35551
35552 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35553 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35554 | set11nPktDurRTSCTS(i->rates, 1);
35555
35556 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35557 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35558 | set11nPktDurRTSCTS(i->rates, 3);
35559
35560 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35561 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35562 | set11nRateFlags(i->rates, 1)
35563 | set11nRateFlags(i->rates, 2)
35564 | set11nRateFlags(i->rates, 3)
35565 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35566 index 09b8c9d..905339e 100644
35567 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35568 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35569 @@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35570 (i->qcu << AR_TxQcuNum_S) | 0x17;
35571
35572 checksum += val;
35573 - ACCESS_ONCE(ads->info) = val;
35574 + ACCESS_ONCE_RW(ads->info) = val;
35575
35576 checksum += i->link;
35577 - ACCESS_ONCE(ads->link) = i->link;
35578 + ACCESS_ONCE_RW(ads->link) = i->link;
35579
35580 checksum += i->buf_addr[0];
35581 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35582 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35583 checksum += i->buf_addr[1];
35584 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35585 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35586 checksum += i->buf_addr[2];
35587 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35588 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35589 checksum += i->buf_addr[3];
35590 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35591 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35592
35593 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35594 - ACCESS_ONCE(ads->ctl3) = val;
35595 + ACCESS_ONCE_RW(ads->ctl3) = val;
35596 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35597 - ACCESS_ONCE(ads->ctl5) = val;
35598 + ACCESS_ONCE_RW(ads->ctl5) = val;
35599 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35600 - ACCESS_ONCE(ads->ctl7) = val;
35601 + ACCESS_ONCE_RW(ads->ctl7) = val;
35602 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35603 - ACCESS_ONCE(ads->ctl9) = val;
35604 + ACCESS_ONCE_RW(ads->ctl9) = val;
35605
35606 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35607 - ACCESS_ONCE(ads->ctl10) = checksum;
35608 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
35609
35610 if (i->is_first || i->is_last) {
35611 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35612 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35613 | set11nTries(i->rates, 1)
35614 | set11nTries(i->rates, 2)
35615 | set11nTries(i->rates, 3)
35616 | (i->dur_update ? AR_DurUpdateEna : 0)
35617 | SM(0, AR_BurstDur);
35618
35619 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35620 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35621 | set11nRate(i->rates, 1)
35622 | set11nRate(i->rates, 2)
35623 | set11nRate(i->rates, 3);
35624 } else {
35625 - ACCESS_ONCE(ads->ctl13) = 0;
35626 - ACCESS_ONCE(ads->ctl14) = 0;
35627 + ACCESS_ONCE_RW(ads->ctl13) = 0;
35628 + ACCESS_ONCE_RW(ads->ctl14) = 0;
35629 }
35630
35631 ads->ctl20 = 0;
35632 @@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35633
35634 ctl17 = SM(i->keytype, AR_EncrType);
35635 if (!i->is_first) {
35636 - ACCESS_ONCE(ads->ctl11) = 0;
35637 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35638 - ACCESS_ONCE(ads->ctl15) = 0;
35639 - ACCESS_ONCE(ads->ctl16) = 0;
35640 - ACCESS_ONCE(ads->ctl17) = ctl17;
35641 - ACCESS_ONCE(ads->ctl18) = 0;
35642 - ACCESS_ONCE(ads->ctl19) = 0;
35643 + ACCESS_ONCE_RW(ads->ctl11) = 0;
35644 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35645 + ACCESS_ONCE_RW(ads->ctl15) = 0;
35646 + ACCESS_ONCE_RW(ads->ctl16) = 0;
35647 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35648 + ACCESS_ONCE_RW(ads->ctl18) = 0;
35649 + ACCESS_ONCE_RW(ads->ctl19) = 0;
35650 return;
35651 }
35652
35653 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35654 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35655 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35656 | SM(i->txpower, AR_XmitPower)
35657 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35658 @@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35659 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35660 ctl12 |= SM(val, AR_PAPRDChainMask);
35661
35662 - ACCESS_ONCE(ads->ctl12) = ctl12;
35663 - ACCESS_ONCE(ads->ctl17) = ctl17;
35664 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35665 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35666
35667 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35668 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35669 | set11nPktDurRTSCTS(i->rates, 1);
35670
35671 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35672 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35673 | set11nPktDurRTSCTS(i->rates, 3);
35674
35675 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35676 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35677 | set11nRateFlags(i->rates, 1)
35678 | set11nRateFlags(i->rates, 2)
35679 | set11nRateFlags(i->rates, 3)
35680 | SM(i->rtscts_rate, AR_RTSCTSRate);
35681
35682 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35683 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35684 }
35685
35686 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
35687 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
35688 index 68d972b..1d9205b 100644
35689 --- a/drivers/net/wireless/ath/ath9k/debug.c
35690 +++ b/drivers/net/wireless/ath/ath9k/debug.c
35691 @@ -60,6 +60,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
35692 }
35693
35694 static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
35695 + size_t count, loff_t *ppos) __size_overflow(3);
35696 +static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
35697 size_t count, loff_t *ppos)
35698 {
35699 struct ath_softc *sc = file->private_data;
35700 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
35701 index d3ff33c..c98bcda 100644
35702 --- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
35703 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
35704 @@ -464,6 +464,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
35705 }
35706
35707 static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
35708 + size_t count, loff_t *ppos) __size_overflow(3);
35709 +static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
35710 size_t count, loff_t *ppos)
35711 {
35712 struct ath9k_htc_priv *priv = file->private_data;
35713 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
35714 index c8261d4..8d88929 100644
35715 --- a/drivers/net/wireless/ath/ath9k/hw.h
35716 +++ b/drivers/net/wireless/ath/ath9k/hw.h
35717 @@ -773,7 +773,7 @@ struct ath_hw_private_ops {
35718
35719 /* ANI */
35720 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35721 -};
35722 +} __no_const;
35723
35724 /**
35725 * struct ath_hw_ops - callbacks used by hardware code and driver code
35726 @@ -803,7 +803,7 @@ struct ath_hw_ops {
35727 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35728 struct ath_hw_antcomb_conf *antconf);
35729
35730 -};
35731 +} __no_const;
35732
35733 struct ath_nf_limits {
35734 s16 max;
35735 @@ -823,7 +823,7 @@ enum ath_cal_list {
35736 #define AH_FASTCC 0x4
35737
35738 struct ath_hw {
35739 - struct ath_ops reg_ops;
35740 + ath_ops_no_const reg_ops;
35741
35742 struct ieee80211_hw *hw;
35743 struct ath_common common;
35744 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35745 index af00e2c..ab04d34 100644
35746 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35747 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35748 @@ -545,7 +545,7 @@ struct phy_func_ptr {
35749 void (*carrsuppr)(struct brcms_phy *);
35750 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35751 void (*detach)(struct brcms_phy *);
35752 -};
35753 +} __no_const;
35754
35755 struct brcms_phy {
35756 struct brcms_phy_pub pubpi_ro;
35757 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
35758 index a2ec369..36fdf14 100644
35759 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
35760 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
35761 @@ -3646,7 +3646,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
35762 */
35763 if (il3945_mod_params.disable_hw_scan) {
35764 D_INFO("Disabling hw_scan\n");
35765 - il3945_hw_ops.hw_scan = NULL;
35766 + pax_open_kernel();
35767 + *(void **)&il3945_hw_ops.hw_scan = NULL;
35768 + pax_close_kernel();
35769 }
35770
35771 D_INFO("*** LOAD DRIVER ***\n");
35772 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
35773 index f8fc239..8cade22 100644
35774 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
35775 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
35776 @@ -86,8 +86,8 @@ do { \
35777 } while (0)
35778
35779 #else
35780 -#define IWL_DEBUG(m, level, fmt, args...)
35781 -#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
35782 +#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
35783 +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
35784 #define iwl_print_hex_dump(m, level, p, len)
35785 #define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
35786 do { \
35787 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
35788 index 4b9e730..7603659 100644
35789 --- a/drivers/net/wireless/mac80211_hwsim.c
35790 +++ b/drivers/net/wireless/mac80211_hwsim.c
35791 @@ -1677,9 +1677,11 @@ static int __init init_mac80211_hwsim(void)
35792 return -EINVAL;
35793
35794 if (fake_hw_scan) {
35795 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35796 - mac80211_hwsim_ops.sw_scan_start = NULL;
35797 - mac80211_hwsim_ops.sw_scan_complete = NULL;
35798 + pax_open_kernel();
35799 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35800 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35801 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35802 + pax_close_kernel();
35803 }
35804
35805 spin_lock_init(&hwsim_radio_lock);
35806 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35807 index 3186aa4..b35b09f 100644
35808 --- a/drivers/net/wireless/mwifiex/main.h
35809 +++ b/drivers/net/wireless/mwifiex/main.h
35810 @@ -536,7 +536,7 @@ struct mwifiex_if_ops {
35811 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
35812 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
35813 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
35814 -};
35815 +} __no_const;
35816
35817 struct mwifiex_adapter {
35818 u8 iface_type;
35819 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35820 index a330c69..a81540f 100644
35821 --- a/drivers/net/wireless/rndis_wlan.c
35822 +++ b/drivers/net/wireless/rndis_wlan.c
35823 @@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35824
35825 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35826
35827 - if (rts_threshold < 0 || rts_threshold > 2347)
35828 + if (rts_threshold > 2347)
35829 rts_threshold = 2347;
35830
35831 tmp = cpu_to_le32(rts_threshold);
35832 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
35833 index a77f1bb..c608b2b 100644
35834 --- a/drivers/net/wireless/wl1251/wl1251.h
35835 +++ b/drivers/net/wireless/wl1251/wl1251.h
35836 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
35837 void (*reset)(struct wl1251 *wl);
35838 void (*enable_irq)(struct wl1251 *wl);
35839 void (*disable_irq)(struct wl1251 *wl);
35840 -};
35841 +} __no_const;
35842
35843 struct wl1251 {
35844 struct ieee80211_hw *hw;
35845 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35846 index f34b5b2..b5abb9f 100644
35847 --- a/drivers/oprofile/buffer_sync.c
35848 +++ b/drivers/oprofile/buffer_sync.c
35849 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35850 if (cookie == NO_COOKIE)
35851 offset = pc;
35852 if (cookie == INVALID_COOKIE) {
35853 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35854 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35855 offset = pc;
35856 }
35857 if (cookie != last_cookie) {
35858 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35859 /* add userspace sample */
35860
35861 if (!mm) {
35862 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35863 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35864 return 0;
35865 }
35866
35867 cookie = lookup_dcookie(mm, s->eip, &offset);
35868
35869 if (cookie == INVALID_COOKIE) {
35870 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35871 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35872 return 0;
35873 }
35874
35875 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35876 /* ignore backtraces if failed to add a sample */
35877 if (state == sb_bt_start) {
35878 state = sb_bt_ignore;
35879 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35880 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35881 }
35882 }
35883 release_mm(mm);
35884 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35885 index c0cc4e7..44d4e54 100644
35886 --- a/drivers/oprofile/event_buffer.c
35887 +++ b/drivers/oprofile/event_buffer.c
35888 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35889 }
35890
35891 if (buffer_pos == buffer_size) {
35892 - atomic_inc(&oprofile_stats.event_lost_overflow);
35893 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35894 return;
35895 }
35896
35897 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35898 index ed2c3ec..deda85a 100644
35899 --- a/drivers/oprofile/oprof.c
35900 +++ b/drivers/oprofile/oprof.c
35901 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35902 if (oprofile_ops.switch_events())
35903 return;
35904
35905 - atomic_inc(&oprofile_stats.multiplex_counter);
35906 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35907 start_switch_worker();
35908 }
35909
35910 diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
35911 index 84a208d..f07d177 100644
35912 --- a/drivers/oprofile/oprofile_files.c
35913 +++ b/drivers/oprofile/oprofile_files.c
35914 @@ -36,6 +36,8 @@ static ssize_t timeout_read(struct file *file, char __user *buf,
35915
35916
35917 static ssize_t timeout_write(struct file *file, char const __user *buf,
35918 + size_t count, loff_t *offset) __size_overflow(3);
35919 +static ssize_t timeout_write(struct file *file, char const __user *buf,
35920 size_t count, loff_t *offset)
35921 {
35922 unsigned long val;
35923 @@ -72,6 +74,7 @@ static ssize_t depth_read(struct file *file, char __user *buf, size_t count, lof
35924 }
35925
35926
35927 +static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
35928 static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
35929 {
35930 unsigned long val;
35931 @@ -126,12 +129,14 @@ static const struct file_operations cpu_type_fops = {
35932 };
35933
35934
35935 +static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset) __size_overflow(3);
35936 static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
35937 {
35938 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
35939 }
35940
35941
35942 +static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
35943 static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
35944 {
35945 unsigned long val;
35946 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35947 index 917d28e..d62d981 100644
35948 --- a/drivers/oprofile/oprofile_stats.c
35949 +++ b/drivers/oprofile/oprofile_stats.c
35950 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35951 cpu_buf->sample_invalid_eip = 0;
35952 }
35953
35954 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35955 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35956 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35957 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35958 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35959 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35960 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35961 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35962 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35963 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35964 }
35965
35966
35967 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35968 index 38b6fc0..b5cbfce 100644
35969 --- a/drivers/oprofile/oprofile_stats.h
35970 +++ b/drivers/oprofile/oprofile_stats.h
35971 @@ -13,11 +13,11 @@
35972 #include <linux/atomic.h>
35973
35974 struct oprofile_stat_struct {
35975 - atomic_t sample_lost_no_mm;
35976 - atomic_t sample_lost_no_mapping;
35977 - atomic_t bt_lost_no_mapping;
35978 - atomic_t event_lost_overflow;
35979 - atomic_t multiplex_counter;
35980 + atomic_unchecked_t sample_lost_no_mm;
35981 + atomic_unchecked_t sample_lost_no_mapping;
35982 + atomic_unchecked_t bt_lost_no_mapping;
35983 + atomic_unchecked_t event_lost_overflow;
35984 + atomic_unchecked_t multiplex_counter;
35985 };
35986
35987 extern struct oprofile_stat_struct oprofile_stats;
35988 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35989 index 2f0aa0f..d5246c3 100644
35990 --- a/drivers/oprofile/oprofilefs.c
35991 +++ b/drivers/oprofile/oprofilefs.c
35992 @@ -97,6 +97,7 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count
35993 }
35994
35995
35996 +static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
35997 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
35998 {
35999 unsigned long value;
36000 @@ -193,7 +194,7 @@ static const struct file_operations atomic_ro_fops = {
36001
36002
36003 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
36004 - char const *name, atomic_t *val)
36005 + char const *name, atomic_unchecked_t *val)
36006 {
36007 return __oprofilefs_create_file(sb, root, name,
36008 &atomic_ro_fops, 0444, val);
36009 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
36010 index 3f56bc0..707d642 100644
36011 --- a/drivers/parport/procfs.c
36012 +++ b/drivers/parport/procfs.c
36013 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
36014
36015 *ppos += len;
36016
36017 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
36018 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
36019 }
36020
36021 #ifdef CONFIG_PARPORT_1284
36022 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
36023
36024 *ppos += len;
36025
36026 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
36027 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
36028 }
36029 #endif /* IEEE1284.3 support. */
36030
36031 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
36032 index 9fff878..ad0ad53 100644
36033 --- a/drivers/pci/hotplug/cpci_hotplug.h
36034 +++ b/drivers/pci/hotplug/cpci_hotplug.h
36035 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
36036 int (*hardware_test) (struct slot* slot, u32 value);
36037 u8 (*get_power) (struct slot* slot);
36038 int (*set_power) (struct slot* slot, int value);
36039 -};
36040 +} __no_const;
36041
36042 struct cpci_hp_controller {
36043 unsigned int irq;
36044 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
36045 index 76ba8a1..20ca857 100644
36046 --- a/drivers/pci/hotplug/cpqphp_nvram.c
36047 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
36048 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
36049
36050 void compaq_nvram_init (void __iomem *rom_start)
36051 {
36052 +
36053 +#ifndef CONFIG_PAX_KERNEXEC
36054 if (rom_start) {
36055 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
36056 }
36057 +#endif
36058 +
36059 dbg("int15 entry = %p\n", compaq_int15_entry_point);
36060
36061 /* initialize our int15 lock */
36062 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
36063 index 2275162..95f1a92 100644
36064 --- a/drivers/pci/pcie/aspm.c
36065 +++ b/drivers/pci/pcie/aspm.c
36066 @@ -27,9 +27,9 @@
36067 #define MODULE_PARAM_PREFIX "pcie_aspm."
36068
36069 /* Note: those are not register definitions */
36070 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
36071 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
36072 -#define ASPM_STATE_L1 (4) /* L1 state */
36073 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
36074 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
36075 +#define ASPM_STATE_L1 (4U) /* L1 state */
36076 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
36077 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
36078
36079 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
36080 index 71eac9c..2de27ef 100644
36081 --- a/drivers/pci/probe.c
36082 +++ b/drivers/pci/probe.c
36083 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
36084 u32 l, sz, mask;
36085 u16 orig_cmd;
36086
36087 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
36088 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
36089
36090 if (!dev->mmio_always_on) {
36091 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
36092 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
36093 index 27911b5..5b6db88 100644
36094 --- a/drivers/pci/proc.c
36095 +++ b/drivers/pci/proc.c
36096 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
36097 static int __init pci_proc_init(void)
36098 {
36099 struct pci_dev *dev = NULL;
36100 +
36101 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
36102 +#ifdef CONFIG_GRKERNSEC_PROC_USER
36103 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36104 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36105 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36106 +#endif
36107 +#else
36108 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36109 +#endif
36110 proc_create("devices", 0, proc_bus_pci_dir,
36111 &proc_bus_pci_dev_operations);
36112 proc_initialized = 1;
36113 diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
36114 index 6f966d6..68e18ed 100644
36115 --- a/drivers/platform/x86/asus_acpi.c
36116 +++ b/drivers/platform/x86/asus_acpi.c
36117 @@ -887,6 +887,8 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
36118 }
36119
36120 static ssize_t lcd_proc_write(struct file *file, const char __user *buffer,
36121 + size_t count, loff_t *pos) __size_overflow(3);
36122 +static ssize_t lcd_proc_write(struct file *file, const char __user *buffer,
36123 size_t count, loff_t *pos)
36124 {
36125 int rv, value;
36126 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
36127 index ea0c607..58c4628 100644
36128 --- a/drivers/platform/x86/thinkpad_acpi.c
36129 +++ b/drivers/platform/x86/thinkpad_acpi.c
36130 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
36131 return 0;
36132 }
36133
36134 -void static hotkey_mask_warn_incomplete_mask(void)
36135 +static void hotkey_mask_warn_incomplete_mask(void)
36136 {
36137 /* log only what the user can fix... */
36138 const u32 wantedmask = hotkey_driver_mask &
36139 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
36140 }
36141 }
36142
36143 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36144 - struct tp_nvram_state *newn,
36145 - const u32 event_mask)
36146 -{
36147 -
36148 #define TPACPI_COMPARE_KEY(__scancode, __member) \
36149 do { \
36150 if ((event_mask & (1 << __scancode)) && \
36151 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36152 tpacpi_hotkey_send_key(__scancode); \
36153 } while (0)
36154
36155 - void issue_volchange(const unsigned int oldvol,
36156 - const unsigned int newvol)
36157 - {
36158 - unsigned int i = oldvol;
36159 +static void issue_volchange(const unsigned int oldvol,
36160 + const unsigned int newvol,
36161 + const u32 event_mask)
36162 +{
36163 + unsigned int i = oldvol;
36164
36165 - while (i > newvol) {
36166 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36167 - i--;
36168 - }
36169 - while (i < newvol) {
36170 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36171 - i++;
36172 - }
36173 + while (i > newvol) {
36174 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36175 + i--;
36176 }
36177 + while (i < newvol) {
36178 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36179 + i++;
36180 + }
36181 +}
36182
36183 - void issue_brightnesschange(const unsigned int oldbrt,
36184 - const unsigned int newbrt)
36185 - {
36186 - unsigned int i = oldbrt;
36187 +static void issue_brightnesschange(const unsigned int oldbrt,
36188 + const unsigned int newbrt,
36189 + const u32 event_mask)
36190 +{
36191 + unsigned int i = oldbrt;
36192
36193 - while (i > newbrt) {
36194 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36195 - i--;
36196 - }
36197 - while (i < newbrt) {
36198 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36199 - i++;
36200 - }
36201 + while (i > newbrt) {
36202 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36203 + i--;
36204 + }
36205 + while (i < newbrt) {
36206 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36207 + i++;
36208 }
36209 +}
36210
36211 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36212 + struct tp_nvram_state *newn,
36213 + const u32 event_mask)
36214 +{
36215 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
36216 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
36217 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
36218 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36219 oldn->volume_level != newn->volume_level) {
36220 /* recently muted, or repeated mute keypress, or
36221 * multiple presses ending in mute */
36222 - issue_volchange(oldn->volume_level, newn->volume_level);
36223 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36224 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
36225 }
36226 } else {
36227 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36228 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36229 }
36230 if (oldn->volume_level != newn->volume_level) {
36231 - issue_volchange(oldn->volume_level, newn->volume_level);
36232 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36233 } else if (oldn->volume_toggle != newn->volume_toggle) {
36234 /* repeated vol up/down keypress at end of scale ? */
36235 if (newn->volume_level == 0)
36236 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36237 /* handle brightness */
36238 if (oldn->brightness_level != newn->brightness_level) {
36239 issue_brightnesschange(oldn->brightness_level,
36240 - newn->brightness_level);
36241 + newn->brightness_level,
36242 + event_mask);
36243 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
36244 /* repeated key presses that didn't change state */
36245 if (newn->brightness_level == 0)
36246 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36247 && !tp_features.bright_unkfw)
36248 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36249 }
36250 +}
36251
36252 #undef TPACPI_COMPARE_KEY
36253 #undef TPACPI_MAY_SEND_KEY
36254 -}
36255
36256 /*
36257 * Polling driver
36258 diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
36259 index dcdc1f4..85cee16 100644
36260 --- a/drivers/platform/x86/toshiba_acpi.c
36261 +++ b/drivers/platform/x86/toshiba_acpi.c
36262 @@ -517,6 +517,8 @@ static int set_lcd_status(struct backlight_device *bd)
36263 }
36264
36265 static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
36266 + size_t count, loff_t *pos) __size_overflow(3);
36267 +static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
36268 size_t count, loff_t *pos)
36269 {
36270 struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
36271 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
36272 index b859d16..5cc6b1a 100644
36273 --- a/drivers/pnp/pnpbios/bioscalls.c
36274 +++ b/drivers/pnp/pnpbios/bioscalls.c
36275 @@ -59,7 +59,7 @@ do { \
36276 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36277 } while(0)
36278
36279 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36280 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36281 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36282
36283 /*
36284 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36285
36286 cpu = get_cpu();
36287 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36288 +
36289 + pax_open_kernel();
36290 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36291 + pax_close_kernel();
36292
36293 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36294 spin_lock_irqsave(&pnp_bios_lock, flags);
36295 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36296 :"memory");
36297 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36298
36299 + pax_open_kernel();
36300 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36301 + pax_close_kernel();
36302 +
36303 put_cpu();
36304
36305 /* If we get here and this is set then the PnP BIOS faulted on us. */
36306 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
36307 return status;
36308 }
36309
36310 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
36311 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36312 {
36313 int i;
36314
36315 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36316 pnp_bios_callpoint.offset = header->fields.pm16offset;
36317 pnp_bios_callpoint.segment = PNP_CS16;
36318
36319 + pax_open_kernel();
36320 +
36321 for_each_possible_cpu(i) {
36322 struct desc_struct *gdt = get_cpu_gdt_table(i);
36323 if (!gdt)
36324 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36325 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36326 (unsigned long)__va(header->fields.pm16dseg));
36327 }
36328 +
36329 + pax_close_kernel();
36330 }
36331 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
36332 index b0ecacb..7c9da2e 100644
36333 --- a/drivers/pnp/resource.c
36334 +++ b/drivers/pnp/resource.c
36335 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
36336 return 1;
36337
36338 /* check if the resource is valid */
36339 - if (*irq < 0 || *irq > 15)
36340 + if (*irq > 15)
36341 return 0;
36342
36343 /* check if the resource is reserved */
36344 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
36345 return 1;
36346
36347 /* check if the resource is valid */
36348 - if (*dma < 0 || *dma == 4 || *dma > 7)
36349 + if (*dma == 4 || *dma > 7)
36350 return 0;
36351
36352 /* check if the resource is reserved */
36353 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
36354 index 1ed6ea0..77c0bd2 100644
36355 --- a/drivers/power/bq27x00_battery.c
36356 +++ b/drivers/power/bq27x00_battery.c
36357 @@ -72,7 +72,7 @@
36358 struct bq27x00_device_info;
36359 struct bq27x00_access_methods {
36360 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
36361 -};
36362 +} __no_const;
36363
36364 enum bq27x00_chip { BQ27000, BQ27500 };
36365
36366 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
36367 index a838e66..a9e1665 100644
36368 --- a/drivers/regulator/max8660.c
36369 +++ b/drivers/regulator/max8660.c
36370 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
36371 max8660->shadow_regs[MAX8660_OVER1] = 5;
36372 } else {
36373 /* Otherwise devices can be toggled via software */
36374 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
36375 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
36376 + pax_open_kernel();
36377 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
36378 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
36379 + pax_close_kernel();
36380 }
36381
36382 /*
36383 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
36384 index e8cfc99..072aee2 100644
36385 --- a/drivers/regulator/mc13892-regulator.c
36386 +++ b/drivers/regulator/mc13892-regulator.c
36387 @@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
36388 }
36389 mc13xxx_unlock(mc13892);
36390
36391 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36392 + pax_open_kernel();
36393 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36394 = mc13892_vcam_set_mode;
36395 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36396 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36397 = mc13892_vcam_get_mode;
36398 + pax_close_kernel();
36399
36400 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
36401 ARRAY_SIZE(mc13892_regulators));
36402 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
36403 index cace6d3..f623fda 100644
36404 --- a/drivers/rtc/rtc-dev.c
36405 +++ b/drivers/rtc/rtc-dev.c
36406 @@ -14,6 +14,7 @@
36407 #include <linux/module.h>
36408 #include <linux/rtc.h>
36409 #include <linux/sched.h>
36410 +#include <linux/grsecurity.h>
36411 #include "rtc-core.h"
36412
36413 static dev_t rtc_devt;
36414 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
36415 if (copy_from_user(&tm, uarg, sizeof(tm)))
36416 return -EFAULT;
36417
36418 + gr_log_timechange();
36419 +
36420 return rtc_set_time(rtc, &tm);
36421
36422 case RTC_PIE_ON:
36423 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
36424 index ffb5878..e6d785c 100644
36425 --- a/drivers/scsi/aacraid/aacraid.h
36426 +++ b/drivers/scsi/aacraid/aacraid.h
36427 @@ -492,7 +492,7 @@ struct adapter_ops
36428 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36429 /* Administrative operations */
36430 int (*adapter_comm)(struct aac_dev * dev, int comm);
36431 -};
36432 +} __no_const;
36433
36434 /*
36435 * Define which interrupt handler needs to be installed
36436 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
36437 index 705e13e..91c873c 100644
36438 --- a/drivers/scsi/aacraid/linit.c
36439 +++ b/drivers/scsi/aacraid/linit.c
36440 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
36441 #elif defined(__devinitconst)
36442 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36443 #else
36444 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
36445 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36446 #endif
36447 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
36448 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
36449 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
36450 index d5ff142..49c0ebb 100644
36451 --- a/drivers/scsi/aic94xx/aic94xx_init.c
36452 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
36453 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
36454 .lldd_control_phy = asd_control_phy,
36455 };
36456
36457 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
36458 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
36459 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
36460 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
36461 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
36462 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
36463 index a796de9..1ef20e1 100644
36464 --- a/drivers/scsi/bfa/bfa.h
36465 +++ b/drivers/scsi/bfa/bfa.h
36466 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
36467 u32 *end);
36468 int cpe_vec_q0;
36469 int rme_vec_q0;
36470 -};
36471 +} __no_const;
36472 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36473
36474 struct bfa_faa_cbfn_s {
36475 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
36476 index f0f80e2..8ec946b 100644
36477 --- a/drivers/scsi/bfa/bfa_fcpim.c
36478 +++ b/drivers/scsi/bfa/bfa_fcpim.c
36479 @@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
36480
36481 bfa_iotag_attach(fcp);
36482
36483 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36484 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36485 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36486 (fcp->num_itns * sizeof(struct bfa_itn_s));
36487 memset(fcp->itn_arr, 0,
36488 @@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36489 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36490 {
36491 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36492 - struct bfa_itn_s *itn;
36493 + bfa_itn_s_no_const *itn;
36494
36495 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36496 itn->isr = isr;
36497 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36498 index 36f26da..38a34a8 100644
36499 --- a/drivers/scsi/bfa/bfa_fcpim.h
36500 +++ b/drivers/scsi/bfa/bfa_fcpim.h
36501 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
36502 struct bfa_itn_s {
36503 bfa_isr_func_t isr;
36504 };
36505 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36506
36507 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36508 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36509 @@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36510 struct list_head iotag_tio_free_q; /* free IO resources */
36511 struct list_head iotag_unused_q; /* unused IO resources*/
36512 struct bfa_iotag_s *iotag_arr;
36513 - struct bfa_itn_s *itn_arr;
36514 + bfa_itn_s_no_const *itn_arr;
36515 int num_ioim_reqs;
36516 int num_fwtio_reqs;
36517 int num_itns;
36518 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36519 index 546d46b..642fa5b 100644
36520 --- a/drivers/scsi/bfa/bfa_ioc.h
36521 +++ b/drivers/scsi/bfa/bfa_ioc.h
36522 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36523 bfa_ioc_disable_cbfn_t disable_cbfn;
36524 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36525 bfa_ioc_reset_cbfn_t reset_cbfn;
36526 -};
36527 +} __no_const;
36528
36529 /*
36530 * IOC event notification mechanism.
36531 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36532 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36533 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36534 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36535 -};
36536 +} __no_const;
36537
36538 /*
36539 * Queue element to wait for room in request queue. FIFO order is
36540 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36541 index 351dc0b..951dc32 100644
36542 --- a/drivers/scsi/hosts.c
36543 +++ b/drivers/scsi/hosts.c
36544 @@ -42,7 +42,7 @@
36545 #include "scsi_logging.h"
36546
36547
36548 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36549 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36550
36551
36552 static void scsi_host_cls_release(struct device *dev)
36553 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36554 * subtract one because we increment first then return, but we need to
36555 * know what the next host number was before increment
36556 */
36557 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36558 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36559 shost->dma_channel = 0xff;
36560
36561 /* These three are default values which can be overridden */
36562 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36563 index b96962c..0c82ec2 100644
36564 --- a/drivers/scsi/hpsa.c
36565 +++ b/drivers/scsi/hpsa.c
36566 @@ -507,7 +507,7 @@ static inline u32 next_command(struct ctlr_info *h)
36567 u32 a;
36568
36569 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36570 - return h->access.command_completed(h);
36571 + return h->access->command_completed(h);
36572
36573 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36574 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36575 @@ -2991,7 +2991,7 @@ static void start_io(struct ctlr_info *h)
36576 while (!list_empty(&h->reqQ)) {
36577 c = list_entry(h->reqQ.next, struct CommandList, list);
36578 /* can't do anything if fifo is full */
36579 - if ((h->access.fifo_full(h))) {
36580 + if ((h->access->fifo_full(h))) {
36581 dev_warn(&h->pdev->dev, "fifo full\n");
36582 break;
36583 }
36584 @@ -3001,7 +3001,7 @@ static void start_io(struct ctlr_info *h)
36585 h->Qdepth--;
36586
36587 /* Tell the controller execute command */
36588 - h->access.submit_command(h, c);
36589 + h->access->submit_command(h, c);
36590
36591 /* Put job onto the completed Q */
36592 addQ(&h->cmpQ, c);
36593 @@ -3010,17 +3010,17 @@ static void start_io(struct ctlr_info *h)
36594
36595 static inline unsigned long get_next_completion(struct ctlr_info *h)
36596 {
36597 - return h->access.command_completed(h);
36598 + return h->access->command_completed(h);
36599 }
36600
36601 static inline bool interrupt_pending(struct ctlr_info *h)
36602 {
36603 - return h->access.intr_pending(h);
36604 + return h->access->intr_pending(h);
36605 }
36606
36607 static inline long interrupt_not_for_us(struct ctlr_info *h)
36608 {
36609 - return (h->access.intr_pending(h) == 0) ||
36610 + return (h->access->intr_pending(h) == 0) ||
36611 (h->interrupts_enabled == 0);
36612 }
36613
36614 @@ -3919,7 +3919,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36615 if (prod_index < 0)
36616 return -ENODEV;
36617 h->product_name = products[prod_index].product_name;
36618 - h->access = *(products[prod_index].access);
36619 + h->access = products[prod_index].access;
36620
36621 if (hpsa_board_disabled(h->pdev)) {
36622 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36623 @@ -4164,7 +4164,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
36624
36625 assert_spin_locked(&lockup_detector_lock);
36626 remove_ctlr_from_lockup_detector_list(h);
36627 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36628 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36629 spin_lock_irqsave(&h->lock, flags);
36630 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36631 spin_unlock_irqrestore(&h->lock, flags);
36632 @@ -4344,7 +4344,7 @@ reinit_after_soft_reset:
36633 }
36634
36635 /* make sure the board interrupts are off */
36636 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36637 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36638
36639 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36640 goto clean2;
36641 @@ -4378,7 +4378,7 @@ reinit_after_soft_reset:
36642 * fake ones to scoop up any residual completions.
36643 */
36644 spin_lock_irqsave(&h->lock, flags);
36645 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36646 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36647 spin_unlock_irqrestore(&h->lock, flags);
36648 free_irq(h->intr[h->intr_mode], h);
36649 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36650 @@ -4397,9 +4397,9 @@ reinit_after_soft_reset:
36651 dev_info(&h->pdev->dev, "Board READY.\n");
36652 dev_info(&h->pdev->dev,
36653 "Waiting for stale completions to drain.\n");
36654 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36655 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36656 msleep(10000);
36657 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36658 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36659
36660 rc = controller_reset_failed(h->cfgtable);
36661 if (rc)
36662 @@ -4420,7 +4420,7 @@ reinit_after_soft_reset:
36663 }
36664
36665 /* Turn the interrupts on so we can service requests */
36666 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36667 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36668
36669 hpsa_hba_inquiry(h);
36670 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36671 @@ -4472,7 +4472,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36672 * To write all data in the battery backed cache to disks
36673 */
36674 hpsa_flush_cache(h);
36675 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36676 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36677 free_irq(h->intr[h->intr_mode], h);
36678 #ifdef CONFIG_PCI_MSI
36679 if (h->msix_vector)
36680 @@ -4636,7 +4636,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36681 return;
36682 }
36683 /* Change the access methods to the performant access methods */
36684 - h->access = SA5_performant_access;
36685 + h->access = &SA5_performant_access;
36686 h->transMethod = CFGTBL_Trans_Performant;
36687 }
36688
36689 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36690 index 91edafb..a9b88ec 100644
36691 --- a/drivers/scsi/hpsa.h
36692 +++ b/drivers/scsi/hpsa.h
36693 @@ -73,7 +73,7 @@ struct ctlr_info {
36694 unsigned int msix_vector;
36695 unsigned int msi_vector;
36696 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36697 - struct access_method access;
36698 + struct access_method *access;
36699
36700 /* queue and queue Info */
36701 struct list_head reqQ;
36702 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36703 index f2df059..a3a9930 100644
36704 --- a/drivers/scsi/ips.h
36705 +++ b/drivers/scsi/ips.h
36706 @@ -1027,7 +1027,7 @@ typedef struct {
36707 int (*intr)(struct ips_ha *);
36708 void (*enableint)(struct ips_ha *);
36709 uint32_t (*statupd)(struct ips_ha *);
36710 -} ips_hw_func_t;
36711 +} __no_const ips_hw_func_t;
36712
36713 typedef struct ips_ha {
36714 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36715 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36716 index 4d70d96..84d0573 100644
36717 --- a/drivers/scsi/libfc/fc_exch.c
36718 +++ b/drivers/scsi/libfc/fc_exch.c
36719 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
36720 * all together if not used XXX
36721 */
36722 struct {
36723 - atomic_t no_free_exch;
36724 - atomic_t no_free_exch_xid;
36725 - atomic_t xid_not_found;
36726 - atomic_t xid_busy;
36727 - atomic_t seq_not_found;
36728 - atomic_t non_bls_resp;
36729 + atomic_unchecked_t no_free_exch;
36730 + atomic_unchecked_t no_free_exch_xid;
36731 + atomic_unchecked_t xid_not_found;
36732 + atomic_unchecked_t xid_busy;
36733 + atomic_unchecked_t seq_not_found;
36734 + atomic_unchecked_t non_bls_resp;
36735 } stats;
36736 };
36737
36738 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36739 /* allocate memory for exchange */
36740 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36741 if (!ep) {
36742 - atomic_inc(&mp->stats.no_free_exch);
36743 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36744 goto out;
36745 }
36746 memset(ep, 0, sizeof(*ep));
36747 @@ -780,7 +780,7 @@ out:
36748 return ep;
36749 err:
36750 spin_unlock_bh(&pool->lock);
36751 - atomic_inc(&mp->stats.no_free_exch_xid);
36752 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36753 mempool_free(ep, mp->ep_pool);
36754 return NULL;
36755 }
36756 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36757 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36758 ep = fc_exch_find(mp, xid);
36759 if (!ep) {
36760 - atomic_inc(&mp->stats.xid_not_found);
36761 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36762 reject = FC_RJT_OX_ID;
36763 goto out;
36764 }
36765 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36766 ep = fc_exch_find(mp, xid);
36767 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36768 if (ep) {
36769 - atomic_inc(&mp->stats.xid_busy);
36770 + atomic_inc_unchecked(&mp->stats.xid_busy);
36771 reject = FC_RJT_RX_ID;
36772 goto rel;
36773 }
36774 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36775 }
36776 xid = ep->xid; /* get our XID */
36777 } else if (!ep) {
36778 - atomic_inc(&mp->stats.xid_not_found);
36779 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36780 reject = FC_RJT_RX_ID; /* XID not found */
36781 goto out;
36782 }
36783 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36784 } else {
36785 sp = &ep->seq;
36786 if (sp->id != fh->fh_seq_id) {
36787 - atomic_inc(&mp->stats.seq_not_found);
36788 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36789 if (f_ctl & FC_FC_END_SEQ) {
36790 /*
36791 * Update sequence_id based on incoming last
36792 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36793
36794 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36795 if (!ep) {
36796 - atomic_inc(&mp->stats.xid_not_found);
36797 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36798 goto out;
36799 }
36800 if (ep->esb_stat & ESB_ST_COMPLETE) {
36801 - atomic_inc(&mp->stats.xid_not_found);
36802 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36803 goto rel;
36804 }
36805 if (ep->rxid == FC_XID_UNKNOWN)
36806 ep->rxid = ntohs(fh->fh_rx_id);
36807 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36808 - atomic_inc(&mp->stats.xid_not_found);
36809 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36810 goto rel;
36811 }
36812 if (ep->did != ntoh24(fh->fh_s_id) &&
36813 ep->did != FC_FID_FLOGI) {
36814 - atomic_inc(&mp->stats.xid_not_found);
36815 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36816 goto rel;
36817 }
36818 sof = fr_sof(fp);
36819 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36820 sp->ssb_stat |= SSB_ST_RESP;
36821 sp->id = fh->fh_seq_id;
36822 } else if (sp->id != fh->fh_seq_id) {
36823 - atomic_inc(&mp->stats.seq_not_found);
36824 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36825 goto rel;
36826 }
36827
36828 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36829 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36830
36831 if (!sp)
36832 - atomic_inc(&mp->stats.xid_not_found);
36833 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36834 else
36835 - atomic_inc(&mp->stats.non_bls_resp);
36836 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36837
36838 fc_frame_free(fp);
36839 }
36840 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36841 index db9238f..4378ed2 100644
36842 --- a/drivers/scsi/libsas/sas_ata.c
36843 +++ b/drivers/scsi/libsas/sas_ata.c
36844 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
36845 .postreset = ata_std_postreset,
36846 .error_handler = ata_std_error_handler,
36847 .post_internal_cmd = sas_ata_post_internal,
36848 - .qc_defer = ata_std_qc_defer,
36849 + .qc_defer = ata_std_qc_defer,
36850 .qc_prep = ata_noop_qc_prep,
36851 .qc_issue = sas_ata_qc_issue,
36852 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36853 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36854 index 825f930..ce42672 100644
36855 --- a/drivers/scsi/lpfc/lpfc.h
36856 +++ b/drivers/scsi/lpfc/lpfc.h
36857 @@ -413,7 +413,7 @@ struct lpfc_vport {
36858 struct dentry *debug_nodelist;
36859 struct dentry *vport_debugfs_root;
36860 struct lpfc_debugfs_trc *disc_trc;
36861 - atomic_t disc_trc_cnt;
36862 + atomic_unchecked_t disc_trc_cnt;
36863 #endif
36864 uint8_t stat_data_enabled;
36865 uint8_t stat_data_blocked;
36866 @@ -821,8 +821,8 @@ struct lpfc_hba {
36867 struct timer_list fabric_block_timer;
36868 unsigned long bit_flags;
36869 #define FABRIC_COMANDS_BLOCKED 0
36870 - atomic_t num_rsrc_err;
36871 - atomic_t num_cmd_success;
36872 + atomic_unchecked_t num_rsrc_err;
36873 + atomic_unchecked_t num_cmd_success;
36874 unsigned long last_rsrc_error_time;
36875 unsigned long last_ramp_down_time;
36876 unsigned long last_ramp_up_time;
36877 @@ -852,7 +852,7 @@ struct lpfc_hba {
36878
36879 struct dentry *debug_slow_ring_trc;
36880 struct lpfc_debugfs_trc *slow_ring_trc;
36881 - atomic_t slow_ring_trc_cnt;
36882 + atomic_unchecked_t slow_ring_trc_cnt;
36883 /* iDiag debugfs sub-directory */
36884 struct dentry *idiag_root;
36885 struct dentry *idiag_pci_cfg;
36886 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36887 index 3587a3f..d45b81b 100644
36888 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
36889 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36890 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36891
36892 #include <linux/debugfs.h>
36893
36894 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36895 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36896 static unsigned long lpfc_debugfs_start_time = 0L;
36897
36898 /* iDiag */
36899 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36900 lpfc_debugfs_enable = 0;
36901
36902 len = 0;
36903 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36904 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36905 (lpfc_debugfs_max_disc_trc - 1);
36906 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36907 dtp = vport->disc_trc + i;
36908 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36909 lpfc_debugfs_enable = 0;
36910
36911 len = 0;
36912 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36913 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36914 (lpfc_debugfs_max_slow_ring_trc - 1);
36915 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36916 dtp = phba->slow_ring_trc + i;
36917 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36918 !vport || !vport->disc_trc)
36919 return;
36920
36921 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36922 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36923 (lpfc_debugfs_max_disc_trc - 1);
36924 dtp = vport->disc_trc + index;
36925 dtp->fmt = fmt;
36926 dtp->data1 = data1;
36927 dtp->data2 = data2;
36928 dtp->data3 = data3;
36929 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36930 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36931 dtp->jif = jiffies;
36932 #endif
36933 return;
36934 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36935 !phba || !phba->slow_ring_trc)
36936 return;
36937
36938 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36939 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36940 (lpfc_debugfs_max_slow_ring_trc - 1);
36941 dtp = phba->slow_ring_trc + index;
36942 dtp->fmt = fmt;
36943 dtp->data1 = data1;
36944 dtp->data2 = data2;
36945 dtp->data3 = data3;
36946 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36947 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36948 dtp->jif = jiffies;
36949 #endif
36950 return;
36951 @@ -4040,7 +4040,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36952 "slow_ring buffer\n");
36953 goto debug_failed;
36954 }
36955 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36956 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36957 memset(phba->slow_ring_trc, 0,
36958 (sizeof(struct lpfc_debugfs_trc) *
36959 lpfc_debugfs_max_slow_ring_trc));
36960 @@ -4086,7 +4086,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36961 "buffer\n");
36962 goto debug_failed;
36963 }
36964 - atomic_set(&vport->disc_trc_cnt, 0);
36965 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36966
36967 snprintf(name, sizeof(name), "discovery_trace");
36968 vport->debug_disc_trc =
36969 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36970 index dfea2da..8e17227 100644
36971 --- a/drivers/scsi/lpfc/lpfc_init.c
36972 +++ b/drivers/scsi/lpfc/lpfc_init.c
36973 @@ -10145,8 +10145,10 @@ lpfc_init(void)
36974 printk(LPFC_COPYRIGHT "\n");
36975
36976 if (lpfc_enable_npiv) {
36977 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36978 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36979 + pax_open_kernel();
36980 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36981 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36982 + pax_close_kernel();
36983 }
36984 lpfc_transport_template =
36985 fc_attach_transport(&lpfc_transport_functions);
36986 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36987 index c60f5d0..751535c 100644
36988 --- a/drivers/scsi/lpfc/lpfc_scsi.c
36989 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
36990 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36991 uint32_t evt_posted;
36992
36993 spin_lock_irqsave(&phba->hbalock, flags);
36994 - atomic_inc(&phba->num_rsrc_err);
36995 + atomic_inc_unchecked(&phba->num_rsrc_err);
36996 phba->last_rsrc_error_time = jiffies;
36997
36998 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36999 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
37000 unsigned long flags;
37001 struct lpfc_hba *phba = vport->phba;
37002 uint32_t evt_posted;
37003 - atomic_inc(&phba->num_cmd_success);
37004 + atomic_inc_unchecked(&phba->num_cmd_success);
37005
37006 if (vport->cfg_lun_queue_depth <= queue_depth)
37007 return;
37008 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
37009 unsigned long num_rsrc_err, num_cmd_success;
37010 int i;
37011
37012 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
37013 - num_cmd_success = atomic_read(&phba->num_cmd_success);
37014 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
37015 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
37016
37017 vports = lpfc_create_vport_work_array(phba);
37018 if (vports != NULL)
37019 @@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
37020 }
37021 }
37022 lpfc_destroy_vport_work_array(phba, vports);
37023 - atomic_set(&phba->num_rsrc_err, 0);
37024 - atomic_set(&phba->num_cmd_success, 0);
37025 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37026 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37027 }
37028
37029 /**
37030 @@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
37031 }
37032 }
37033 lpfc_destroy_vport_work_array(phba, vports);
37034 - atomic_set(&phba->num_rsrc_err, 0);
37035 - atomic_set(&phba->num_cmd_success, 0);
37036 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37037 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37038 }
37039
37040 /**
37041 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
37042 index ea8a0b4..812a124 100644
37043 --- a/drivers/scsi/pmcraid.c
37044 +++ b/drivers/scsi/pmcraid.c
37045 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
37046 res->scsi_dev = scsi_dev;
37047 scsi_dev->hostdata = res;
37048 res->change_detected = 0;
37049 - atomic_set(&res->read_failures, 0);
37050 - atomic_set(&res->write_failures, 0);
37051 + atomic_set_unchecked(&res->read_failures, 0);
37052 + atomic_set_unchecked(&res->write_failures, 0);
37053 rc = 0;
37054 }
37055 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37056 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
37057
37058 /* If this was a SCSI read/write command keep count of errors */
37059 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37060 - atomic_inc(&res->read_failures);
37061 + atomic_inc_unchecked(&res->read_failures);
37062 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37063 - atomic_inc(&res->write_failures);
37064 + atomic_inc_unchecked(&res->write_failures);
37065
37066 if (!RES_IS_GSCSI(res->cfg_entry) &&
37067 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37068 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
37069 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37070 * hrrq_id assigned here in queuecommand
37071 */
37072 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37073 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37074 pinstance->num_hrrq;
37075 cmd->cmd_done = pmcraid_io_done;
37076
37077 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
37078 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37079 * hrrq_id assigned here in queuecommand
37080 */
37081 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37082 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37083 pinstance->num_hrrq;
37084
37085 if (request_size) {
37086 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
37087
37088 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37089 /* add resources only after host is added into system */
37090 - if (!atomic_read(&pinstance->expose_resources))
37091 + if (!atomic_read_unchecked(&pinstance->expose_resources))
37092 return;
37093
37094 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
37095 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
37096 init_waitqueue_head(&pinstance->reset_wait_q);
37097
37098 atomic_set(&pinstance->outstanding_cmds, 0);
37099 - atomic_set(&pinstance->last_message_id, 0);
37100 - atomic_set(&pinstance->expose_resources, 0);
37101 + atomic_set_unchecked(&pinstance->last_message_id, 0);
37102 + atomic_set_unchecked(&pinstance->expose_resources, 0);
37103
37104 INIT_LIST_HEAD(&pinstance->free_res_q);
37105 INIT_LIST_HEAD(&pinstance->used_res_q);
37106 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
37107 /* Schedule worker thread to handle CCN and take care of adding and
37108 * removing devices to OS
37109 */
37110 - atomic_set(&pinstance->expose_resources, 1);
37111 + atomic_set_unchecked(&pinstance->expose_resources, 1);
37112 schedule_work(&pinstance->worker_q);
37113 return rc;
37114
37115 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
37116 index ca496c7..9c791d5 100644
37117 --- a/drivers/scsi/pmcraid.h
37118 +++ b/drivers/scsi/pmcraid.h
37119 @@ -748,7 +748,7 @@ struct pmcraid_instance {
37120 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
37121
37122 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
37123 - atomic_t last_message_id;
37124 + atomic_unchecked_t last_message_id;
37125
37126 /* configuration table */
37127 struct pmcraid_config_table *cfg_table;
37128 @@ -777,7 +777,7 @@ struct pmcraid_instance {
37129 atomic_t outstanding_cmds;
37130
37131 /* should add/delete resources to mid-layer now ?*/
37132 - atomic_t expose_resources;
37133 + atomic_unchecked_t expose_resources;
37134
37135
37136
37137 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
37138 struct pmcraid_config_table_entry_ext cfg_entry_ext;
37139 };
37140 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37141 - atomic_t read_failures; /* count of failed READ commands */
37142 - atomic_t write_failures; /* count of failed WRITE commands */
37143 + atomic_unchecked_t read_failures; /* count of failed READ commands */
37144 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37145
37146 /* To indicate add/delete/modify during CCN */
37147 u8 change_detected;
37148 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
37149 index af1003f..be55a75 100644
37150 --- a/drivers/scsi/qla2xxx/qla_def.h
37151 +++ b/drivers/scsi/qla2xxx/qla_def.h
37152 @@ -2247,7 +2247,7 @@ struct isp_operations {
37153 int (*start_scsi) (srb_t *);
37154 int (*abort_isp) (struct scsi_qla_host *);
37155 int (*iospace_config)(struct qla_hw_data*);
37156 -};
37157 +} __no_const;
37158
37159 /* MSI-X Support *************************************************************/
37160
37161 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
37162 index bfe6854..ceac088 100644
37163 --- a/drivers/scsi/qla4xxx/ql4_def.h
37164 +++ b/drivers/scsi/qla4xxx/ql4_def.h
37165 @@ -261,7 +261,7 @@ struct ddb_entry {
37166 * (4000 only) */
37167 atomic_t relogin_timer; /* Max Time to wait for
37168 * relogin to complete */
37169 - atomic_t relogin_retry_count; /* Num of times relogin has been
37170 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37171 * retried */
37172 uint32_t default_time2wait; /* Default Min time between
37173 * relogins (+aens) */
37174 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
37175 index ce6d3b7..73fac54 100644
37176 --- a/drivers/scsi/qla4xxx/ql4_os.c
37177 +++ b/drivers/scsi/qla4xxx/ql4_os.c
37178 @@ -2178,12 +2178,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
37179 */
37180 if (!iscsi_is_session_online(cls_sess)) {
37181 /* Reset retry relogin timer */
37182 - atomic_inc(&ddb_entry->relogin_retry_count);
37183 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37184 DEBUG2(ql4_printk(KERN_INFO, ha,
37185 "%s: index[%d] relogin timed out-retrying"
37186 " relogin (%d), retry (%d)\n", __func__,
37187 ddb_entry->fw_ddb_index,
37188 - atomic_read(&ddb_entry->relogin_retry_count),
37189 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
37190 ddb_entry->default_time2wait + 4));
37191 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
37192 atomic_set(&ddb_entry->retry_relogin_timer,
37193 @@ -3953,7 +3953,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
37194
37195 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37196 atomic_set(&ddb_entry->relogin_timer, 0);
37197 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37198 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37199 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
37200 ddb_entry->default_relogin_timeout =
37201 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
37202 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
37203 index 2aeb2e9..46e3925 100644
37204 --- a/drivers/scsi/scsi.c
37205 +++ b/drivers/scsi/scsi.c
37206 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
37207 unsigned long timeout;
37208 int rtn = 0;
37209
37210 - atomic_inc(&cmd->device->iorequest_cnt);
37211 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37212
37213 /* check if the device is still usable */
37214 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37215 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
37216 index b2c95db..227d74e 100644
37217 --- a/drivers/scsi/scsi_lib.c
37218 +++ b/drivers/scsi/scsi_lib.c
37219 @@ -1411,7 +1411,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
37220 shost = sdev->host;
37221 scsi_init_cmd_errh(cmd);
37222 cmd->result = DID_NO_CONNECT << 16;
37223 - atomic_inc(&cmd->device->iorequest_cnt);
37224 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37225
37226 /*
37227 * SCSI request completion path will do scsi_device_unbusy(),
37228 @@ -1437,9 +1437,9 @@ static void scsi_softirq_done(struct request *rq)
37229
37230 INIT_LIST_HEAD(&cmd->eh_entry);
37231
37232 - atomic_inc(&cmd->device->iodone_cnt);
37233 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
37234 if (cmd->result)
37235 - atomic_inc(&cmd->device->ioerr_cnt);
37236 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37237
37238 disposition = scsi_decide_disposition(cmd);
37239 if (disposition != SUCCESS &&
37240 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
37241 index 04c2a27..9d8bd66 100644
37242 --- a/drivers/scsi/scsi_sysfs.c
37243 +++ b/drivers/scsi/scsi_sysfs.c
37244 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
37245 char *buf) \
37246 { \
37247 struct scsi_device *sdev = to_scsi_device(dev); \
37248 - unsigned long long count = atomic_read(&sdev->field); \
37249 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
37250 return snprintf(buf, 20, "0x%llx\n", count); \
37251 } \
37252 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37253 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
37254 index 84a1fdf..693b0d6 100644
37255 --- a/drivers/scsi/scsi_tgt_lib.c
37256 +++ b/drivers/scsi/scsi_tgt_lib.c
37257 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
37258 int err;
37259
37260 dprintk("%lx %u\n", uaddr, len);
37261 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
37262 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
37263 if (err) {
37264 /*
37265 * TODO: need to fixup sg_tablesize, max_segment_size,
37266 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
37267 index f59d4a0..1d89407 100644
37268 --- a/drivers/scsi/scsi_transport_fc.c
37269 +++ b/drivers/scsi/scsi_transport_fc.c
37270 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
37271 * Netlink Infrastructure
37272 */
37273
37274 -static atomic_t fc_event_seq;
37275 +static atomic_unchecked_t fc_event_seq;
37276
37277 /**
37278 * fc_get_event_number - Obtain the next sequential FC event number
37279 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
37280 u32
37281 fc_get_event_number(void)
37282 {
37283 - return atomic_add_return(1, &fc_event_seq);
37284 + return atomic_add_return_unchecked(1, &fc_event_seq);
37285 }
37286 EXPORT_SYMBOL(fc_get_event_number);
37287
37288 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
37289 {
37290 int error;
37291
37292 - atomic_set(&fc_event_seq, 0);
37293 + atomic_set_unchecked(&fc_event_seq, 0);
37294
37295 error = transport_class_register(&fc_host_class);
37296 if (error)
37297 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
37298 char *cp;
37299
37300 *val = simple_strtoul(buf, &cp, 0);
37301 - if ((*cp && (*cp != '\n')) || (*val < 0))
37302 + if (*cp && (*cp != '\n'))
37303 return -EINVAL;
37304 /*
37305 * Check for overflow; dev_loss_tmo is u32
37306 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
37307 index e3e3c7d..ebdab62 100644
37308 --- a/drivers/scsi/scsi_transport_iscsi.c
37309 +++ b/drivers/scsi/scsi_transport_iscsi.c
37310 @@ -79,7 +79,7 @@ struct iscsi_internal {
37311 struct transport_container session_cont;
37312 };
37313
37314 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37315 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37316 static struct workqueue_struct *iscsi_eh_timer_workq;
37317
37318 static DEFINE_IDA(iscsi_sess_ida);
37319 @@ -1063,7 +1063,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
37320 int err;
37321
37322 ihost = shost->shost_data;
37323 - session->sid = atomic_add_return(1, &iscsi_session_nr);
37324 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37325
37326 if (target_id == ISCSI_MAX_TARGET) {
37327 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
37328 @@ -2680,7 +2680,7 @@ static __init int iscsi_transport_init(void)
37329 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37330 ISCSI_TRANSPORT_VERSION);
37331
37332 - atomic_set(&iscsi_session_nr, 0);
37333 + atomic_set_unchecked(&iscsi_session_nr, 0);
37334
37335 err = class_register(&iscsi_transport_class);
37336 if (err)
37337 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
37338 index 21a045e..ec89e03 100644
37339 --- a/drivers/scsi/scsi_transport_srp.c
37340 +++ b/drivers/scsi/scsi_transport_srp.c
37341 @@ -33,7 +33,7 @@
37342 #include "scsi_transport_srp_internal.h"
37343
37344 struct srp_host_attrs {
37345 - atomic_t next_port_id;
37346 + atomic_unchecked_t next_port_id;
37347 };
37348 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37349
37350 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
37351 struct Scsi_Host *shost = dev_to_shost(dev);
37352 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37353
37354 - atomic_set(&srp_host->next_port_id, 0);
37355 + atomic_set_unchecked(&srp_host->next_port_id, 0);
37356 return 0;
37357 }
37358
37359 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
37360 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37361 rport->roles = ids->roles;
37362
37363 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37364 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37365 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37366
37367 transport_setup_device(&rport->dev);
37368 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
37369 index eacd46b..e3f4d62 100644
37370 --- a/drivers/scsi/sg.c
37371 +++ b/drivers/scsi/sg.c
37372 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
37373 sdp->disk->disk_name,
37374 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
37375 NULL,
37376 - (char *)arg);
37377 + (char __user *)arg);
37378 case BLKTRACESTART:
37379 return blk_trace_startstop(sdp->device->request_queue, 1);
37380 case BLKTRACESTOP:
37381 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
37382 const struct file_operations * fops;
37383 };
37384
37385 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37386 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37387 {"allow_dio", &adio_fops},
37388 {"debug", &debug_fops},
37389 {"def_reserved_size", &dressz_fops},
37390 @@ -2332,7 +2332,7 @@ sg_proc_init(void)
37391 if (!sg_proc_sgp)
37392 return 1;
37393 for (k = 0; k < num_leaves; ++k) {
37394 - struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37395 + const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37396 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
37397 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
37398 }
37399 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
37400 index f64250e..1ee3049 100644
37401 --- a/drivers/spi/spi-dw-pci.c
37402 +++ b/drivers/spi/spi-dw-pci.c
37403 @@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
37404 #define spi_resume NULL
37405 #endif
37406
37407 -static const struct pci_device_id pci_ids[] __devinitdata = {
37408 +static const struct pci_device_id pci_ids[] __devinitconst = {
37409 /* Intel MID platform SPI controller 0 */
37410 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
37411 {},
37412 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
37413 index b2ccdea..84cde75 100644
37414 --- a/drivers/spi/spi.c
37415 +++ b/drivers/spi/spi.c
37416 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
37417 EXPORT_SYMBOL_GPL(spi_bus_unlock);
37418
37419 /* portable code must never pass more than 32 bytes */
37420 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37421 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
37422
37423 static u8 *buf;
37424
37425 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37426 index 400df8c..065d4f4 100644
37427 --- a/drivers/staging/octeon/ethernet-rx.c
37428 +++ b/drivers/staging/octeon/ethernet-rx.c
37429 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37430 /* Increment RX stats for virtual ports */
37431 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37432 #ifdef CONFIG_64BIT
37433 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37434 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37435 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37436 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37437 #else
37438 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37439 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37440 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37441 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37442 #endif
37443 }
37444 netif_receive_skb(skb);
37445 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37446 dev->name);
37447 */
37448 #ifdef CONFIG_64BIT
37449 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37450 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37451 #else
37452 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37453 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37454 #endif
37455 dev_kfree_skb_irq(skb);
37456 }
37457 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37458 index 9112cd8..92f8d51 100644
37459 --- a/drivers/staging/octeon/ethernet.c
37460 +++ b/drivers/staging/octeon/ethernet.c
37461 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37462 * since the RX tasklet also increments it.
37463 */
37464 #ifdef CONFIG_64BIT
37465 - atomic64_add(rx_status.dropped_packets,
37466 - (atomic64_t *)&priv->stats.rx_dropped);
37467 + atomic64_add_unchecked(rx_status.dropped_packets,
37468 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37469 #else
37470 - atomic_add(rx_status.dropped_packets,
37471 - (atomic_t *)&priv->stats.rx_dropped);
37472 + atomic_add_unchecked(rx_status.dropped_packets,
37473 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37474 #endif
37475 }
37476
37477 diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
37478 index f9dae95..ff48901 100644
37479 --- a/drivers/staging/rtl8192e/rtllib_module.c
37480 +++ b/drivers/staging/rtl8192e/rtllib_module.c
37481 @@ -215,6 +215,8 @@ static int show_debug_level(char *page, char **start, off_t offset,
37482 }
37483
37484 static int store_debug_level(struct file *file, const char __user *buffer,
37485 + unsigned long count, void *data) __size_overflow(3);
37486 +static int store_debug_level(struct file *file, const char __user *buffer,
37487 unsigned long count, void *data)
37488 {
37489 char buf[] = "0x00000000";
37490 diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
37491 index e3d47bc..85f4d0d 100644
37492 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
37493 +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
37494 @@ -250,6 +250,8 @@ static int show_debug_level(char *page, char **start, off_t offset,
37495 }
37496
37497 static int store_debug_level(struct file *file, const char *buffer,
37498 + unsigned long count, void *data) __size_overflow(3);
37499 +static int store_debug_level(struct file *file, const char *buffer,
37500 unsigned long count, void *data)
37501 {
37502 char buf[] = "0x00000000";
37503 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37504 index 86308a0..feaa925 100644
37505 --- a/drivers/staging/rtl8712/rtl871x_io.h
37506 +++ b/drivers/staging/rtl8712/rtl871x_io.h
37507 @@ -108,7 +108,7 @@ struct _io_ops {
37508 u8 *pmem);
37509 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37510 u8 *pmem);
37511 -};
37512 +} __no_const;
37513
37514 struct io_req {
37515 struct list_head list;
37516 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37517 index c7b5e8b..783d6cb 100644
37518 --- a/drivers/staging/sbe-2t3e3/netdev.c
37519 +++ b/drivers/staging/sbe-2t3e3/netdev.c
37520 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37521 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37522
37523 if (rlen)
37524 - if (copy_to_user(data, &resp, rlen))
37525 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37526 return -EFAULT;
37527
37528 return 0;
37529 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37530 index 42cdafe..2769103 100644
37531 --- a/drivers/staging/speakup/speakup_soft.c
37532 +++ b/drivers/staging/speakup/speakup_soft.c
37533 @@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37534 break;
37535 } else if (!initialized) {
37536 if (*init) {
37537 - ch = *init;
37538 init++;
37539 } else {
37540 initialized = 1;
37541 }
37542 + ch = *init;
37543 } else {
37544 ch = synth_buffer_getc();
37545 }
37546 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37547 index b8f8c48..1fc5025 100644
37548 --- a/drivers/staging/usbip/usbip_common.h
37549 +++ b/drivers/staging/usbip/usbip_common.h
37550 @@ -289,7 +289,7 @@ struct usbip_device {
37551 void (*shutdown)(struct usbip_device *);
37552 void (*reset)(struct usbip_device *);
37553 void (*unusable)(struct usbip_device *);
37554 - } eh_ops;
37555 + } __no_const eh_ops;
37556 };
37557
37558 /* usbip_common.c */
37559 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37560 index 88b3298..3783eee 100644
37561 --- a/drivers/staging/usbip/vhci.h
37562 +++ b/drivers/staging/usbip/vhci.h
37563 @@ -88,7 +88,7 @@ struct vhci_hcd {
37564 unsigned resuming:1;
37565 unsigned long re_timeout;
37566
37567 - atomic_t seqnum;
37568 + atomic_unchecked_t seqnum;
37569
37570 /*
37571 * NOTE:
37572 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37573 index 2ee97e2..0420b86 100644
37574 --- a/drivers/staging/usbip/vhci_hcd.c
37575 +++ b/drivers/staging/usbip/vhci_hcd.c
37576 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
37577 return;
37578 }
37579
37580 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37581 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37582 if (priv->seqnum == 0xffff)
37583 dev_info(&urb->dev->dev, "seqnum max\n");
37584
37585 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37586 return -ENOMEM;
37587 }
37588
37589 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37590 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37591 if (unlink->seqnum == 0xffff)
37592 pr_info("seqnum max\n");
37593
37594 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
37595 vdev->rhport = rhport;
37596 }
37597
37598 - atomic_set(&vhci->seqnum, 0);
37599 + atomic_set_unchecked(&vhci->seqnum, 0);
37600 spin_lock_init(&vhci->lock);
37601
37602 hcd->power_budget = 0; /* no limit */
37603 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37604 index 3f511b4..d3dbc1e 100644
37605 --- a/drivers/staging/usbip/vhci_rx.c
37606 +++ b/drivers/staging/usbip/vhci_rx.c
37607 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37608 if (!urb) {
37609 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37610 pr_info("max seqnum %d\n",
37611 - atomic_read(&the_controller->seqnum));
37612 + atomic_read_unchecked(&the_controller->seqnum));
37613 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37614 return;
37615 }
37616 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37617 index 7735027..30eed13 100644
37618 --- a/drivers/staging/vt6655/hostap.c
37619 +++ b/drivers/staging/vt6655/hostap.c
37620 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37621 *
37622 */
37623
37624 +static net_device_ops_no_const apdev_netdev_ops;
37625 +
37626 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37627 {
37628 PSDevice apdev_priv;
37629 struct net_device *dev = pDevice->dev;
37630 int ret;
37631 - const struct net_device_ops apdev_netdev_ops = {
37632 - .ndo_start_xmit = pDevice->tx_80211,
37633 - };
37634
37635 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37636
37637 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37638 *apdev_priv = *pDevice;
37639 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37640
37641 + /* only half broken now */
37642 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37643 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37644
37645 pDevice->apdev->type = ARPHRD_IEEE80211;
37646 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37647 index 51b5adf..098e320 100644
37648 --- a/drivers/staging/vt6656/hostap.c
37649 +++ b/drivers/staging/vt6656/hostap.c
37650 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37651 *
37652 */
37653
37654 +static net_device_ops_no_const apdev_netdev_ops;
37655 +
37656 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37657 {
37658 PSDevice apdev_priv;
37659 struct net_device *dev = pDevice->dev;
37660 int ret;
37661 - const struct net_device_ops apdev_netdev_ops = {
37662 - .ndo_start_xmit = pDevice->tx_80211,
37663 - };
37664
37665 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37666
37667 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37668 *apdev_priv = *pDevice;
37669 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37670
37671 + /* only half broken now */
37672 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37673 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37674
37675 pDevice->apdev->type = ARPHRD_IEEE80211;
37676 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37677 index 7843dfd..3db105f 100644
37678 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
37679 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37680 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37681
37682 struct usbctlx_completor {
37683 int (*complete) (struct usbctlx_completor *);
37684 -};
37685 +} __no_const;
37686
37687 static int
37688 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37689 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37690 index 1ca66ea..76f1343 100644
37691 --- a/drivers/staging/zcache/tmem.c
37692 +++ b/drivers/staging/zcache/tmem.c
37693 @@ -39,7 +39,7 @@
37694 * A tmem host implementation must use this function to register callbacks
37695 * for memory allocation.
37696 */
37697 -static struct tmem_hostops tmem_hostops;
37698 +static tmem_hostops_no_const tmem_hostops;
37699
37700 static void tmem_objnode_tree_init(void);
37701
37702 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37703 * A tmem host implementation must use this function to register
37704 * callbacks for a page-accessible memory (PAM) implementation
37705 */
37706 -static struct tmem_pamops tmem_pamops;
37707 +static tmem_pamops_no_const tmem_pamops;
37708
37709 void tmem_register_pamops(struct tmem_pamops *m)
37710 {
37711 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37712 index ed147c4..94fc3c6 100644
37713 --- a/drivers/staging/zcache/tmem.h
37714 +++ b/drivers/staging/zcache/tmem.h
37715 @@ -180,6 +180,7 @@ struct tmem_pamops {
37716 void (*new_obj)(struct tmem_obj *);
37717 int (*replace_in_obj)(void *, struct tmem_obj *);
37718 };
37719 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37720 extern void tmem_register_pamops(struct tmem_pamops *m);
37721
37722 /* memory allocation methods provided by the host implementation */
37723 @@ -189,6 +190,7 @@ struct tmem_hostops {
37724 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37725 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37726 };
37727 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37728 extern void tmem_register_hostops(struct tmem_hostops *m);
37729
37730 /* core tmem accessor functions */
37731 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
37732 index 97c74ee..7f6d77d 100644
37733 --- a/drivers/target/iscsi/iscsi_target.c
37734 +++ b/drivers/target/iscsi/iscsi_target.c
37735 @@ -1361,7 +1361,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
37736 * outstanding_r2ts reaches zero, go ahead and send the delayed
37737 * TASK_ABORTED status.
37738 */
37739 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
37740 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
37741 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
37742 if (--cmd->outstanding_r2ts < 1) {
37743 iscsit_stop_dataout_timer(cmd);
37744 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37745 index dcb0618..97e3d85 100644
37746 --- a/drivers/target/target_core_tmr.c
37747 +++ b/drivers/target/target_core_tmr.c
37748 @@ -260,7 +260,7 @@ static void core_tmr_drain_task_list(
37749 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37750 cmd->t_task_list_num,
37751 atomic_read(&cmd->t_task_cdbs_left),
37752 - atomic_read(&cmd->t_task_cdbs_sent),
37753 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37754 atomic_read(&cmd->t_transport_active),
37755 atomic_read(&cmd->t_transport_stop),
37756 atomic_read(&cmd->t_transport_sent));
37757 @@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
37758 pr_debug("LUN_RESET: got t_transport_active = 1 for"
37759 " task: %p, t_fe_count: %d dev: %p\n", task,
37760 fe_count, dev);
37761 - atomic_set(&cmd->t_transport_aborted, 1);
37762 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37763 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37764
37765 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
37766 @@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
37767 }
37768 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
37769 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
37770 - atomic_set(&cmd->t_transport_aborted, 1);
37771 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37772 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37773
37774 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
37775 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37776 index cd5cd95..5249d30 100644
37777 --- a/drivers/target/target_core_transport.c
37778 +++ b/drivers/target/target_core_transport.c
37779 @@ -1330,7 +1330,7 @@ struct se_device *transport_add_device_to_core_hba(
37780 spin_lock_init(&dev->se_port_lock);
37781 spin_lock_init(&dev->se_tmr_lock);
37782 spin_lock_init(&dev->qf_cmd_lock);
37783 - atomic_set(&dev->dev_ordered_id, 0);
37784 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
37785
37786 se_dev_set_default_attribs(dev, dev_limits);
37787
37788 @@ -1517,7 +1517,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37789 * Used to determine when ORDERED commands should go from
37790 * Dormant to Active status.
37791 */
37792 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37793 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37794 smp_mb__after_atomic_inc();
37795 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37796 cmd->se_ordered_id, cmd->sam_task_attr,
37797 @@ -1862,7 +1862,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
37798 " t_transport_active: %d t_transport_stop: %d"
37799 " t_transport_sent: %d\n", cmd->t_task_list_num,
37800 atomic_read(&cmd->t_task_cdbs_left),
37801 - atomic_read(&cmd->t_task_cdbs_sent),
37802 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37803 atomic_read(&cmd->t_task_cdbs_ex_left),
37804 atomic_read(&cmd->t_transport_active),
37805 atomic_read(&cmd->t_transport_stop),
37806 @@ -2121,9 +2121,9 @@ check_depth:
37807 cmd = task->task_se_cmd;
37808 spin_lock_irqsave(&cmd->t_state_lock, flags);
37809 task->task_flags |= (TF_ACTIVE | TF_SENT);
37810 - atomic_inc(&cmd->t_task_cdbs_sent);
37811 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37812
37813 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
37814 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37815 cmd->t_task_list_num)
37816 atomic_set(&cmd->t_transport_sent, 1);
37817
37818 @@ -4348,7 +4348,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
37819 atomic_set(&cmd->transport_lun_stop, 0);
37820 }
37821 if (!atomic_read(&cmd->t_transport_active) ||
37822 - atomic_read(&cmd->t_transport_aborted)) {
37823 + atomic_read_unchecked(&cmd->t_transport_aborted)) {
37824 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37825 return false;
37826 }
37827 @@ -4597,7 +4597,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
37828 {
37829 int ret = 0;
37830
37831 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
37832 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
37833 if (!send_status ||
37834 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
37835 return 1;
37836 @@ -4634,7 +4634,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
37837 */
37838 if (cmd->data_direction == DMA_TO_DEVICE) {
37839 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
37840 - atomic_inc(&cmd->t_transport_aborted);
37841 + atomic_inc_unchecked(&cmd->t_transport_aborted);
37842 smp_mb__after_atomic_inc();
37843 }
37844 }
37845 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37846 index b9040be..e3f5aab 100644
37847 --- a/drivers/tty/hvc/hvcs.c
37848 +++ b/drivers/tty/hvc/hvcs.c
37849 @@ -83,6 +83,7 @@
37850 #include <asm/hvcserver.h>
37851 #include <asm/uaccess.h>
37852 #include <asm/vio.h>
37853 +#include <asm/local.h>
37854
37855 /*
37856 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37857 @@ -270,7 +271,7 @@ struct hvcs_struct {
37858 unsigned int index;
37859
37860 struct tty_struct *tty;
37861 - int open_count;
37862 + local_t open_count;
37863
37864 /*
37865 * Used to tell the driver kernel_thread what operations need to take
37866 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37867
37868 spin_lock_irqsave(&hvcsd->lock, flags);
37869
37870 - if (hvcsd->open_count > 0) {
37871 + if (local_read(&hvcsd->open_count) > 0) {
37872 spin_unlock_irqrestore(&hvcsd->lock, flags);
37873 printk(KERN_INFO "HVCS: vterm state unchanged. "
37874 "The hvcs device node is still in use.\n");
37875 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37876 if ((retval = hvcs_partner_connect(hvcsd)))
37877 goto error_release;
37878
37879 - hvcsd->open_count = 1;
37880 + local_set(&hvcsd->open_count, 1);
37881 hvcsd->tty = tty;
37882 tty->driver_data = hvcsd;
37883
37884 @@ -1179,7 +1180,7 @@ fast_open:
37885
37886 spin_lock_irqsave(&hvcsd->lock, flags);
37887 kref_get(&hvcsd->kref);
37888 - hvcsd->open_count++;
37889 + local_inc(&hvcsd->open_count);
37890 hvcsd->todo_mask |= HVCS_SCHED_READ;
37891 spin_unlock_irqrestore(&hvcsd->lock, flags);
37892
37893 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37894 hvcsd = tty->driver_data;
37895
37896 spin_lock_irqsave(&hvcsd->lock, flags);
37897 - if (--hvcsd->open_count == 0) {
37898 + if (local_dec_and_test(&hvcsd->open_count)) {
37899
37900 vio_disable_interrupts(hvcsd->vdev);
37901
37902 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37903 free_irq(irq, hvcsd);
37904 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37905 return;
37906 - } else if (hvcsd->open_count < 0) {
37907 + } else if (local_read(&hvcsd->open_count) < 0) {
37908 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37909 " is missmanaged.\n",
37910 - hvcsd->vdev->unit_address, hvcsd->open_count);
37911 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37912 }
37913
37914 spin_unlock_irqrestore(&hvcsd->lock, flags);
37915 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37916
37917 spin_lock_irqsave(&hvcsd->lock, flags);
37918 /* Preserve this so that we know how many kref refs to put */
37919 - temp_open_count = hvcsd->open_count;
37920 + temp_open_count = local_read(&hvcsd->open_count);
37921
37922 /*
37923 * Don't kref put inside the spinlock because the destruction
37924 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37925 hvcsd->tty->driver_data = NULL;
37926 hvcsd->tty = NULL;
37927
37928 - hvcsd->open_count = 0;
37929 + local_set(&hvcsd->open_count, 0);
37930
37931 /* This will drop any buffered data on the floor which is OK in a hangup
37932 * scenario. */
37933 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
37934 * the middle of a write operation? This is a crummy place to do this
37935 * but we want to keep it all in the spinlock.
37936 */
37937 - if (hvcsd->open_count <= 0) {
37938 + if (local_read(&hvcsd->open_count) <= 0) {
37939 spin_unlock_irqrestore(&hvcsd->lock, flags);
37940 return -ENODEV;
37941 }
37942 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37943 {
37944 struct hvcs_struct *hvcsd = tty->driver_data;
37945
37946 - if (!hvcsd || hvcsd->open_count <= 0)
37947 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37948 return 0;
37949
37950 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37951 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37952 index ef92869..f4ebd88 100644
37953 --- a/drivers/tty/ipwireless/tty.c
37954 +++ b/drivers/tty/ipwireless/tty.c
37955 @@ -29,6 +29,7 @@
37956 #include <linux/tty_driver.h>
37957 #include <linux/tty_flip.h>
37958 #include <linux/uaccess.h>
37959 +#include <asm/local.h>
37960
37961 #include "tty.h"
37962 #include "network.h"
37963 @@ -51,7 +52,7 @@ struct ipw_tty {
37964 int tty_type;
37965 struct ipw_network *network;
37966 struct tty_struct *linux_tty;
37967 - int open_count;
37968 + local_t open_count;
37969 unsigned int control_lines;
37970 struct mutex ipw_tty_mutex;
37971 int tx_bytes_queued;
37972 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37973 mutex_unlock(&tty->ipw_tty_mutex);
37974 return -ENODEV;
37975 }
37976 - if (tty->open_count == 0)
37977 + if (local_read(&tty->open_count) == 0)
37978 tty->tx_bytes_queued = 0;
37979
37980 - tty->open_count++;
37981 + local_inc(&tty->open_count);
37982
37983 tty->linux_tty = linux_tty;
37984 linux_tty->driver_data = tty;
37985 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37986
37987 static void do_ipw_close(struct ipw_tty *tty)
37988 {
37989 - tty->open_count--;
37990 -
37991 - if (tty->open_count == 0) {
37992 + if (local_dec_return(&tty->open_count) == 0) {
37993 struct tty_struct *linux_tty = tty->linux_tty;
37994
37995 if (linux_tty != NULL) {
37996 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37997 return;
37998
37999 mutex_lock(&tty->ipw_tty_mutex);
38000 - if (tty->open_count == 0) {
38001 + if (local_read(&tty->open_count) == 0) {
38002 mutex_unlock(&tty->ipw_tty_mutex);
38003 return;
38004 }
38005 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
38006 return;
38007 }
38008
38009 - if (!tty->open_count) {
38010 + if (!local_read(&tty->open_count)) {
38011 mutex_unlock(&tty->ipw_tty_mutex);
38012 return;
38013 }
38014 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
38015 return -ENODEV;
38016
38017 mutex_lock(&tty->ipw_tty_mutex);
38018 - if (!tty->open_count) {
38019 + if (!local_read(&tty->open_count)) {
38020 mutex_unlock(&tty->ipw_tty_mutex);
38021 return -EINVAL;
38022 }
38023 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
38024 if (!tty)
38025 return -ENODEV;
38026
38027 - if (!tty->open_count)
38028 + if (!local_read(&tty->open_count))
38029 return -EINVAL;
38030
38031 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
38032 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
38033 if (!tty)
38034 return 0;
38035
38036 - if (!tty->open_count)
38037 + if (!local_read(&tty->open_count))
38038 return 0;
38039
38040 return tty->tx_bytes_queued;
38041 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
38042 if (!tty)
38043 return -ENODEV;
38044
38045 - if (!tty->open_count)
38046 + if (!local_read(&tty->open_count))
38047 return -EINVAL;
38048
38049 return get_control_lines(tty);
38050 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
38051 if (!tty)
38052 return -ENODEV;
38053
38054 - if (!tty->open_count)
38055 + if (!local_read(&tty->open_count))
38056 return -EINVAL;
38057
38058 return set_control_lines(tty, set, clear);
38059 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
38060 if (!tty)
38061 return -ENODEV;
38062
38063 - if (!tty->open_count)
38064 + if (!local_read(&tty->open_count))
38065 return -EINVAL;
38066
38067 /* FIXME: Exactly how is the tty object locked here .. */
38068 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
38069 against a parallel ioctl etc */
38070 mutex_lock(&ttyj->ipw_tty_mutex);
38071 }
38072 - while (ttyj->open_count)
38073 + while (local_read(&ttyj->open_count))
38074 do_ipw_close(ttyj);
38075 ipwireless_disassociate_network_ttys(network,
38076 ttyj->channel_idx);
38077 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
38078 index fc7bbba..9527e93 100644
38079 --- a/drivers/tty/n_gsm.c
38080 +++ b/drivers/tty/n_gsm.c
38081 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
38082 kref_init(&dlci->ref);
38083 mutex_init(&dlci->mutex);
38084 dlci->fifo = &dlci->_fifo;
38085 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
38086 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
38087 kfree(dlci);
38088 return NULL;
38089 }
38090 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
38091 index d2256d0..97476fa 100644
38092 --- a/drivers/tty/n_tty.c
38093 +++ b/drivers/tty/n_tty.c
38094 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
38095 {
38096 *ops = tty_ldisc_N_TTY;
38097 ops->owner = NULL;
38098 - ops->refcount = ops->flags = 0;
38099 + atomic_set(&ops->refcount, 0);
38100 + ops->flags = 0;
38101 }
38102 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
38103 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
38104 index d8653ab..f8afd9d 100644
38105 --- a/drivers/tty/pty.c
38106 +++ b/drivers/tty/pty.c
38107 @@ -765,8 +765,10 @@ static void __init unix98_pty_init(void)
38108 register_sysctl_table(pty_root_table);
38109
38110 /* Now create the /dev/ptmx special device */
38111 + pax_open_kernel();
38112 tty_default_fops(&ptmx_fops);
38113 - ptmx_fops.open = ptmx_open;
38114 + *(void **)&ptmx_fops.open = ptmx_open;
38115 + pax_close_kernel();
38116
38117 cdev_init(&ptmx_cdev, &ptmx_fops);
38118 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
38119 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
38120 index 2b42a01..32a2ed3 100644
38121 --- a/drivers/tty/serial/kgdboc.c
38122 +++ b/drivers/tty/serial/kgdboc.c
38123 @@ -24,8 +24,9 @@
38124 #define MAX_CONFIG_LEN 40
38125
38126 static struct kgdb_io kgdboc_io_ops;
38127 +static struct kgdb_io kgdboc_io_ops_console;
38128
38129 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
38130 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
38131 static int configured = -1;
38132
38133 static char config[MAX_CONFIG_LEN];
38134 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
38135 kgdboc_unregister_kbd();
38136 if (configured == 1)
38137 kgdb_unregister_io_module(&kgdboc_io_ops);
38138 + else if (configured == 2)
38139 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
38140 }
38141
38142 static int configure_kgdboc(void)
38143 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
38144 int err;
38145 char *cptr = config;
38146 struct console *cons;
38147 + int is_console = 0;
38148
38149 err = kgdboc_option_setup(config);
38150 if (err || !strlen(config) || isspace(config[0]))
38151 goto noconfig;
38152
38153 err = -ENODEV;
38154 - kgdboc_io_ops.is_console = 0;
38155 kgdb_tty_driver = NULL;
38156
38157 kgdboc_use_kms = 0;
38158 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
38159 int idx;
38160 if (cons->device && cons->device(cons, &idx) == p &&
38161 idx == tty_line) {
38162 - kgdboc_io_ops.is_console = 1;
38163 + is_console = 1;
38164 break;
38165 }
38166 cons = cons->next;
38167 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
38168 kgdb_tty_line = tty_line;
38169
38170 do_register:
38171 - err = kgdb_register_io_module(&kgdboc_io_ops);
38172 + if (is_console) {
38173 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
38174 + configured = 2;
38175 + } else {
38176 + err = kgdb_register_io_module(&kgdboc_io_ops);
38177 + configured = 1;
38178 + }
38179 if (err)
38180 goto noconfig;
38181
38182 - configured = 1;
38183 -
38184 return 0;
38185
38186 noconfig:
38187 @@ -213,7 +220,7 @@ noconfig:
38188 static int __init init_kgdboc(void)
38189 {
38190 /* Already configured? */
38191 - if (configured == 1)
38192 + if (configured >= 1)
38193 return 0;
38194
38195 return configure_kgdboc();
38196 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
38197 if (config[len - 1] == '\n')
38198 config[len - 1] = '\0';
38199
38200 - if (configured == 1)
38201 + if (configured >= 1)
38202 cleanup_kgdboc();
38203
38204 /* Go and configure with the new params. */
38205 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
38206 .post_exception = kgdboc_post_exp_handler,
38207 };
38208
38209 +static struct kgdb_io kgdboc_io_ops_console = {
38210 + .name = "kgdboc",
38211 + .read_char = kgdboc_get_char,
38212 + .write_char = kgdboc_put_char,
38213 + .pre_exception = kgdboc_pre_exp_handler,
38214 + .post_exception = kgdboc_post_exp_handler,
38215 + .is_console = 1
38216 +};
38217 +
38218 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
38219 /* This is only available if kgdboc is a built in for early debugging */
38220 static int __init kgdboc_early_init(char *opt)
38221 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
38222 index 7867b7c..b3c119d 100644
38223 --- a/drivers/tty/sysrq.c
38224 +++ b/drivers/tty/sysrq.c
38225 @@ -862,7 +862,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
38226 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
38227 size_t count, loff_t *ppos)
38228 {
38229 - if (count) {
38230 + if (count && capable(CAP_SYS_ADMIN)) {
38231 char c;
38232
38233 if (get_user(c, buf))
38234 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
38235 index e41b9bb..84002fb 100644
38236 --- a/drivers/tty/tty_io.c
38237 +++ b/drivers/tty/tty_io.c
38238 @@ -3291,7 +3291,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
38239
38240 void tty_default_fops(struct file_operations *fops)
38241 {
38242 - *fops = tty_fops;
38243 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
38244 }
38245
38246 /*
38247 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
38248 index 24b95db..9c078d0 100644
38249 --- a/drivers/tty/tty_ldisc.c
38250 +++ b/drivers/tty/tty_ldisc.c
38251 @@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
38252 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
38253 struct tty_ldisc_ops *ldo = ld->ops;
38254
38255 - ldo->refcount--;
38256 + atomic_dec(&ldo->refcount);
38257 module_put(ldo->owner);
38258 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38259
38260 @@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
38261 spin_lock_irqsave(&tty_ldisc_lock, flags);
38262 tty_ldiscs[disc] = new_ldisc;
38263 new_ldisc->num = disc;
38264 - new_ldisc->refcount = 0;
38265 + atomic_set(&new_ldisc->refcount, 0);
38266 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38267
38268 return ret;
38269 @@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
38270 return -EINVAL;
38271
38272 spin_lock_irqsave(&tty_ldisc_lock, flags);
38273 - if (tty_ldiscs[disc]->refcount)
38274 + if (atomic_read(&tty_ldiscs[disc]->refcount))
38275 ret = -EBUSY;
38276 else
38277 tty_ldiscs[disc] = NULL;
38278 @@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
38279 if (ldops) {
38280 ret = ERR_PTR(-EAGAIN);
38281 if (try_module_get(ldops->owner)) {
38282 - ldops->refcount++;
38283 + atomic_inc(&ldops->refcount);
38284 ret = ldops;
38285 }
38286 }
38287 @@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
38288 unsigned long flags;
38289
38290 spin_lock_irqsave(&tty_ldisc_lock, flags);
38291 - ldops->refcount--;
38292 + atomic_dec(&ldops->refcount);
38293 module_put(ldops->owner);
38294 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38295 }
38296 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
38297 index a605549..6bd3c96 100644
38298 --- a/drivers/tty/vt/keyboard.c
38299 +++ b/drivers/tty/vt/keyboard.c
38300 @@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
38301 kbd->kbdmode == VC_OFF) &&
38302 value != KVAL(K_SAK))
38303 return; /* SAK is allowed even in raw mode */
38304 +
38305 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
38306 + {
38307 + void *func = fn_handler[value];
38308 + if (func == fn_show_state || func == fn_show_ptregs ||
38309 + func == fn_show_mem)
38310 + return;
38311 + }
38312 +#endif
38313 +
38314 fn_handler[value](vc);
38315 }
38316
38317 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
38318 index 65447c5..0526f0a 100644
38319 --- a/drivers/tty/vt/vt_ioctl.c
38320 +++ b/drivers/tty/vt/vt_ioctl.c
38321 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
38322 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
38323 return -EFAULT;
38324
38325 - if (!capable(CAP_SYS_TTY_CONFIG))
38326 - perm = 0;
38327 -
38328 switch (cmd) {
38329 case KDGKBENT:
38330 key_map = key_maps[s];
38331 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
38332 val = (i ? K_HOLE : K_NOSUCHMAP);
38333 return put_user(val, &user_kbe->kb_value);
38334 case KDSKBENT:
38335 + if (!capable(CAP_SYS_TTY_CONFIG))
38336 + perm = 0;
38337 +
38338 if (!perm)
38339 return -EPERM;
38340 if (!i && v == K_NOSUCHMAP) {
38341 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38342 int i, j, k;
38343 int ret;
38344
38345 - if (!capable(CAP_SYS_TTY_CONFIG))
38346 - perm = 0;
38347 -
38348 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
38349 if (!kbs) {
38350 ret = -ENOMEM;
38351 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38352 kfree(kbs);
38353 return ((p && *p) ? -EOVERFLOW : 0);
38354 case KDSKBSENT:
38355 + if (!capable(CAP_SYS_TTY_CONFIG))
38356 + perm = 0;
38357 +
38358 if (!perm) {
38359 ret = -EPERM;
38360 goto reterr;
38361 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
38362 index a783d53..cb30d94 100644
38363 --- a/drivers/uio/uio.c
38364 +++ b/drivers/uio/uio.c
38365 @@ -25,6 +25,7 @@
38366 #include <linux/kobject.h>
38367 #include <linux/cdev.h>
38368 #include <linux/uio_driver.h>
38369 +#include <asm/local.h>
38370
38371 #define UIO_MAX_DEVICES (1U << MINORBITS)
38372
38373 @@ -32,10 +33,10 @@ struct uio_device {
38374 struct module *owner;
38375 struct device *dev;
38376 int minor;
38377 - atomic_t event;
38378 + atomic_unchecked_t event;
38379 struct fasync_struct *async_queue;
38380 wait_queue_head_t wait;
38381 - int vma_count;
38382 + local_t vma_count;
38383 struct uio_info *info;
38384 struct kobject *map_dir;
38385 struct kobject *portio_dir;
38386 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
38387 struct device_attribute *attr, char *buf)
38388 {
38389 struct uio_device *idev = dev_get_drvdata(dev);
38390 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38391 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38392 }
38393
38394 static struct device_attribute uio_class_attributes[] = {
38395 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
38396 {
38397 struct uio_device *idev = info->uio_dev;
38398
38399 - atomic_inc(&idev->event);
38400 + atomic_inc_unchecked(&idev->event);
38401 wake_up_interruptible(&idev->wait);
38402 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38403 }
38404 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
38405 }
38406
38407 listener->dev = idev;
38408 - listener->event_count = atomic_read(&idev->event);
38409 + listener->event_count = atomic_read_unchecked(&idev->event);
38410 filep->private_data = listener;
38411
38412 if (idev->info->open) {
38413 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
38414 return -EIO;
38415
38416 poll_wait(filep, &idev->wait, wait);
38417 - if (listener->event_count != atomic_read(&idev->event))
38418 + if (listener->event_count != atomic_read_unchecked(&idev->event))
38419 return POLLIN | POLLRDNORM;
38420 return 0;
38421 }
38422 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
38423 do {
38424 set_current_state(TASK_INTERRUPTIBLE);
38425
38426 - event_count = atomic_read(&idev->event);
38427 + event_count = atomic_read_unchecked(&idev->event);
38428 if (event_count != listener->event_count) {
38429 if (copy_to_user(buf, &event_count, count))
38430 retval = -EFAULT;
38431 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
38432 static void uio_vma_open(struct vm_area_struct *vma)
38433 {
38434 struct uio_device *idev = vma->vm_private_data;
38435 - idev->vma_count++;
38436 + local_inc(&idev->vma_count);
38437 }
38438
38439 static void uio_vma_close(struct vm_area_struct *vma)
38440 {
38441 struct uio_device *idev = vma->vm_private_data;
38442 - idev->vma_count--;
38443 + local_dec(&idev->vma_count);
38444 }
38445
38446 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38447 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
38448 idev->owner = owner;
38449 idev->info = info;
38450 init_waitqueue_head(&idev->wait);
38451 - atomic_set(&idev->event, 0);
38452 + atomic_set_unchecked(&idev->event, 0);
38453
38454 ret = uio_get_minor(idev);
38455 if (ret)
38456 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
38457 index 98b89fe..aff824e 100644
38458 --- a/drivers/usb/atm/cxacru.c
38459 +++ b/drivers/usb/atm/cxacru.c
38460 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
38461 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38462 if (ret < 2)
38463 return -EINVAL;
38464 - if (index < 0 || index > 0x7f)
38465 + if (index > 0x7f)
38466 return -EINVAL;
38467 pos += tmp;
38468
38469 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38470 index d3448ca..d2864ca 100644
38471 --- a/drivers/usb/atm/usbatm.c
38472 +++ b/drivers/usb/atm/usbatm.c
38473 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38474 if (printk_ratelimit())
38475 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38476 __func__, vpi, vci);
38477 - atomic_inc(&vcc->stats->rx_err);
38478 + atomic_inc_unchecked(&vcc->stats->rx_err);
38479 return;
38480 }
38481
38482 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38483 if (length > ATM_MAX_AAL5_PDU) {
38484 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38485 __func__, length, vcc);
38486 - atomic_inc(&vcc->stats->rx_err);
38487 + atomic_inc_unchecked(&vcc->stats->rx_err);
38488 goto out;
38489 }
38490
38491 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38492 if (sarb->len < pdu_length) {
38493 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38494 __func__, pdu_length, sarb->len, vcc);
38495 - atomic_inc(&vcc->stats->rx_err);
38496 + atomic_inc_unchecked(&vcc->stats->rx_err);
38497 goto out;
38498 }
38499
38500 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38501 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38502 __func__, vcc);
38503 - atomic_inc(&vcc->stats->rx_err);
38504 + atomic_inc_unchecked(&vcc->stats->rx_err);
38505 goto out;
38506 }
38507
38508 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38509 if (printk_ratelimit())
38510 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38511 __func__, length);
38512 - atomic_inc(&vcc->stats->rx_drop);
38513 + atomic_inc_unchecked(&vcc->stats->rx_drop);
38514 goto out;
38515 }
38516
38517 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38518
38519 vcc->push(vcc, skb);
38520
38521 - atomic_inc(&vcc->stats->rx);
38522 + atomic_inc_unchecked(&vcc->stats->rx);
38523 out:
38524 skb_trim(sarb, 0);
38525 }
38526 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
38527 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38528
38529 usbatm_pop(vcc, skb);
38530 - atomic_inc(&vcc->stats->tx);
38531 + atomic_inc_unchecked(&vcc->stats->tx);
38532
38533 skb = skb_dequeue(&instance->sndqueue);
38534 }
38535 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
38536 if (!left--)
38537 return sprintf(page,
38538 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38539 - atomic_read(&atm_dev->stats.aal5.tx),
38540 - atomic_read(&atm_dev->stats.aal5.tx_err),
38541 - atomic_read(&atm_dev->stats.aal5.rx),
38542 - atomic_read(&atm_dev->stats.aal5.rx_err),
38543 - atomic_read(&atm_dev->stats.aal5.rx_drop));
38544 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38545 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38546 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38547 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38548 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38549
38550 if (!left--) {
38551 if (instance->disconnected)
38552 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38553 index d956965..4179a77 100644
38554 --- a/drivers/usb/core/devices.c
38555 +++ b/drivers/usb/core/devices.c
38556 @@ -126,7 +126,7 @@ static const char format_endpt[] =
38557 * time it gets called.
38558 */
38559 static struct device_connect_event {
38560 - atomic_t count;
38561 + atomic_unchecked_t count;
38562 wait_queue_head_t wait;
38563 } device_event = {
38564 .count = ATOMIC_INIT(1),
38565 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38566
38567 void usbfs_conn_disc_event(void)
38568 {
38569 - atomic_add(2, &device_event.count);
38570 + atomic_add_unchecked(2, &device_event.count);
38571 wake_up(&device_event.wait);
38572 }
38573
38574 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38575
38576 poll_wait(file, &device_event.wait, wait);
38577
38578 - event_count = atomic_read(&device_event.count);
38579 + event_count = atomic_read_unchecked(&device_event.count);
38580 if (file->f_version != event_count) {
38581 file->f_version = event_count;
38582 return POLLIN | POLLRDNORM;
38583 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38584 index 1fc8f12..20647c1 100644
38585 --- a/drivers/usb/early/ehci-dbgp.c
38586 +++ b/drivers/usb/early/ehci-dbgp.c
38587 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38588
38589 #ifdef CONFIG_KGDB
38590 static struct kgdb_io kgdbdbgp_io_ops;
38591 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38592 +static struct kgdb_io kgdbdbgp_io_ops_console;
38593 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38594 #else
38595 #define dbgp_kgdb_mode (0)
38596 #endif
38597 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38598 .write_char = kgdbdbgp_write_char,
38599 };
38600
38601 +static struct kgdb_io kgdbdbgp_io_ops_console = {
38602 + .name = "kgdbdbgp",
38603 + .read_char = kgdbdbgp_read_char,
38604 + .write_char = kgdbdbgp_write_char,
38605 + .is_console = 1
38606 +};
38607 +
38608 static int kgdbdbgp_wait_time;
38609
38610 static int __init kgdbdbgp_parse_config(char *str)
38611 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38612 ptr++;
38613 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38614 }
38615 - kgdb_register_io_module(&kgdbdbgp_io_ops);
38616 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38617 + if (early_dbgp_console.index != -1)
38618 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38619 + else
38620 + kgdb_register_io_module(&kgdbdbgp_io_ops);
38621
38622 return 0;
38623 }
38624 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38625 index d6bea3e..60b250e 100644
38626 --- a/drivers/usb/wusbcore/wa-hc.h
38627 +++ b/drivers/usb/wusbcore/wa-hc.h
38628 @@ -192,7 +192,7 @@ struct wahc {
38629 struct list_head xfer_delayed_list;
38630 spinlock_t xfer_list_lock;
38631 struct work_struct xfer_work;
38632 - atomic_t xfer_id_count;
38633 + atomic_unchecked_t xfer_id_count;
38634 };
38635
38636
38637 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38638 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38639 spin_lock_init(&wa->xfer_list_lock);
38640 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38641 - atomic_set(&wa->xfer_id_count, 1);
38642 + atomic_set_unchecked(&wa->xfer_id_count, 1);
38643 }
38644
38645 /**
38646 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38647 index 57c01ab..8a05959 100644
38648 --- a/drivers/usb/wusbcore/wa-xfer.c
38649 +++ b/drivers/usb/wusbcore/wa-xfer.c
38650 @@ -296,7 +296,7 @@ out:
38651 */
38652 static void wa_xfer_id_init(struct wa_xfer *xfer)
38653 {
38654 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38655 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38656 }
38657
38658 /*
38659 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38660 index c14c42b..f955cc2 100644
38661 --- a/drivers/vhost/vhost.c
38662 +++ b/drivers/vhost/vhost.c
38663 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38664 return 0;
38665 }
38666
38667 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38668 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38669 {
38670 struct file *eventfp, *filep = NULL,
38671 *pollstart = NULL, *pollstop = NULL;
38672 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38673 index b0b2ac3..89a4399 100644
38674 --- a/drivers/video/aty/aty128fb.c
38675 +++ b/drivers/video/aty/aty128fb.c
38676 @@ -148,7 +148,7 @@ enum {
38677 };
38678
38679 /* Must match above enum */
38680 -static const char *r128_family[] __devinitdata = {
38681 +static const char *r128_family[] __devinitconst = {
38682 "AGP",
38683 "PCI",
38684 "PRO AGP",
38685 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38686 index 5c3960d..15cf8fc 100644
38687 --- a/drivers/video/fbcmap.c
38688 +++ b/drivers/video/fbcmap.c
38689 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38690 rc = -ENODEV;
38691 goto out;
38692 }
38693 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38694 - !info->fbops->fb_setcmap)) {
38695 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38696 rc = -EINVAL;
38697 goto out1;
38698 }
38699 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38700 index c6ce416..3b9b642 100644
38701 --- a/drivers/video/fbmem.c
38702 +++ b/drivers/video/fbmem.c
38703 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38704 image->dx += image->width + 8;
38705 }
38706 } else if (rotate == FB_ROTATE_UD) {
38707 - for (x = 0; x < num && image->dx >= 0; x++) {
38708 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38709 info->fbops->fb_imageblit(info, image);
38710 image->dx -= image->width + 8;
38711 }
38712 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38713 image->dy += image->height + 8;
38714 }
38715 } else if (rotate == FB_ROTATE_CCW) {
38716 - for (x = 0; x < num && image->dy >= 0; x++) {
38717 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38718 info->fbops->fb_imageblit(info, image);
38719 image->dy -= image->height + 8;
38720 }
38721 @@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38722 return -EFAULT;
38723 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38724 return -EINVAL;
38725 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38726 + if (con2fb.framebuffer >= FB_MAX)
38727 return -EINVAL;
38728 if (!registered_fb[con2fb.framebuffer])
38729 request_module("fb%d", con2fb.framebuffer);
38730 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38731 index 5a5d092..265c5ed 100644
38732 --- a/drivers/video/geode/gx1fb_core.c
38733 +++ b/drivers/video/geode/gx1fb_core.c
38734 @@ -29,7 +29,7 @@ static int crt_option = 1;
38735 static char panel_option[32] = "";
38736
38737 /* Modes relevant to the GX1 (taken from modedb.c) */
38738 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
38739 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
38740 /* 640x480-60 VESA */
38741 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38742 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38743 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38744 index 0fad23f..0e9afa4 100644
38745 --- a/drivers/video/gxt4500.c
38746 +++ b/drivers/video/gxt4500.c
38747 @@ -156,7 +156,7 @@ struct gxt4500_par {
38748 static char *mode_option;
38749
38750 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38751 -static const struct fb_videomode defaultmode __devinitdata = {
38752 +static const struct fb_videomode defaultmode __devinitconst = {
38753 .refresh = 60,
38754 .xres = 1280,
38755 .yres = 1024,
38756 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38757 return 0;
38758 }
38759
38760 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38761 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38762 .id = "IBM GXT4500P",
38763 .type = FB_TYPE_PACKED_PIXELS,
38764 .visual = FB_VISUAL_PSEUDOCOLOR,
38765 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38766 index 7672d2e..b56437f 100644
38767 --- a/drivers/video/i810/i810_accel.c
38768 +++ b/drivers/video/i810/i810_accel.c
38769 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38770 }
38771 }
38772 printk("ringbuffer lockup!!!\n");
38773 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38774 i810_report_error(mmio);
38775 par->dev_flags |= LOCKUP;
38776 info->pixmap.scan_align = 1;
38777 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38778 index b83f361..2b05a91 100644
38779 --- a/drivers/video/i810/i810_main.c
38780 +++ b/drivers/video/i810/i810_main.c
38781 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38782 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38783
38784 /* PCI */
38785 -static const char *i810_pci_list[] __devinitdata = {
38786 +static const char *i810_pci_list[] __devinitconst = {
38787 "Intel(R) 810 Framebuffer Device" ,
38788 "Intel(R) 810-DC100 Framebuffer Device" ,
38789 "Intel(R) 810E Framebuffer Device" ,
38790 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38791 index de36693..3c63fc2 100644
38792 --- a/drivers/video/jz4740_fb.c
38793 +++ b/drivers/video/jz4740_fb.c
38794 @@ -136,7 +136,7 @@ struct jzfb {
38795 uint32_t pseudo_palette[16];
38796 };
38797
38798 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38799 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38800 .id = "JZ4740 FB",
38801 .type = FB_TYPE_PACKED_PIXELS,
38802 .visual = FB_VISUAL_TRUECOLOR,
38803 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38804 index 3c14e43..eafa544 100644
38805 --- a/drivers/video/logo/logo_linux_clut224.ppm
38806 +++ b/drivers/video/logo/logo_linux_clut224.ppm
38807 @@ -1,1604 +1,1123 @@
38808 P3
38809 -# Standard 224-color Linux logo
38810 80 80
38811 255
38812 - 0 0 0 0 0 0 0 0 0 0 0 0
38813 - 0 0 0 0 0 0 0 0 0 0 0 0
38814 - 0 0 0 0 0 0 0 0 0 0 0 0
38815 - 0 0 0 0 0 0 0 0 0 0 0 0
38816 - 0 0 0 0 0 0 0 0 0 0 0 0
38817 - 0 0 0 0 0 0 0 0 0 0 0 0
38818 - 0 0 0 0 0 0 0 0 0 0 0 0
38819 - 0 0 0 0 0 0 0 0 0 0 0 0
38820 - 0 0 0 0 0 0 0 0 0 0 0 0
38821 - 6 6 6 6 6 6 10 10 10 10 10 10
38822 - 10 10 10 6 6 6 6 6 6 6 6 6
38823 - 0 0 0 0 0 0 0 0 0 0 0 0
38824 - 0 0 0 0 0 0 0 0 0 0 0 0
38825 - 0 0 0 0 0 0 0 0 0 0 0 0
38826 - 0 0 0 0 0 0 0 0 0 0 0 0
38827 - 0 0 0 0 0 0 0 0 0 0 0 0
38828 - 0 0 0 0 0 0 0 0 0 0 0 0
38829 - 0 0 0 0 0 0 0 0 0 0 0 0
38830 - 0 0 0 0 0 0 0 0 0 0 0 0
38831 - 0 0 0 0 0 0 0 0 0 0 0 0
38832 - 0 0 0 0 0 0 0 0 0 0 0 0
38833 - 0 0 0 0 0 0 0 0 0 0 0 0
38834 - 0 0 0 0 0 0 0 0 0 0 0 0
38835 - 0 0 0 0 0 0 0 0 0 0 0 0
38836 - 0 0 0 0 0 0 0 0 0 0 0 0
38837 - 0 0 0 0 0 0 0 0 0 0 0 0
38838 - 0 0 0 0 0 0 0 0 0 0 0 0
38839 - 0 0 0 0 0 0 0 0 0 0 0 0
38840 - 0 0 0 6 6 6 10 10 10 14 14 14
38841 - 22 22 22 26 26 26 30 30 30 34 34 34
38842 - 30 30 30 30 30 30 26 26 26 18 18 18
38843 - 14 14 14 10 10 10 6 6 6 0 0 0
38844 - 0 0 0 0 0 0 0 0 0 0 0 0
38845 - 0 0 0 0 0 0 0 0 0 0 0 0
38846 - 0 0 0 0 0 0 0 0 0 0 0 0
38847 - 0 0 0 0 0 0 0 0 0 0 0 0
38848 - 0 0 0 0 0 0 0 0 0 0 0 0
38849 - 0 0 0 0 0 0 0 0 0 0 0 0
38850 - 0 0 0 0 0 0 0 0 0 0 0 0
38851 - 0 0 0 0 0 0 0 0 0 0 0 0
38852 - 0 0 0 0 0 0 0 0 0 0 0 0
38853 - 0 0 0 0 0 1 0 0 1 0 0 0
38854 - 0 0 0 0 0 0 0 0 0 0 0 0
38855 - 0 0 0 0 0 0 0 0 0 0 0 0
38856 - 0 0 0 0 0 0 0 0 0 0 0 0
38857 - 0 0 0 0 0 0 0 0 0 0 0 0
38858 - 0 0 0 0 0 0 0 0 0 0 0 0
38859 - 0 0 0 0 0 0 0 0 0 0 0 0
38860 - 6 6 6 14 14 14 26 26 26 42 42 42
38861 - 54 54 54 66 66 66 78 78 78 78 78 78
38862 - 78 78 78 74 74 74 66 66 66 54 54 54
38863 - 42 42 42 26 26 26 18 18 18 10 10 10
38864 - 6 6 6 0 0 0 0 0 0 0 0 0
38865 - 0 0 0 0 0 0 0 0 0 0 0 0
38866 - 0 0 0 0 0 0 0 0 0 0 0 0
38867 - 0 0 0 0 0 0 0 0 0 0 0 0
38868 - 0 0 0 0 0 0 0 0 0 0 0 0
38869 - 0 0 0 0 0 0 0 0 0 0 0 0
38870 - 0 0 0 0 0 0 0 0 0 0 0 0
38871 - 0 0 0 0 0 0 0 0 0 0 0 0
38872 - 0 0 0 0 0 0 0 0 0 0 0 0
38873 - 0 0 1 0 0 0 0 0 0 0 0 0
38874 - 0 0 0 0 0 0 0 0 0 0 0 0
38875 - 0 0 0 0 0 0 0 0 0 0 0 0
38876 - 0 0 0 0 0 0 0 0 0 0 0 0
38877 - 0 0 0 0 0 0 0 0 0 0 0 0
38878 - 0 0 0 0 0 0 0 0 0 0 0 0
38879 - 0 0 0 0 0 0 0 0 0 10 10 10
38880 - 22 22 22 42 42 42 66 66 66 86 86 86
38881 - 66 66 66 38 38 38 38 38 38 22 22 22
38882 - 26 26 26 34 34 34 54 54 54 66 66 66
38883 - 86 86 86 70 70 70 46 46 46 26 26 26
38884 - 14 14 14 6 6 6 0 0 0 0 0 0
38885 - 0 0 0 0 0 0 0 0 0 0 0 0
38886 - 0 0 0 0 0 0 0 0 0 0 0 0
38887 - 0 0 0 0 0 0 0 0 0 0 0 0
38888 - 0 0 0 0 0 0 0 0 0 0 0 0
38889 - 0 0 0 0 0 0 0 0 0 0 0 0
38890 - 0 0 0 0 0 0 0 0 0 0 0 0
38891 - 0 0 0 0 0 0 0 0 0 0 0 0
38892 - 0 0 0 0 0 0 0 0 0 0 0 0
38893 - 0 0 1 0 0 1 0 0 1 0 0 0
38894 - 0 0 0 0 0 0 0 0 0 0 0 0
38895 - 0 0 0 0 0 0 0 0 0 0 0 0
38896 - 0 0 0 0 0 0 0 0 0 0 0 0
38897 - 0 0 0 0 0 0 0 0 0 0 0 0
38898 - 0 0 0 0 0 0 0 0 0 0 0 0
38899 - 0 0 0 0 0 0 10 10 10 26 26 26
38900 - 50 50 50 82 82 82 58 58 58 6 6 6
38901 - 2 2 6 2 2 6 2 2 6 2 2 6
38902 - 2 2 6 2 2 6 2 2 6 2 2 6
38903 - 6 6 6 54 54 54 86 86 86 66 66 66
38904 - 38 38 38 18 18 18 6 6 6 0 0 0
38905 - 0 0 0 0 0 0 0 0 0 0 0 0
38906 - 0 0 0 0 0 0 0 0 0 0 0 0
38907 - 0 0 0 0 0 0 0 0 0 0 0 0
38908 - 0 0 0 0 0 0 0 0 0 0 0 0
38909 - 0 0 0 0 0 0 0 0 0 0 0 0
38910 - 0 0 0 0 0 0 0 0 0 0 0 0
38911 - 0 0 0 0 0 0 0 0 0 0 0 0
38912 - 0 0 0 0 0 0 0 0 0 0 0 0
38913 - 0 0 0 0 0 0 0 0 0 0 0 0
38914 - 0 0 0 0 0 0 0 0 0 0 0 0
38915 - 0 0 0 0 0 0 0 0 0 0 0 0
38916 - 0 0 0 0 0 0 0 0 0 0 0 0
38917 - 0 0 0 0 0 0 0 0 0 0 0 0
38918 - 0 0 0 0 0 0 0 0 0 0 0 0
38919 - 0 0 0 6 6 6 22 22 22 50 50 50
38920 - 78 78 78 34 34 34 2 2 6 2 2 6
38921 - 2 2 6 2 2 6 2 2 6 2 2 6
38922 - 2 2 6 2 2 6 2 2 6 2 2 6
38923 - 2 2 6 2 2 6 6 6 6 70 70 70
38924 - 78 78 78 46 46 46 22 22 22 6 6 6
38925 - 0 0 0 0 0 0 0 0 0 0 0 0
38926 - 0 0 0 0 0 0 0 0 0 0 0 0
38927 - 0 0 0 0 0 0 0 0 0 0 0 0
38928 - 0 0 0 0 0 0 0 0 0 0 0 0
38929 - 0 0 0 0 0 0 0 0 0 0 0 0
38930 - 0 0 0 0 0 0 0 0 0 0 0 0
38931 - 0 0 0 0 0 0 0 0 0 0 0 0
38932 - 0 0 0 0 0 0 0 0 0 0 0 0
38933 - 0 0 1 0 0 1 0 0 1 0 0 0
38934 - 0 0 0 0 0 0 0 0 0 0 0 0
38935 - 0 0 0 0 0 0 0 0 0 0 0 0
38936 - 0 0 0 0 0 0 0 0 0 0 0 0
38937 - 0 0 0 0 0 0 0 0 0 0 0 0
38938 - 0 0 0 0 0 0 0 0 0 0 0 0
38939 - 6 6 6 18 18 18 42 42 42 82 82 82
38940 - 26 26 26 2 2 6 2 2 6 2 2 6
38941 - 2 2 6 2 2 6 2 2 6 2 2 6
38942 - 2 2 6 2 2 6 2 2 6 14 14 14
38943 - 46 46 46 34 34 34 6 6 6 2 2 6
38944 - 42 42 42 78 78 78 42 42 42 18 18 18
38945 - 6 6 6 0 0 0 0 0 0 0 0 0
38946 - 0 0 0 0 0 0 0 0 0 0 0 0
38947 - 0 0 0 0 0 0 0 0 0 0 0 0
38948 - 0 0 0 0 0 0 0 0 0 0 0 0
38949 - 0 0 0 0 0 0 0 0 0 0 0 0
38950 - 0 0 0 0 0 0 0 0 0 0 0 0
38951 - 0 0 0 0 0 0 0 0 0 0 0 0
38952 - 0 0 0 0 0 0 0 0 0 0 0 0
38953 - 0 0 1 0 0 0 0 0 1 0 0 0
38954 - 0 0 0 0 0 0 0 0 0 0 0 0
38955 - 0 0 0 0 0 0 0 0 0 0 0 0
38956 - 0 0 0 0 0 0 0 0 0 0 0 0
38957 - 0 0 0 0 0 0 0 0 0 0 0 0
38958 - 0 0 0 0 0 0 0 0 0 0 0 0
38959 - 10 10 10 30 30 30 66 66 66 58 58 58
38960 - 2 2 6 2 2 6 2 2 6 2 2 6
38961 - 2 2 6 2 2 6 2 2 6 2 2 6
38962 - 2 2 6 2 2 6 2 2 6 26 26 26
38963 - 86 86 86 101 101 101 46 46 46 10 10 10
38964 - 2 2 6 58 58 58 70 70 70 34 34 34
38965 - 10 10 10 0 0 0 0 0 0 0 0 0
38966 - 0 0 0 0 0 0 0 0 0 0 0 0
38967 - 0 0 0 0 0 0 0 0 0 0 0 0
38968 - 0 0 0 0 0 0 0 0 0 0 0 0
38969 - 0 0 0 0 0 0 0 0 0 0 0 0
38970 - 0 0 0 0 0 0 0 0 0 0 0 0
38971 - 0 0 0 0 0 0 0 0 0 0 0 0
38972 - 0 0 0 0 0 0 0 0 0 0 0 0
38973 - 0 0 1 0 0 1 0 0 1 0 0 0
38974 - 0 0 0 0 0 0 0 0 0 0 0 0
38975 - 0 0 0 0 0 0 0 0 0 0 0 0
38976 - 0 0 0 0 0 0 0 0 0 0 0 0
38977 - 0 0 0 0 0 0 0 0 0 0 0 0
38978 - 0 0 0 0 0 0 0 0 0 0 0 0
38979 - 14 14 14 42 42 42 86 86 86 10 10 10
38980 - 2 2 6 2 2 6 2 2 6 2 2 6
38981 - 2 2 6 2 2 6 2 2 6 2 2 6
38982 - 2 2 6 2 2 6 2 2 6 30 30 30
38983 - 94 94 94 94 94 94 58 58 58 26 26 26
38984 - 2 2 6 6 6 6 78 78 78 54 54 54
38985 - 22 22 22 6 6 6 0 0 0 0 0 0
38986 - 0 0 0 0 0 0 0 0 0 0 0 0
38987 - 0 0 0 0 0 0 0 0 0 0 0 0
38988 - 0 0 0 0 0 0 0 0 0 0 0 0
38989 - 0 0 0 0 0 0 0 0 0 0 0 0
38990 - 0 0 0 0 0 0 0 0 0 0 0 0
38991 - 0 0 0 0 0 0 0 0 0 0 0 0
38992 - 0 0 0 0 0 0 0 0 0 0 0 0
38993 - 0 0 0 0 0 0 0 0 0 0 0 0
38994 - 0 0 0 0 0 0 0 0 0 0 0 0
38995 - 0 0 0 0 0 0 0 0 0 0 0 0
38996 - 0 0 0 0 0 0 0 0 0 0 0 0
38997 - 0 0 0 0 0 0 0 0 0 0 0 0
38998 - 0 0 0 0 0 0 0 0 0 6 6 6
38999 - 22 22 22 62 62 62 62 62 62 2 2 6
39000 - 2 2 6 2 2 6 2 2 6 2 2 6
39001 - 2 2 6 2 2 6 2 2 6 2 2 6
39002 - 2 2 6 2 2 6 2 2 6 26 26 26
39003 - 54 54 54 38 38 38 18 18 18 10 10 10
39004 - 2 2 6 2 2 6 34 34 34 82 82 82
39005 - 38 38 38 14 14 14 0 0 0 0 0 0
39006 - 0 0 0 0 0 0 0 0 0 0 0 0
39007 - 0 0 0 0 0 0 0 0 0 0 0 0
39008 - 0 0 0 0 0 0 0 0 0 0 0 0
39009 - 0 0 0 0 0 0 0 0 0 0 0 0
39010 - 0 0 0 0 0 0 0 0 0 0 0 0
39011 - 0 0 0 0 0 0 0 0 0 0 0 0
39012 - 0 0 0 0 0 0 0 0 0 0 0 0
39013 - 0 0 0 0 0 1 0 0 1 0 0 0
39014 - 0 0 0 0 0 0 0 0 0 0 0 0
39015 - 0 0 0 0 0 0 0 0 0 0 0 0
39016 - 0 0 0 0 0 0 0 0 0 0 0 0
39017 - 0 0 0 0 0 0 0 0 0 0 0 0
39018 - 0 0 0 0 0 0 0 0 0 6 6 6
39019 - 30 30 30 78 78 78 30 30 30 2 2 6
39020 - 2 2 6 2 2 6 2 2 6 2 2 6
39021 - 2 2 6 2 2 6 2 2 6 2 2 6
39022 - 2 2 6 2 2 6 2 2 6 10 10 10
39023 - 10 10 10 2 2 6 2 2 6 2 2 6
39024 - 2 2 6 2 2 6 2 2 6 78 78 78
39025 - 50 50 50 18 18 18 6 6 6 0 0 0
39026 - 0 0 0 0 0 0 0 0 0 0 0 0
39027 - 0 0 0 0 0 0 0 0 0 0 0 0
39028 - 0 0 0 0 0 0 0 0 0 0 0 0
39029 - 0 0 0 0 0 0 0 0 0 0 0 0
39030 - 0 0 0 0 0 0 0 0 0 0 0 0
39031 - 0 0 0 0 0 0 0 0 0 0 0 0
39032 - 0 0 0 0 0 0 0 0 0 0 0 0
39033 - 0 0 1 0 0 0 0 0 0 0 0 0
39034 - 0 0 0 0 0 0 0 0 0 0 0 0
39035 - 0 0 0 0 0 0 0 0 0 0 0 0
39036 - 0 0 0 0 0 0 0 0 0 0 0 0
39037 - 0 0 0 0 0 0 0 0 0 0 0 0
39038 - 0 0 0 0 0 0 0 0 0 10 10 10
39039 - 38 38 38 86 86 86 14 14 14 2 2 6
39040 - 2 2 6 2 2 6 2 2 6 2 2 6
39041 - 2 2 6 2 2 6 2 2 6 2 2 6
39042 - 2 2 6 2 2 6 2 2 6 2 2 6
39043 - 2 2 6 2 2 6 2 2 6 2 2 6
39044 - 2 2 6 2 2 6 2 2 6 54 54 54
39045 - 66 66 66 26 26 26 6 6 6 0 0 0
39046 - 0 0 0 0 0 0 0 0 0 0 0 0
39047 - 0 0 0 0 0 0 0 0 0 0 0 0
39048 - 0 0 0 0 0 0 0 0 0 0 0 0
39049 - 0 0 0 0 0 0 0 0 0 0 0 0
39050 - 0 0 0 0 0 0 0 0 0 0 0 0
39051 - 0 0 0 0 0 0 0 0 0 0 0 0
39052 - 0 0 0 0 0 0 0 0 0 0 0 0
39053 - 0 0 0 0 0 1 0 0 1 0 0 0
39054 - 0 0 0 0 0 0 0 0 0 0 0 0
39055 - 0 0 0 0 0 0 0 0 0 0 0 0
39056 - 0 0 0 0 0 0 0 0 0 0 0 0
39057 - 0 0 0 0 0 0 0 0 0 0 0 0
39058 - 0 0 0 0 0 0 0 0 0 14 14 14
39059 - 42 42 42 82 82 82 2 2 6 2 2 6
39060 - 2 2 6 6 6 6 10 10 10 2 2 6
39061 - 2 2 6 2 2 6 2 2 6 2 2 6
39062 - 2 2 6 2 2 6 2 2 6 6 6 6
39063 - 14 14 14 10 10 10 2 2 6 2 2 6
39064 - 2 2 6 2 2 6 2 2 6 18 18 18
39065 - 82 82 82 34 34 34 10 10 10 0 0 0
39066 - 0 0 0 0 0 0 0 0 0 0 0 0
39067 - 0 0 0 0 0 0 0 0 0 0 0 0
39068 - 0 0 0 0 0 0 0 0 0 0 0 0
39069 - 0 0 0 0 0 0 0 0 0 0 0 0
39070 - 0 0 0 0 0 0 0 0 0 0 0 0
39071 - 0 0 0 0 0 0 0 0 0 0 0 0
39072 - 0 0 0 0 0 0 0 0 0 0 0 0
39073 - 0 0 1 0 0 0 0 0 0 0 0 0
39074 - 0 0 0 0 0 0 0 0 0 0 0 0
39075 - 0 0 0 0 0 0 0 0 0 0 0 0
39076 - 0 0 0 0 0 0 0 0 0 0 0 0
39077 - 0 0 0 0 0 0 0 0 0 0 0 0
39078 - 0 0 0 0 0 0 0 0 0 14 14 14
39079 - 46 46 46 86 86 86 2 2 6 2 2 6
39080 - 6 6 6 6 6 6 22 22 22 34 34 34
39081 - 6 6 6 2 2 6 2 2 6 2 2 6
39082 - 2 2 6 2 2 6 18 18 18 34 34 34
39083 - 10 10 10 50 50 50 22 22 22 2 2 6
39084 - 2 2 6 2 2 6 2 2 6 10 10 10
39085 - 86 86 86 42 42 42 14 14 14 0 0 0
39086 - 0 0 0 0 0 0 0 0 0 0 0 0
39087 - 0 0 0 0 0 0 0 0 0 0 0 0
39088 - 0 0 0 0 0 0 0 0 0 0 0 0
39089 - 0 0 0 0 0 0 0 0 0 0 0 0
39090 - 0 0 0 0 0 0 0 0 0 0 0 0
39091 - 0 0 0 0 0 0 0 0 0 0 0 0
39092 - 0 0 0 0 0 0 0 0 0 0 0 0
39093 - 0 0 1 0 0 1 0 0 1 0 0 0
39094 - 0 0 0 0 0 0 0 0 0 0 0 0
39095 - 0 0 0 0 0 0 0 0 0 0 0 0
39096 - 0 0 0 0 0 0 0 0 0 0 0 0
39097 - 0 0 0 0 0 0 0 0 0 0 0 0
39098 - 0 0 0 0 0 0 0 0 0 14 14 14
39099 - 46 46 46 86 86 86 2 2 6 2 2 6
39100 - 38 38 38 116 116 116 94 94 94 22 22 22
39101 - 22 22 22 2 2 6 2 2 6 2 2 6
39102 - 14 14 14 86 86 86 138 138 138 162 162 162
39103 -154 154 154 38 38 38 26 26 26 6 6 6
39104 - 2 2 6 2 2 6 2 2 6 2 2 6
39105 - 86 86 86 46 46 46 14 14 14 0 0 0
39106 - 0 0 0 0 0 0 0 0 0 0 0 0
39107 - 0 0 0 0 0 0 0 0 0 0 0 0
39108 - 0 0 0 0 0 0 0 0 0 0 0 0
39109 - 0 0 0 0 0 0 0 0 0 0 0 0
39110 - 0 0 0 0 0 0 0 0 0 0 0 0
39111 - 0 0 0 0 0 0 0 0 0 0 0 0
39112 - 0 0 0 0 0 0 0 0 0 0 0 0
39113 - 0 0 0 0 0 0 0 0 0 0 0 0
39114 - 0 0 0 0 0 0 0 0 0 0 0 0
39115 - 0 0 0 0 0 0 0 0 0 0 0 0
39116 - 0 0 0 0 0 0 0 0 0 0 0 0
39117 - 0 0 0 0 0 0 0 0 0 0 0 0
39118 - 0 0 0 0 0 0 0 0 0 14 14 14
39119 - 46 46 46 86 86 86 2 2 6 14 14 14
39120 -134 134 134 198 198 198 195 195 195 116 116 116
39121 - 10 10 10 2 2 6 2 2 6 6 6 6
39122 -101 98 89 187 187 187 210 210 210 218 218 218
39123 -214 214 214 134 134 134 14 14 14 6 6 6
39124 - 2 2 6 2 2 6 2 2 6 2 2 6
39125 - 86 86 86 50 50 50 18 18 18 6 6 6
39126 - 0 0 0 0 0 0 0 0 0 0 0 0
39127 - 0 0 0 0 0 0 0 0 0 0 0 0
39128 - 0 0 0 0 0 0 0 0 0 0 0 0
39129 - 0 0 0 0 0 0 0 0 0 0 0 0
39130 - 0 0 0 0 0 0 0 0 0 0 0 0
39131 - 0 0 0 0 0 0 0 0 0 0 0 0
39132 - 0 0 0 0 0 0 0 0 1 0 0 0
39133 - 0 0 1 0 0 1 0 0 1 0 0 0
39134 - 0 0 0 0 0 0 0 0 0 0 0 0
39135 - 0 0 0 0 0 0 0 0 0 0 0 0
39136 - 0 0 0 0 0 0 0 0 0 0 0 0
39137 - 0 0 0 0 0 0 0 0 0 0 0 0
39138 - 0 0 0 0 0 0 0 0 0 14 14 14
39139 - 46 46 46 86 86 86 2 2 6 54 54 54
39140 -218 218 218 195 195 195 226 226 226 246 246 246
39141 - 58 58 58 2 2 6 2 2 6 30 30 30
39142 -210 210 210 253 253 253 174 174 174 123 123 123
39143 -221 221 221 234 234 234 74 74 74 2 2 6
39144 - 2 2 6 2 2 6 2 2 6 2 2 6
39145 - 70 70 70 58 58 58 22 22 22 6 6 6
39146 - 0 0 0 0 0 0 0 0 0 0 0 0
39147 - 0 0 0 0 0 0 0 0 0 0 0 0
39148 - 0 0 0 0 0 0 0 0 0 0 0 0
39149 - 0 0 0 0 0 0 0 0 0 0 0 0
39150 - 0 0 0 0 0 0 0 0 0 0 0 0
39151 - 0 0 0 0 0 0 0 0 0 0 0 0
39152 - 0 0 0 0 0 0 0 0 0 0 0 0
39153 - 0 0 0 0 0 0 0 0 0 0 0 0
39154 - 0 0 0 0 0 0 0 0 0 0 0 0
39155 - 0 0 0 0 0 0 0 0 0 0 0 0
39156 - 0 0 0 0 0 0 0 0 0 0 0 0
39157 - 0 0 0 0 0 0 0 0 0 0 0 0
39158 - 0 0 0 0 0 0 0 0 0 14 14 14
39159 - 46 46 46 82 82 82 2 2 6 106 106 106
39160 -170 170 170 26 26 26 86 86 86 226 226 226
39161 -123 123 123 10 10 10 14 14 14 46 46 46
39162 -231 231 231 190 190 190 6 6 6 70 70 70
39163 - 90 90 90 238 238 238 158 158 158 2 2 6
39164 - 2 2 6 2 2 6 2 2 6 2 2 6
39165 - 70 70 70 58 58 58 22 22 22 6 6 6
39166 - 0 0 0 0 0 0 0 0 0 0 0 0
39167 - 0 0 0 0 0 0 0 0 0 0 0 0
39168 - 0 0 0 0 0 0 0 0 0 0 0 0
39169 - 0 0 0 0 0 0 0 0 0 0 0 0
39170 - 0 0 0 0 0 0 0 0 0 0 0 0
39171 - 0 0 0 0 0 0 0 0 0 0 0 0
39172 - 0 0 0 0 0 0 0 0 1 0 0 0
39173 - 0 0 1 0 0 1 0 0 1 0 0 0
39174 - 0 0 0 0 0 0 0 0 0 0 0 0
39175 - 0 0 0 0 0 0 0 0 0 0 0 0
39176 - 0 0 0 0 0 0 0 0 0 0 0 0
39177 - 0 0 0 0 0 0 0 0 0 0 0 0
39178 - 0 0 0 0 0 0 0 0 0 14 14 14
39179 - 42 42 42 86 86 86 6 6 6 116 116 116
39180 -106 106 106 6 6 6 70 70 70 149 149 149
39181 -128 128 128 18 18 18 38 38 38 54 54 54
39182 -221 221 221 106 106 106 2 2 6 14 14 14
39183 - 46 46 46 190 190 190 198 198 198 2 2 6
39184 - 2 2 6 2 2 6 2 2 6 2 2 6
39185 - 74 74 74 62 62 62 22 22 22 6 6 6
39186 - 0 0 0 0 0 0 0 0 0 0 0 0
39187 - 0 0 0 0 0 0 0 0 0 0 0 0
39188 - 0 0 0 0 0 0 0 0 0 0 0 0
39189 - 0 0 0 0 0 0 0 0 0 0 0 0
39190 - 0 0 0 0 0 0 0 0 0 0 0 0
39191 - 0 0 0 0 0 0 0 0 0 0 0 0
39192 - 0 0 0 0 0 0 0 0 1 0 0 0
39193 - 0 0 1 0 0 0 0 0 1 0 0 0
39194 - 0 0 0 0 0 0 0 0 0 0 0 0
39195 - 0 0 0 0 0 0 0 0 0 0 0 0
39196 - 0 0 0 0 0 0 0 0 0 0 0 0
39197 - 0 0 0 0 0 0 0 0 0 0 0 0
39198 - 0 0 0 0 0 0 0 0 0 14 14 14
39199 - 42 42 42 94 94 94 14 14 14 101 101 101
39200 -128 128 128 2 2 6 18 18 18 116 116 116
39201 -118 98 46 121 92 8 121 92 8 98 78 10
39202 -162 162 162 106 106 106 2 2 6 2 2 6
39203 - 2 2 6 195 195 195 195 195 195 6 6 6
39204 - 2 2 6 2 2 6 2 2 6 2 2 6
39205 - 74 74 74 62 62 62 22 22 22 6 6 6
39206 - 0 0 0 0 0 0 0 0 0 0 0 0
39207 - 0 0 0 0 0 0 0 0 0 0 0 0
39208 - 0 0 0 0 0 0 0 0 0 0 0 0
39209 - 0 0 0 0 0 0 0 0 0 0 0 0
39210 - 0 0 0 0 0 0 0 0 0 0 0 0
39211 - 0 0 0 0 0 0 0 0 0 0 0 0
39212 - 0 0 0 0 0 0 0 0 1 0 0 1
39213 - 0 0 1 0 0 0 0 0 1 0 0 0
39214 - 0 0 0 0 0 0 0 0 0 0 0 0
39215 - 0 0 0 0 0 0 0 0 0 0 0 0
39216 - 0 0 0 0 0 0 0 0 0 0 0 0
39217 - 0 0 0 0 0 0 0 0 0 0 0 0
39218 - 0 0 0 0 0 0 0 0 0 10 10 10
39219 - 38 38 38 90 90 90 14 14 14 58 58 58
39220 -210 210 210 26 26 26 54 38 6 154 114 10
39221 -226 170 11 236 186 11 225 175 15 184 144 12
39222 -215 174 15 175 146 61 37 26 9 2 2 6
39223 - 70 70 70 246 246 246 138 138 138 2 2 6
39224 - 2 2 6 2 2 6 2 2 6 2 2 6
39225 - 70 70 70 66 66 66 26 26 26 6 6 6
39226 - 0 0 0 0 0 0 0 0 0 0 0 0
39227 - 0 0 0 0 0 0 0 0 0 0 0 0
39228 - 0 0 0 0 0 0 0 0 0 0 0 0
39229 - 0 0 0 0 0 0 0 0 0 0 0 0
39230 - 0 0 0 0 0 0 0 0 0 0 0 0
39231 - 0 0 0 0 0 0 0 0 0 0 0 0
39232 - 0 0 0 0 0 0 0 0 0 0 0 0
39233 - 0 0 0 0 0 0 0 0 0 0 0 0
39234 - 0 0 0 0 0 0 0 0 0 0 0 0
39235 - 0 0 0 0 0 0 0 0 0 0 0 0
39236 - 0 0 0 0 0 0 0 0 0 0 0 0
39237 - 0 0 0 0 0 0 0 0 0 0 0 0
39238 - 0 0 0 0 0 0 0 0 0 10 10 10
39239 - 38 38 38 86 86 86 14 14 14 10 10 10
39240 -195 195 195 188 164 115 192 133 9 225 175 15
39241 -239 182 13 234 190 10 232 195 16 232 200 30
39242 -245 207 45 241 208 19 232 195 16 184 144 12
39243 -218 194 134 211 206 186 42 42 42 2 2 6
39244 - 2 2 6 2 2 6 2 2 6 2 2 6
39245 - 50 50 50 74 74 74 30 30 30 6 6 6
39246 - 0 0 0 0 0 0 0 0 0 0 0 0
39247 - 0 0 0 0 0 0 0 0 0 0 0 0
39248 - 0 0 0 0 0 0 0 0 0 0 0 0
39249 - 0 0 0 0 0 0 0 0 0 0 0 0
39250 - 0 0 0 0 0 0 0 0 0 0 0 0
39251 - 0 0 0 0 0 0 0 0 0 0 0 0
39252 - 0 0 0 0 0 0 0 0 0 0 0 0
39253 - 0 0 0 0 0 0 0 0 0 0 0 0
39254 - 0 0 0 0 0 0 0 0 0 0 0 0
39255 - 0 0 0 0 0 0 0 0 0 0 0 0
39256 - 0 0 0 0 0 0 0 0 0 0 0 0
39257 - 0 0 0 0 0 0 0 0 0 0 0 0
39258 - 0 0 0 0 0 0 0 0 0 10 10 10
39259 - 34 34 34 86 86 86 14 14 14 2 2 6
39260 -121 87 25 192 133 9 219 162 10 239 182 13
39261 -236 186 11 232 195 16 241 208 19 244 214 54
39262 -246 218 60 246 218 38 246 215 20 241 208 19
39263 -241 208 19 226 184 13 121 87 25 2 2 6
39264 - 2 2 6 2 2 6 2 2 6 2 2 6
39265 - 50 50 50 82 82 82 34 34 34 10 10 10
39266 - 0 0 0 0 0 0 0 0 0 0 0 0
39267 - 0 0 0 0 0 0 0 0 0 0 0 0
39268 - 0 0 0 0 0 0 0 0 0 0 0 0
39269 - 0 0 0 0 0 0 0 0 0 0 0 0
39270 - 0 0 0 0 0 0 0 0 0 0 0 0
39271 - 0 0 0 0 0 0 0 0 0 0 0 0
39272 - 0 0 0 0 0 0 0 0 0 0 0 0
39273 - 0 0 0 0 0 0 0 0 0 0 0 0
39274 - 0 0 0 0 0 0 0 0 0 0 0 0
39275 - 0 0 0 0 0 0 0 0 0 0 0 0
39276 - 0 0 0 0 0 0 0 0 0 0 0 0
39277 - 0 0 0 0 0 0 0 0 0 0 0 0
39278 - 0 0 0 0 0 0 0 0 0 10 10 10
39279 - 34 34 34 82 82 82 30 30 30 61 42 6
39280 -180 123 7 206 145 10 230 174 11 239 182 13
39281 -234 190 10 238 202 15 241 208 19 246 218 74
39282 -246 218 38 246 215 20 246 215 20 246 215 20
39283 -226 184 13 215 174 15 184 144 12 6 6 6
39284 - 2 2 6 2 2 6 2 2 6 2 2 6
39285 - 26 26 26 94 94 94 42 42 42 14 14 14
39286 - 0 0 0 0 0 0 0 0 0 0 0 0
39287 - 0 0 0 0 0 0 0 0 0 0 0 0
39288 - 0 0 0 0 0 0 0 0 0 0 0 0
39289 - 0 0 0 0 0 0 0 0 0 0 0 0
39290 - 0 0 0 0 0 0 0 0 0 0 0 0
39291 - 0 0 0 0 0 0 0 0 0 0 0 0
39292 - 0 0 0 0 0 0 0 0 0 0 0 0
39293 - 0 0 0 0 0 0 0 0 0 0 0 0
39294 - 0 0 0 0 0 0 0 0 0 0 0 0
39295 - 0 0 0 0 0 0 0 0 0 0 0 0
39296 - 0 0 0 0 0 0 0 0 0 0 0 0
39297 - 0 0 0 0 0 0 0 0 0 0 0 0
39298 - 0 0 0 0 0 0 0 0 0 10 10 10
39299 - 30 30 30 78 78 78 50 50 50 104 69 6
39300 -192 133 9 216 158 10 236 178 12 236 186 11
39301 -232 195 16 241 208 19 244 214 54 245 215 43
39302 -246 215 20 246 215 20 241 208 19 198 155 10
39303 -200 144 11 216 158 10 156 118 10 2 2 6
39304 - 2 2 6 2 2 6 2 2 6 2 2 6
39305 - 6 6 6 90 90 90 54 54 54 18 18 18
39306 - 6 6 6 0 0 0 0 0 0 0 0 0
39307 - 0 0 0 0 0 0 0 0 0 0 0 0
39308 - 0 0 0 0 0 0 0 0 0 0 0 0
39309 - 0 0 0 0 0 0 0 0 0 0 0 0
39310 - 0 0 0 0 0 0 0 0 0 0 0 0
39311 - 0 0 0 0 0 0 0 0 0 0 0 0
39312 - 0 0 0 0 0 0 0 0 0 0 0 0
39313 - 0 0 0 0 0 0 0 0 0 0 0 0
39314 - 0 0 0 0 0 0 0 0 0 0 0 0
39315 - 0 0 0 0 0 0 0 0 0 0 0 0
39316 - 0 0 0 0 0 0 0 0 0 0 0 0
39317 - 0 0 0 0 0 0 0 0 0 0 0 0
39318 - 0 0 0 0 0 0 0 0 0 10 10 10
39319 - 30 30 30 78 78 78 46 46 46 22 22 22
39320 -137 92 6 210 162 10 239 182 13 238 190 10
39321 -238 202 15 241 208 19 246 215 20 246 215 20
39322 -241 208 19 203 166 17 185 133 11 210 150 10
39323 -216 158 10 210 150 10 102 78 10 2 2 6
39324 - 6 6 6 54 54 54 14 14 14 2 2 6
39325 - 2 2 6 62 62 62 74 74 74 30 30 30
39326 - 10 10 10 0 0 0 0 0 0 0 0 0
39327 - 0 0 0 0 0 0 0 0 0 0 0 0
39328 - 0 0 0 0 0 0 0 0 0 0 0 0
39329 - 0 0 0 0 0 0 0 0 0 0 0 0
39330 - 0 0 0 0 0 0 0 0 0 0 0 0
39331 - 0 0 0 0 0 0 0 0 0 0 0 0
39332 - 0 0 0 0 0 0 0 0 0 0 0 0
39333 - 0 0 0 0 0 0 0 0 0 0 0 0
39334 - 0 0 0 0 0 0 0 0 0 0 0 0
39335 - 0 0 0 0 0 0 0 0 0 0 0 0
39336 - 0 0 0 0 0 0 0 0 0 0 0 0
39337 - 0 0 0 0 0 0 0 0 0 0 0 0
39338 - 0 0 0 0 0 0 0 0 0 10 10 10
39339 - 34 34 34 78 78 78 50 50 50 6 6 6
39340 - 94 70 30 139 102 15 190 146 13 226 184 13
39341 -232 200 30 232 195 16 215 174 15 190 146 13
39342 -168 122 10 192 133 9 210 150 10 213 154 11
39343 -202 150 34 182 157 106 101 98 89 2 2 6
39344 - 2 2 6 78 78 78 116 116 116 58 58 58
39345 - 2 2 6 22 22 22 90 90 90 46 46 46
39346 - 18 18 18 6 6 6 0 0 0 0 0 0
39347 - 0 0 0 0 0 0 0 0 0 0 0 0
39348 - 0 0 0 0 0 0 0 0 0 0 0 0
39349 - 0 0 0 0 0 0 0 0 0 0 0 0
39350 - 0 0 0 0 0 0 0 0 0 0 0 0
39351 - 0 0 0 0 0 0 0 0 0 0 0 0
39352 - 0 0 0 0 0 0 0 0 0 0 0 0
39353 - 0 0 0 0 0 0 0 0 0 0 0 0
39354 - 0 0 0 0 0 0 0 0 0 0 0 0
39355 - 0 0 0 0 0 0 0 0 0 0 0 0
39356 - 0 0 0 0 0 0 0 0 0 0 0 0
39357 - 0 0 0 0 0 0 0 0 0 0 0 0
39358 - 0 0 0 0 0 0 0 0 0 10 10 10
39359 - 38 38 38 86 86 86 50 50 50 6 6 6
39360 -128 128 128 174 154 114 156 107 11 168 122 10
39361 -198 155 10 184 144 12 197 138 11 200 144 11
39362 -206 145 10 206 145 10 197 138 11 188 164 115
39363 -195 195 195 198 198 198 174 174 174 14 14 14
39364 - 2 2 6 22 22 22 116 116 116 116 116 116
39365 - 22 22 22 2 2 6 74 74 74 70 70 70
39366 - 30 30 30 10 10 10 0 0 0 0 0 0
39367 - 0 0 0 0 0 0 0 0 0 0 0 0
39368 - 0 0 0 0 0 0 0 0 0 0 0 0
39369 - 0 0 0 0 0 0 0 0 0 0 0 0
39370 - 0 0 0 0 0 0 0 0 0 0 0 0
39371 - 0 0 0 0 0 0 0 0 0 0 0 0
39372 - 0 0 0 0 0 0 0 0 0 0 0 0
39373 - 0 0 0 0 0 0 0 0 0 0 0 0
39374 - 0 0 0 0 0 0 0 0 0 0 0 0
39375 - 0 0 0 0 0 0 0 0 0 0 0 0
39376 - 0 0 0 0 0 0 0 0 0 0 0 0
39377 - 0 0 0 0 0 0 0 0 0 0 0 0
39378 - 0 0 0 0 0 0 6 6 6 18 18 18
39379 - 50 50 50 101 101 101 26 26 26 10 10 10
39380 -138 138 138 190 190 190 174 154 114 156 107 11
39381 -197 138 11 200 144 11 197 138 11 192 133 9
39382 -180 123 7 190 142 34 190 178 144 187 187 187
39383 -202 202 202 221 221 221 214 214 214 66 66 66
39384 - 2 2 6 2 2 6 50 50 50 62 62 62
39385 - 6 6 6 2 2 6 10 10 10 90 90 90
39386 - 50 50 50 18 18 18 6 6 6 0 0 0
39387 - 0 0 0 0 0 0 0 0 0 0 0 0
39388 - 0 0 0 0 0 0 0 0 0 0 0 0
39389 - 0 0 0 0 0 0 0 0 0 0 0 0
39390 - 0 0 0 0 0 0 0 0 0 0 0 0
39391 - 0 0 0 0 0 0 0 0 0 0 0 0
39392 - 0 0 0 0 0 0 0 0 0 0 0 0
39393 - 0 0 0 0 0 0 0 0 0 0 0 0
39394 - 0 0 0 0 0 0 0 0 0 0 0 0
39395 - 0 0 0 0 0 0 0 0 0 0 0 0
39396 - 0 0 0 0 0 0 0 0 0 0 0 0
39397 - 0 0 0 0 0 0 0 0 0 0 0 0
39398 - 0 0 0 0 0 0 10 10 10 34 34 34
39399 - 74 74 74 74 74 74 2 2 6 6 6 6
39400 -144 144 144 198 198 198 190 190 190 178 166 146
39401 -154 121 60 156 107 11 156 107 11 168 124 44
39402 -174 154 114 187 187 187 190 190 190 210 210 210
39403 -246 246 246 253 253 253 253 253 253 182 182 182
39404 - 6 6 6 2 2 6 2 2 6 2 2 6
39405 - 2 2 6 2 2 6 2 2 6 62 62 62
39406 - 74 74 74 34 34 34 14 14 14 0 0 0
39407 - 0 0 0 0 0 0 0 0 0 0 0 0
39408 - 0 0 0 0 0 0 0 0 0 0 0 0
39409 - 0 0 0 0 0 0 0 0 0 0 0 0
39410 - 0 0 0 0 0 0 0 0 0 0 0 0
39411 - 0 0 0 0 0 0 0 0 0 0 0 0
39412 - 0 0 0 0 0 0 0 0 0 0 0 0
39413 - 0 0 0 0 0 0 0 0 0 0 0 0
39414 - 0 0 0 0 0 0 0 0 0 0 0 0
39415 - 0 0 0 0 0 0 0 0 0 0 0 0
39416 - 0 0 0 0 0 0 0 0 0 0 0 0
39417 - 0 0 0 0 0 0 0 0 0 0 0 0
39418 - 0 0 0 10 10 10 22 22 22 54 54 54
39419 - 94 94 94 18 18 18 2 2 6 46 46 46
39420 -234 234 234 221 221 221 190 190 190 190 190 190
39421 -190 190 190 187 187 187 187 187 187 190 190 190
39422 -190 190 190 195 195 195 214 214 214 242 242 242
39423 -253 253 253 253 253 253 253 253 253 253 253 253
39424 - 82 82 82 2 2 6 2 2 6 2 2 6
39425 - 2 2 6 2 2 6 2 2 6 14 14 14
39426 - 86 86 86 54 54 54 22 22 22 6 6 6
39427 - 0 0 0 0 0 0 0 0 0 0 0 0
39428 - 0 0 0 0 0 0 0 0 0 0 0 0
39429 - 0 0 0 0 0 0 0 0 0 0 0 0
39430 - 0 0 0 0 0 0 0 0 0 0 0 0
39431 - 0 0 0 0 0 0 0 0 0 0 0 0
39432 - 0 0 0 0 0 0 0 0 0 0 0 0
39433 - 0 0 0 0 0 0 0 0 0 0 0 0
39434 - 0 0 0 0 0 0 0 0 0 0 0 0
39435 - 0 0 0 0 0 0 0 0 0 0 0 0
39436 - 0 0 0 0 0 0 0 0 0 0 0 0
39437 - 0 0 0 0 0 0 0 0 0 0 0 0
39438 - 6 6 6 18 18 18 46 46 46 90 90 90
39439 - 46 46 46 18 18 18 6 6 6 182 182 182
39440 -253 253 253 246 246 246 206 206 206 190 190 190
39441 -190 190 190 190 190 190 190 190 190 190 190 190
39442 -206 206 206 231 231 231 250 250 250 253 253 253
39443 -253 253 253 253 253 253 253 253 253 253 253 253
39444 -202 202 202 14 14 14 2 2 6 2 2 6
39445 - 2 2 6 2 2 6 2 2 6 2 2 6
39446 - 42 42 42 86 86 86 42 42 42 18 18 18
39447 - 6 6 6 0 0 0 0 0 0 0 0 0
39448 - 0 0 0 0 0 0 0 0 0 0 0 0
39449 - 0 0 0 0 0 0 0 0 0 0 0 0
39450 - 0 0 0 0 0 0 0 0 0 0 0 0
39451 - 0 0 0 0 0 0 0 0 0 0 0 0
39452 - 0 0 0 0 0 0 0 0 0 0 0 0
39453 - 0 0 0 0 0 0 0 0 0 0 0 0
39454 - 0 0 0 0 0 0 0 0 0 0 0 0
39455 - 0 0 0 0 0 0 0 0 0 0 0 0
39456 - 0 0 0 0 0 0 0 0 0 0 0 0
39457 - 0 0 0 0 0 0 0 0 0 6 6 6
39458 - 14 14 14 38 38 38 74 74 74 66 66 66
39459 - 2 2 6 6 6 6 90 90 90 250 250 250
39460 -253 253 253 253 253 253 238 238 238 198 198 198
39461 -190 190 190 190 190 190 195 195 195 221 221 221
39462 -246 246 246 253 253 253 253 253 253 253 253 253
39463 -253 253 253 253 253 253 253 253 253 253 253 253
39464 -253 253 253 82 82 82 2 2 6 2 2 6
39465 - 2 2 6 2 2 6 2 2 6 2 2 6
39466 - 2 2 6 78 78 78 70 70 70 34 34 34
39467 - 14 14 14 6 6 6 0 0 0 0 0 0
39468 - 0 0 0 0 0 0 0 0 0 0 0 0
39469 - 0 0 0 0 0 0 0 0 0 0 0 0
39470 - 0 0 0 0 0 0 0 0 0 0 0 0
39471 - 0 0 0 0 0 0 0 0 0 0 0 0
39472 - 0 0 0 0 0 0 0 0 0 0 0 0
39473 - 0 0 0 0 0 0 0 0 0 0 0 0
39474 - 0 0 0 0 0 0 0 0 0 0 0 0
39475 - 0 0 0 0 0 0 0 0 0 0 0 0
39476 - 0 0 0 0 0 0 0 0 0 0 0 0
39477 - 0 0 0 0 0 0 0 0 0 14 14 14
39478 - 34 34 34 66 66 66 78 78 78 6 6 6
39479 - 2 2 6 18 18 18 218 218 218 253 253 253
39480 -253 253 253 253 253 253 253 253 253 246 246 246
39481 -226 226 226 231 231 231 246 246 246 253 253 253
39482 -253 253 253 253 253 253 253 253 253 253 253 253
39483 -253 253 253 253 253 253 253 253 253 253 253 253
39484 -253 253 253 178 178 178 2 2 6 2 2 6
39485 - 2 2 6 2 2 6 2 2 6 2 2 6
39486 - 2 2 6 18 18 18 90 90 90 62 62 62
39487 - 30 30 30 10 10 10 0 0 0 0 0 0
39488 - 0 0 0 0 0 0 0 0 0 0 0 0
39489 - 0 0 0 0 0 0 0 0 0 0 0 0
39490 - 0 0 0 0 0 0 0 0 0 0 0 0
39491 - 0 0 0 0 0 0 0 0 0 0 0 0
39492 - 0 0 0 0 0 0 0 0 0 0 0 0
39493 - 0 0 0 0 0 0 0 0 0 0 0 0
39494 - 0 0 0 0 0 0 0 0 0 0 0 0
39495 - 0 0 0 0 0 0 0 0 0 0 0 0
39496 - 0 0 0 0 0 0 0 0 0 0 0 0
39497 - 0 0 0 0 0 0 10 10 10 26 26 26
39498 - 58 58 58 90 90 90 18 18 18 2 2 6
39499 - 2 2 6 110 110 110 253 253 253 253 253 253
39500 -253 253 253 253 253 253 253 253 253 253 253 253
39501 -250 250 250 253 253 253 253 253 253 253 253 253
39502 -253 253 253 253 253 253 253 253 253 253 253 253
39503 -253 253 253 253 253 253 253 253 253 253 253 253
39504 -253 253 253 231 231 231 18 18 18 2 2 6
39505 - 2 2 6 2 2 6 2 2 6 2 2 6
39506 - 2 2 6 2 2 6 18 18 18 94 94 94
39507 - 54 54 54 26 26 26 10 10 10 0 0 0
39508 - 0 0 0 0 0 0 0 0 0 0 0 0
39509 - 0 0 0 0 0 0 0 0 0 0 0 0
39510 - 0 0 0 0 0 0 0 0 0 0 0 0
39511 - 0 0 0 0 0 0 0 0 0 0 0 0
39512 - 0 0 0 0 0 0 0 0 0 0 0 0
39513 - 0 0 0 0 0 0 0 0 0 0 0 0
39514 - 0 0 0 0 0 0 0 0 0 0 0 0
39515 - 0 0 0 0 0 0 0 0 0 0 0 0
39516 - 0 0 0 0 0 0 0 0 0 0 0 0
39517 - 0 0 0 6 6 6 22 22 22 50 50 50
39518 - 90 90 90 26 26 26 2 2 6 2 2 6
39519 - 14 14 14 195 195 195 250 250 250 253 253 253
39520 -253 253 253 253 253 253 253 253 253 253 253 253
39521 -253 253 253 253 253 253 253 253 253 253 253 253
39522 -253 253 253 253 253 253 253 253 253 253 253 253
39523 -253 253 253 253 253 253 253 253 253 253 253 253
39524 -250 250 250 242 242 242 54 54 54 2 2 6
39525 - 2 2 6 2 2 6 2 2 6 2 2 6
39526 - 2 2 6 2 2 6 2 2 6 38 38 38
39527 - 86 86 86 50 50 50 22 22 22 6 6 6
39528 - 0 0 0 0 0 0 0 0 0 0 0 0
39529 - 0 0 0 0 0 0 0 0 0 0 0 0
39530 - 0 0 0 0 0 0 0 0 0 0 0 0
39531 - 0 0 0 0 0 0 0 0 0 0 0 0
39532 - 0 0 0 0 0 0 0 0 0 0 0 0
39533 - 0 0 0 0 0 0 0 0 0 0 0 0
39534 - 0 0 0 0 0 0 0 0 0 0 0 0
39535 - 0 0 0 0 0 0 0 0 0 0 0 0
39536 - 0 0 0 0 0 0 0 0 0 0 0 0
39537 - 6 6 6 14 14 14 38 38 38 82 82 82
39538 - 34 34 34 2 2 6 2 2 6 2 2 6
39539 - 42 42 42 195 195 195 246 246 246 253 253 253
39540 -253 253 253 253 253 253 253 253 253 250 250 250
39541 -242 242 242 242 242 242 250 250 250 253 253 253
39542 -253 253 253 253 253 253 253 253 253 253 253 253
39543 -253 253 253 250 250 250 246 246 246 238 238 238
39544 -226 226 226 231 231 231 101 101 101 6 6 6
39545 - 2 2 6 2 2 6 2 2 6 2 2 6
39546 - 2 2 6 2 2 6 2 2 6 2 2 6
39547 - 38 38 38 82 82 82 42 42 42 14 14 14
39548 - 6 6 6 0 0 0 0 0 0 0 0 0
39549 - 0 0 0 0 0 0 0 0 0 0 0 0
39550 - 0 0 0 0 0 0 0 0 0 0 0 0
39551 - 0 0 0 0 0 0 0 0 0 0 0 0
39552 - 0 0 0 0 0 0 0 0 0 0 0 0
39553 - 0 0 0 0 0 0 0 0 0 0 0 0
39554 - 0 0 0 0 0 0 0 0 0 0 0 0
39555 - 0 0 0 0 0 0 0 0 0 0 0 0
39556 - 0 0 0 0 0 0 0 0 0 0 0 0
39557 - 10 10 10 26 26 26 62 62 62 66 66 66
39558 - 2 2 6 2 2 6 2 2 6 6 6 6
39559 - 70 70 70 170 170 170 206 206 206 234 234 234
39560 -246 246 246 250 250 250 250 250 250 238 238 238
39561 -226 226 226 231 231 231 238 238 238 250 250 250
39562 -250 250 250 250 250 250 246 246 246 231 231 231
39563 -214 214 214 206 206 206 202 202 202 202 202 202
39564 -198 198 198 202 202 202 182 182 182 18 18 18
39565 - 2 2 6 2 2 6 2 2 6 2 2 6
39566 - 2 2 6 2 2 6 2 2 6 2 2 6
39567 - 2 2 6 62 62 62 66 66 66 30 30 30
39568 - 10 10 10 0 0 0 0 0 0 0 0 0
39569 - 0 0 0 0 0 0 0 0 0 0 0 0
39570 - 0 0 0 0 0 0 0 0 0 0 0 0
39571 - 0 0 0 0 0 0 0 0 0 0 0 0
39572 - 0 0 0 0 0 0 0 0 0 0 0 0
39573 - 0 0 0 0 0 0 0 0 0 0 0 0
39574 - 0 0 0 0 0 0 0 0 0 0 0 0
39575 - 0 0 0 0 0 0 0 0 0 0 0 0
39576 - 0 0 0 0 0 0 0 0 0 0 0 0
39577 - 14 14 14 42 42 42 82 82 82 18 18 18
39578 - 2 2 6 2 2 6 2 2 6 10 10 10
39579 - 94 94 94 182 182 182 218 218 218 242 242 242
39580 -250 250 250 253 253 253 253 253 253 250 250 250
39581 -234 234 234 253 253 253 253 253 253 253 253 253
39582 -253 253 253 253 253 253 253 253 253 246 246 246
39583 -238 238 238 226 226 226 210 210 210 202 202 202
39584 -195 195 195 195 195 195 210 210 210 158 158 158
39585 - 6 6 6 14 14 14 50 50 50 14 14 14
39586 - 2 2 6 2 2 6 2 2 6 2 2 6
39587 - 2 2 6 6 6 6 86 86 86 46 46 46
39588 - 18 18 18 6 6 6 0 0 0 0 0 0
39589 - 0 0 0 0 0 0 0 0 0 0 0 0
39590 - 0 0 0 0 0 0 0 0 0 0 0 0
39591 - 0 0 0 0 0 0 0 0 0 0 0 0
39592 - 0 0 0 0 0 0 0 0 0 0 0 0
39593 - 0 0 0 0 0 0 0 0 0 0 0 0
39594 - 0 0 0 0 0 0 0 0 0 0 0 0
39595 - 0 0 0 0 0 0 0 0 0 0 0 0
39596 - 0 0 0 0 0 0 0 0 0 6 6 6
39597 - 22 22 22 54 54 54 70 70 70 2 2 6
39598 - 2 2 6 10 10 10 2 2 6 22 22 22
39599 -166 166 166 231 231 231 250 250 250 253 253 253
39600 -253 253 253 253 253 253 253 253 253 250 250 250
39601 -242 242 242 253 253 253 253 253 253 253 253 253
39602 -253 253 253 253 253 253 253 253 253 253 253 253
39603 -253 253 253 253 253 253 253 253 253 246 246 246
39604 -231 231 231 206 206 206 198 198 198 226 226 226
39605 - 94 94 94 2 2 6 6 6 6 38 38 38
39606 - 30 30 30 2 2 6 2 2 6 2 2 6
39607 - 2 2 6 2 2 6 62 62 62 66 66 66
39608 - 26 26 26 10 10 10 0 0 0 0 0 0
39609 - 0 0 0 0 0 0 0 0 0 0 0 0
39610 - 0 0 0 0 0 0 0 0 0 0 0 0
39611 - 0 0 0 0 0 0 0 0 0 0 0 0
39612 - 0 0 0 0 0 0 0 0 0 0 0 0
39613 - 0 0 0 0 0 0 0 0 0 0 0 0
39614 - 0 0 0 0 0 0 0 0 0 0 0 0
39615 - 0 0 0 0 0 0 0 0 0 0 0 0
39616 - 0 0 0 0 0 0 0 0 0 10 10 10
39617 - 30 30 30 74 74 74 50 50 50 2 2 6
39618 - 26 26 26 26 26 26 2 2 6 106 106 106
39619 -238 238 238 253 253 253 253 253 253 253 253 253
39620 -253 253 253 253 253 253 253 253 253 253 253 253
39621 -253 253 253 253 253 253 253 253 253 253 253 253
39622 -253 253 253 253 253 253 253 253 253 253 253 253
39623 -253 253 253 253 253 253 253 253 253 253 253 253
39624 -253 253 253 246 246 246 218 218 218 202 202 202
39625 -210 210 210 14 14 14 2 2 6 2 2 6
39626 - 30 30 30 22 22 22 2 2 6 2 2 6
39627 - 2 2 6 2 2 6 18 18 18 86 86 86
39628 - 42 42 42 14 14 14 0 0 0 0 0 0
39629 - 0 0 0 0 0 0 0 0 0 0 0 0
39630 - 0 0 0 0 0 0 0 0 0 0 0 0
39631 - 0 0 0 0 0 0 0 0 0 0 0 0
39632 - 0 0 0 0 0 0 0 0 0 0 0 0
39633 - 0 0 0 0 0 0 0 0 0 0 0 0
39634 - 0 0 0 0 0 0 0 0 0 0 0 0
39635 - 0 0 0 0 0 0 0 0 0 0 0 0
39636 - 0 0 0 0 0 0 0 0 0 14 14 14
39637 - 42 42 42 90 90 90 22 22 22 2 2 6
39638 - 42 42 42 2 2 6 18 18 18 218 218 218
39639 -253 253 253 253 253 253 253 253 253 253 253 253
39640 -253 253 253 253 253 253 253 253 253 253 253 253
39641 -253 253 253 253 253 253 253 253 253 253 253 253
39642 -253 253 253 253 253 253 253 253 253 253 253 253
39643 -253 253 253 253 253 253 253 253 253 253 253 253
39644 -253 253 253 253 253 253 250 250 250 221 221 221
39645 -218 218 218 101 101 101 2 2 6 14 14 14
39646 - 18 18 18 38 38 38 10 10 10 2 2 6
39647 - 2 2 6 2 2 6 2 2 6 78 78 78
39648 - 58 58 58 22 22 22 6 6 6 0 0 0
39649 - 0 0 0 0 0 0 0 0 0 0 0 0
39650 - 0 0 0 0 0 0 0 0 0 0 0 0
39651 - 0 0 0 0 0 0 0 0 0 0 0 0
39652 - 0 0 0 0 0 0 0 0 0 0 0 0
39653 - 0 0 0 0 0 0 0 0 0 0 0 0
39654 - 0 0 0 0 0 0 0 0 0 0 0 0
39655 - 0 0 0 0 0 0 0 0 0 0 0 0
39656 - 0 0 0 0 0 0 6 6 6 18 18 18
39657 - 54 54 54 82 82 82 2 2 6 26 26 26
39658 - 22 22 22 2 2 6 123 123 123 253 253 253
39659 -253 253 253 253 253 253 253 253 253 253 253 253
39660 -253 253 253 253 253 253 253 253 253 253 253 253
39661 -253 253 253 253 253 253 253 253 253 253 253 253
39662 -253 253 253 253 253 253 253 253 253 253 253 253
39663 -253 253 253 253 253 253 253 253 253 253 253 253
39664 -253 253 253 253 253 253 253 253 253 250 250 250
39665 -238 238 238 198 198 198 6 6 6 38 38 38
39666 - 58 58 58 26 26 26 38 38 38 2 2 6
39667 - 2 2 6 2 2 6 2 2 6 46 46 46
39668 - 78 78 78 30 30 30 10 10 10 0 0 0
39669 - 0 0 0 0 0 0 0 0 0 0 0 0
39670 - 0 0 0 0 0 0 0 0 0 0 0 0
39671 - 0 0 0 0 0 0 0 0 0 0 0 0
39672 - 0 0 0 0 0 0 0 0 0 0 0 0
39673 - 0 0 0 0 0 0 0 0 0 0 0 0
39674 - 0 0 0 0 0 0 0 0 0 0 0 0
39675 - 0 0 0 0 0 0 0 0 0 0 0 0
39676 - 0 0 0 0 0 0 10 10 10 30 30 30
39677 - 74 74 74 58 58 58 2 2 6 42 42 42
39678 - 2 2 6 22 22 22 231 231 231 253 253 253
39679 -253 253 253 253 253 253 253 253 253 253 253 253
39680 -253 253 253 253 253 253 253 253 253 250 250 250
39681 -253 253 253 253 253 253 253 253 253 253 253 253
39682 -253 253 253 253 253 253 253 253 253 253 253 253
39683 -253 253 253 253 253 253 253 253 253 253 253 253
39684 -253 253 253 253 253 253 253 253 253 253 253 253
39685 -253 253 253 246 246 246 46 46 46 38 38 38
39686 - 42 42 42 14 14 14 38 38 38 14 14 14
39687 - 2 2 6 2 2 6 2 2 6 6 6 6
39688 - 86 86 86 46 46 46 14 14 14 0 0 0
39689 - 0 0 0 0 0 0 0 0 0 0 0 0
39690 - 0 0 0 0 0 0 0 0 0 0 0 0
39691 - 0 0 0 0 0 0 0 0 0 0 0 0
39692 - 0 0 0 0 0 0 0 0 0 0 0 0
39693 - 0 0 0 0 0 0 0 0 0 0 0 0
39694 - 0 0 0 0 0 0 0 0 0 0 0 0
39695 - 0 0 0 0 0 0 0 0 0 0 0 0
39696 - 0 0 0 6 6 6 14 14 14 42 42 42
39697 - 90 90 90 18 18 18 18 18 18 26 26 26
39698 - 2 2 6 116 116 116 253 253 253 253 253 253
39699 -253 253 253 253 253 253 253 253 253 253 253 253
39700 -253 253 253 253 253 253 250 250 250 238 238 238
39701 -253 253 253 253 253 253 253 253 253 253 253 253
39702 -253 253 253 253 253 253 253 253 253 253 253 253
39703 -253 253 253 253 253 253 253 253 253 253 253 253
39704 -253 253 253 253 253 253 253 253 253 253 253 253
39705 -253 253 253 253 253 253 94 94 94 6 6 6
39706 - 2 2 6 2 2 6 10 10 10 34 34 34
39707 - 2 2 6 2 2 6 2 2 6 2 2 6
39708 - 74 74 74 58 58 58 22 22 22 6 6 6
39709 - 0 0 0 0 0 0 0 0 0 0 0 0
39710 - 0 0 0 0 0 0 0 0 0 0 0 0
39711 - 0 0 0 0 0 0 0 0 0 0 0 0
39712 - 0 0 0 0 0 0 0 0 0 0 0 0
39713 - 0 0 0 0 0 0 0 0 0 0 0 0
39714 - 0 0 0 0 0 0 0 0 0 0 0 0
39715 - 0 0 0 0 0 0 0 0 0 0 0 0
39716 - 0 0 0 10 10 10 26 26 26 66 66 66
39717 - 82 82 82 2 2 6 38 38 38 6 6 6
39718 - 14 14 14 210 210 210 253 253 253 253 253 253
39719 -253 253 253 253 253 253 253 253 253 253 253 253
39720 -253 253 253 253 253 253 246 246 246 242 242 242
39721 -253 253 253 253 253 253 253 253 253 253 253 253
39722 -253 253 253 253 253 253 253 253 253 253 253 253
39723 -253 253 253 253 253 253 253 253 253 253 253 253
39724 -253 253 253 253 253 253 253 253 253 253 253 253
39725 -253 253 253 253 253 253 144 144 144 2 2 6
39726 - 2 2 6 2 2 6 2 2 6 46 46 46
39727 - 2 2 6 2 2 6 2 2 6 2 2 6
39728 - 42 42 42 74 74 74 30 30 30 10 10 10
39729 - 0 0 0 0 0 0 0 0 0 0 0 0
39730 - 0 0 0 0 0 0 0 0 0 0 0 0
39731 - 0 0 0 0 0 0 0 0 0 0 0 0
39732 - 0 0 0 0 0 0 0 0 0 0 0 0
39733 - 0 0 0 0 0 0 0 0 0 0 0 0
39734 - 0 0 0 0 0 0 0 0 0 0 0 0
39735 - 0 0 0 0 0 0 0 0 0 0 0 0
39736 - 6 6 6 14 14 14 42 42 42 90 90 90
39737 - 26 26 26 6 6 6 42 42 42 2 2 6
39738 - 74 74 74 250 250 250 253 253 253 253 253 253
39739 -253 253 253 253 253 253 253 253 253 253 253 253
39740 -253 253 253 253 253 253 242 242 242 242 242 242
39741 -253 253 253 253 253 253 253 253 253 253 253 253
39742 -253 253 253 253 253 253 253 253 253 253 253 253
39743 -253 253 253 253 253 253 253 253 253 253 253 253
39744 -253 253 253 253 253 253 253 253 253 253 253 253
39745 -253 253 253 253 253 253 182 182 182 2 2 6
39746 - 2 2 6 2 2 6 2 2 6 46 46 46
39747 - 2 2 6 2 2 6 2 2 6 2 2 6
39748 - 10 10 10 86 86 86 38 38 38 10 10 10
39749 - 0 0 0 0 0 0 0 0 0 0 0 0
39750 - 0 0 0 0 0 0 0 0 0 0 0 0
39751 - 0 0 0 0 0 0 0 0 0 0 0 0
39752 - 0 0 0 0 0 0 0 0 0 0 0 0
39753 - 0 0 0 0 0 0 0 0 0 0 0 0
39754 - 0 0 0 0 0 0 0 0 0 0 0 0
39755 - 0 0 0 0 0 0 0 0 0 0 0 0
39756 - 10 10 10 26 26 26 66 66 66 82 82 82
39757 - 2 2 6 22 22 22 18 18 18 2 2 6
39758 -149 149 149 253 253 253 253 253 253 253 253 253
39759 -253 253 253 253 253 253 253 253 253 253 253 253
39760 -253 253 253 253 253 253 234 234 234 242 242 242
39761 -253 253 253 253 253 253 253 253 253 253 253 253
39762 -253 253 253 253 253 253 253 253 253 253 253 253
39763 -253 253 253 253 253 253 253 253 253 253 253 253
39764 -253 253 253 253 253 253 253 253 253 253 253 253
39765 -253 253 253 253 253 253 206 206 206 2 2 6
39766 - 2 2 6 2 2 6 2 2 6 38 38 38
39767 - 2 2 6 2 2 6 2 2 6 2 2 6
39768 - 6 6 6 86 86 86 46 46 46 14 14 14
39769 - 0 0 0 0 0 0 0 0 0 0 0 0
39770 - 0 0 0 0 0 0 0 0 0 0 0 0
39771 - 0 0 0 0 0 0 0 0 0 0 0 0
39772 - 0 0 0 0 0 0 0 0 0 0 0 0
39773 - 0 0 0 0 0 0 0 0 0 0 0 0
39774 - 0 0 0 0 0 0 0 0 0 0 0 0
39775 - 0 0 0 0 0 0 0 0 0 6 6 6
39776 - 18 18 18 46 46 46 86 86 86 18 18 18
39777 - 2 2 6 34 34 34 10 10 10 6 6 6
39778 -210 210 210 253 253 253 253 253 253 253 253 253
39779 -253 253 253 253 253 253 253 253 253 253 253 253
39780 -253 253 253 253 253 253 234 234 234 242 242 242
39781 -253 253 253 253 253 253 253 253 253 253 253 253
39782 -253 253 253 253 253 253 253 253 253 253 253 253
39783 -253 253 253 253 253 253 253 253 253 253 253 253
39784 -253 253 253 253 253 253 253 253 253 253 253 253
39785 -253 253 253 253 253 253 221 221 221 6 6 6
39786 - 2 2 6 2 2 6 6 6 6 30 30 30
39787 - 2 2 6 2 2 6 2 2 6 2 2 6
39788 - 2 2 6 82 82 82 54 54 54 18 18 18
39789 - 6 6 6 0 0 0 0 0 0 0 0 0
39790 - 0 0 0 0 0 0 0 0 0 0 0 0
39791 - 0 0 0 0 0 0 0 0 0 0 0 0
39792 - 0 0 0 0 0 0 0 0 0 0 0 0
39793 - 0 0 0 0 0 0 0 0 0 0 0 0
39794 - 0 0 0 0 0 0 0 0 0 0 0 0
39795 - 0 0 0 0 0 0 0 0 0 10 10 10
39796 - 26 26 26 66 66 66 62 62 62 2 2 6
39797 - 2 2 6 38 38 38 10 10 10 26 26 26
39798 -238 238 238 253 253 253 253 253 253 253 253 253
39799 -253 253 253 253 253 253 253 253 253 253 253 253
39800 -253 253 253 253 253 253 231 231 231 238 238 238
39801 -253 253 253 253 253 253 253 253 253 253 253 253
39802 -253 253 253 253 253 253 253 253 253 253 253 253
39803 -253 253 253 253 253 253 253 253 253 253 253 253
39804 -253 253 253 253 253 253 253 253 253 253 253 253
39805 -253 253 253 253 253 253 231 231 231 6 6 6
39806 - 2 2 6 2 2 6 10 10 10 30 30 30
39807 - 2 2 6 2 2 6 2 2 6 2 2 6
39808 - 2 2 6 66 66 66 58 58 58 22 22 22
39809 - 6 6 6 0 0 0 0 0 0 0 0 0
39810 - 0 0 0 0 0 0 0 0 0 0 0 0
39811 - 0 0 0 0 0 0 0 0 0 0 0 0
39812 - 0 0 0 0 0 0 0 0 0 0 0 0
39813 - 0 0 0 0 0 0 0 0 0 0 0 0
39814 - 0 0 0 0 0 0 0 0 0 0 0 0
39815 - 0 0 0 0 0 0 0 0 0 10 10 10
39816 - 38 38 38 78 78 78 6 6 6 2 2 6
39817 - 2 2 6 46 46 46 14 14 14 42 42 42
39818 -246 246 246 253 253 253 253 253 253 253 253 253
39819 -253 253 253 253 253 253 253 253 253 253 253 253
39820 -253 253 253 253 253 253 231 231 231 242 242 242
39821 -253 253 253 253 253 253 253 253 253 253 253 253
39822 -253 253 253 253 253 253 253 253 253 253 253 253
39823 -253 253 253 253 253 253 253 253 253 253 253 253
39824 -253 253 253 253 253 253 253 253 253 253 253 253
39825 -253 253 253 253 253 253 234 234 234 10 10 10
39826 - 2 2 6 2 2 6 22 22 22 14 14 14
39827 - 2 2 6 2 2 6 2 2 6 2 2 6
39828 - 2 2 6 66 66 66 62 62 62 22 22 22
39829 - 6 6 6 0 0 0 0 0 0 0 0 0
39830 - 0 0 0 0 0 0 0 0 0 0 0 0
39831 - 0 0 0 0 0 0 0 0 0 0 0 0
39832 - 0 0 0 0 0 0 0 0 0 0 0 0
39833 - 0 0 0 0 0 0 0 0 0 0 0 0
39834 - 0 0 0 0 0 0 0 0 0 0 0 0
39835 - 0 0 0 0 0 0 6 6 6 18 18 18
39836 - 50 50 50 74 74 74 2 2 6 2 2 6
39837 - 14 14 14 70 70 70 34 34 34 62 62 62
39838 -250 250 250 253 253 253 253 253 253 253 253 253
39839 -253 253 253 253 253 253 253 253 253 253 253 253
39840 -253 253 253 253 253 253 231 231 231 246 246 246
39841 -253 253 253 253 253 253 253 253 253 253 253 253
39842 -253 253 253 253 253 253 253 253 253 253 253 253
39843 -253 253 253 253 253 253 253 253 253 253 253 253
39844 -253 253 253 253 253 253 253 253 253 253 253 253
39845 -253 253 253 253 253 253 234 234 234 14 14 14
39846 - 2 2 6 2 2 6 30 30 30 2 2 6
39847 - 2 2 6 2 2 6 2 2 6 2 2 6
39848 - 2 2 6 66 66 66 62 62 62 22 22 22
39849 - 6 6 6 0 0 0 0 0 0 0 0 0
39850 - 0 0 0 0 0 0 0 0 0 0 0 0
39851 - 0 0 0 0 0 0 0 0 0 0 0 0
39852 - 0 0 0 0 0 0 0 0 0 0 0 0
39853 - 0 0 0 0 0 0 0 0 0 0 0 0
39854 - 0 0 0 0 0 0 0 0 0 0 0 0
39855 - 0 0 0 0 0 0 6 6 6 18 18 18
39856 - 54 54 54 62 62 62 2 2 6 2 2 6
39857 - 2 2 6 30 30 30 46 46 46 70 70 70
39858 -250 250 250 253 253 253 253 253 253 253 253 253
39859 -253 253 253 253 253 253 253 253 253 253 253 253
39860 -253 253 253 253 253 253 231 231 231 246 246 246
39861 -253 253 253 253 253 253 253 253 253 253 253 253
39862 -253 253 253 253 253 253 253 253 253 253 253 253
39863 -253 253 253 253 253 253 253 253 253 253 253 253
39864 -253 253 253 253 253 253 253 253 253 253 253 253
39865 -253 253 253 253 253 253 226 226 226 10 10 10
39866 - 2 2 6 6 6 6 30 30 30 2 2 6
39867 - 2 2 6 2 2 6 2 2 6 2 2 6
39868 - 2 2 6 66 66 66 58 58 58 22 22 22
39869 - 6 6 6 0 0 0 0 0 0 0 0 0
39870 - 0 0 0 0 0 0 0 0 0 0 0 0
39871 - 0 0 0 0 0 0 0 0 0 0 0 0
39872 - 0 0 0 0 0 0 0 0 0 0 0 0
39873 - 0 0 0 0 0 0 0 0 0 0 0 0
39874 - 0 0 0 0 0 0 0 0 0 0 0 0
39875 - 0 0 0 0 0 0 6 6 6 22 22 22
39876 - 58 58 58 62 62 62 2 2 6 2 2 6
39877 - 2 2 6 2 2 6 30 30 30 78 78 78
39878 -250 250 250 253 253 253 253 253 253 253 253 253
39879 -253 253 253 253 253 253 253 253 253 253 253 253
39880 -253 253 253 253 253 253 231 231 231 246 246 246
39881 -253 253 253 253 253 253 253 253 253 253 253 253
39882 -253 253 253 253 253 253 253 253 253 253 253 253
39883 -253 253 253 253 253 253 253 253 253 253 253 253
39884 -253 253 253 253 253 253 253 253 253 253 253 253
39885 -253 253 253 253 253 253 206 206 206 2 2 6
39886 - 22 22 22 34 34 34 18 14 6 22 22 22
39887 - 26 26 26 18 18 18 6 6 6 2 2 6
39888 - 2 2 6 82 82 82 54 54 54 18 18 18
39889 - 6 6 6 0 0 0 0 0 0 0 0 0
39890 - 0 0 0 0 0 0 0 0 0 0 0 0
39891 - 0 0 0 0 0 0 0 0 0 0 0 0
39892 - 0 0 0 0 0 0 0 0 0 0 0 0
39893 - 0 0 0 0 0 0 0 0 0 0 0 0
39894 - 0 0 0 0 0 0 0 0 0 0 0 0
39895 - 0 0 0 0 0 0 6 6 6 26 26 26
39896 - 62 62 62 106 106 106 74 54 14 185 133 11
39897 -210 162 10 121 92 8 6 6 6 62 62 62
39898 -238 238 238 253 253 253 253 253 253 253 253 253
39899 -253 253 253 253 253 253 253 253 253 253 253 253
39900 -253 253 253 253 253 253 231 231 231 246 246 246
39901 -253 253 253 253 253 253 253 253 253 253 253 253
39902 -253 253 253 253 253 253 253 253 253 253 253 253
39903 -253 253 253 253 253 253 253 253 253 253 253 253
39904 -253 253 253 253 253 253 253 253 253 253 253 253
39905 -253 253 253 253 253 253 158 158 158 18 18 18
39906 - 14 14 14 2 2 6 2 2 6 2 2 6
39907 - 6 6 6 18 18 18 66 66 66 38 38 38
39908 - 6 6 6 94 94 94 50 50 50 18 18 18
39909 - 6 6 6 0 0 0 0 0 0 0 0 0
39910 - 0 0 0 0 0 0 0 0 0 0 0 0
39911 - 0 0 0 0 0 0 0 0 0 0 0 0
39912 - 0 0 0 0 0 0 0 0 0 0 0 0
39913 - 0 0 0 0 0 0 0 0 0 0 0 0
39914 - 0 0 0 0 0 0 0 0 0 6 6 6
39915 - 10 10 10 10 10 10 18 18 18 38 38 38
39916 - 78 78 78 142 134 106 216 158 10 242 186 14
39917 -246 190 14 246 190 14 156 118 10 10 10 10
39918 - 90 90 90 238 238 238 253 253 253 253 253 253
39919 -253 253 253 253 253 253 253 253 253 253 253 253
39920 -253 253 253 253 253 253 231 231 231 250 250 250
39921 -253 253 253 253 253 253 253 253 253 253 253 253
39922 -253 253 253 253 253 253 253 253 253 253 253 253
39923 -253 253 253 253 253 253 253 253 253 253 253 253
39924 -253 253 253 253 253 253 253 253 253 246 230 190
39925 -238 204 91 238 204 91 181 142 44 37 26 9
39926 - 2 2 6 2 2 6 2 2 6 2 2 6
39927 - 2 2 6 2 2 6 38 38 38 46 46 46
39928 - 26 26 26 106 106 106 54 54 54 18 18 18
39929 - 6 6 6 0 0 0 0 0 0 0 0 0
39930 - 0 0 0 0 0 0 0 0 0 0 0 0
39931 - 0 0 0 0 0 0 0 0 0 0 0 0
39932 - 0 0 0 0 0 0 0 0 0 0 0 0
39933 - 0 0 0 0 0 0 0 0 0 0 0 0
39934 - 0 0 0 6 6 6 14 14 14 22 22 22
39935 - 30 30 30 38 38 38 50 50 50 70 70 70
39936 -106 106 106 190 142 34 226 170 11 242 186 14
39937 -246 190 14 246 190 14 246 190 14 154 114 10
39938 - 6 6 6 74 74 74 226 226 226 253 253 253
39939 -253 253 253 253 253 253 253 253 253 253 253 253
39940 -253 253 253 253 253 253 231 231 231 250 250 250
39941 -253 253 253 253 253 253 253 253 253 253 253 253
39942 -253 253 253 253 253 253 253 253 253 253 253 253
39943 -253 253 253 253 253 253 253 253 253 253 253 253
39944 -253 253 253 253 253 253 253 253 253 228 184 62
39945 -241 196 14 241 208 19 232 195 16 38 30 10
39946 - 2 2 6 2 2 6 2 2 6 2 2 6
39947 - 2 2 6 6 6 6 30 30 30 26 26 26
39948 -203 166 17 154 142 90 66 66 66 26 26 26
39949 - 6 6 6 0 0 0 0 0 0 0 0 0
39950 - 0 0 0 0 0 0 0 0 0 0 0 0
39951 - 0 0 0 0 0 0 0 0 0 0 0 0
39952 - 0 0 0 0 0 0 0 0 0 0 0 0
39953 - 0 0 0 0 0 0 0 0 0 0 0 0
39954 - 6 6 6 18 18 18 38 38 38 58 58 58
39955 - 78 78 78 86 86 86 101 101 101 123 123 123
39956 -175 146 61 210 150 10 234 174 13 246 186 14
39957 -246 190 14 246 190 14 246 190 14 238 190 10
39958 -102 78 10 2 2 6 46 46 46 198 198 198
39959 -253 253 253 253 253 253 253 253 253 253 253 253
39960 -253 253 253 253 253 253 234 234 234 242 242 242
39961 -253 253 253 253 253 253 253 253 253 253 253 253
39962 -253 253 253 253 253 253 253 253 253 253 253 253
39963 -253 253 253 253 253 253 253 253 253 253 253 253
39964 -253 253 253 253 253 253 253 253 253 224 178 62
39965 -242 186 14 241 196 14 210 166 10 22 18 6
39966 - 2 2 6 2 2 6 2 2 6 2 2 6
39967 - 2 2 6 2 2 6 6 6 6 121 92 8
39968 -238 202 15 232 195 16 82 82 82 34 34 34
39969 - 10 10 10 0 0 0 0 0 0 0 0 0
39970 - 0 0 0 0 0 0 0 0 0 0 0 0
39971 - 0 0 0 0 0 0 0 0 0 0 0 0
39972 - 0 0 0 0 0 0 0 0 0 0 0 0
39973 - 0 0 0 0 0 0 0 0 0 0 0 0
39974 - 14 14 14 38 38 38 70 70 70 154 122 46
39975 -190 142 34 200 144 11 197 138 11 197 138 11
39976 -213 154 11 226 170 11 242 186 14 246 190 14
39977 -246 190 14 246 190 14 246 190 14 246 190 14
39978 -225 175 15 46 32 6 2 2 6 22 22 22
39979 -158 158 158 250 250 250 253 253 253 253 253 253
39980 -253 253 253 253 253 253 253 253 253 253 253 253
39981 -253 253 253 253 253 253 253 253 253 253 253 253
39982 -253 253 253 253 253 253 253 253 253 253 253 253
39983 -253 253 253 253 253 253 253 253 253 253 253 253
39984 -253 253 253 250 250 250 242 242 242 224 178 62
39985 -239 182 13 236 186 11 213 154 11 46 32 6
39986 - 2 2 6 2 2 6 2 2 6 2 2 6
39987 - 2 2 6 2 2 6 61 42 6 225 175 15
39988 -238 190 10 236 186 11 112 100 78 42 42 42
39989 - 14 14 14 0 0 0 0 0 0 0 0 0
39990 - 0 0 0 0 0 0 0 0 0 0 0 0
39991 - 0 0 0 0 0 0 0 0 0 0 0 0
39992 - 0 0 0 0 0 0 0 0 0 0 0 0
39993 - 0 0 0 0 0 0 0 0 0 6 6 6
39994 - 22 22 22 54 54 54 154 122 46 213 154 11
39995 -226 170 11 230 174 11 226 170 11 226 170 11
39996 -236 178 12 242 186 14 246 190 14 246 190 14
39997 -246 190 14 246 190 14 246 190 14 246 190 14
39998 -241 196 14 184 144 12 10 10 10 2 2 6
39999 - 6 6 6 116 116 116 242 242 242 253 253 253
40000 -253 253 253 253 253 253 253 253 253 253 253 253
40001 -253 253 253 253 253 253 253 253 253 253 253 253
40002 -253 253 253 253 253 253 253 253 253 253 253 253
40003 -253 253 253 253 253 253 253 253 253 253 253 253
40004 -253 253 253 231 231 231 198 198 198 214 170 54
40005 -236 178 12 236 178 12 210 150 10 137 92 6
40006 - 18 14 6 2 2 6 2 2 6 2 2 6
40007 - 6 6 6 70 47 6 200 144 11 236 178 12
40008 -239 182 13 239 182 13 124 112 88 58 58 58
40009 - 22 22 22 6 6 6 0 0 0 0 0 0
40010 - 0 0 0 0 0 0 0 0 0 0 0 0
40011 - 0 0 0 0 0 0 0 0 0 0 0 0
40012 - 0 0 0 0 0 0 0 0 0 0 0 0
40013 - 0 0 0 0 0 0 0 0 0 10 10 10
40014 - 30 30 30 70 70 70 180 133 36 226 170 11
40015 -239 182 13 242 186 14 242 186 14 246 186 14
40016 -246 190 14 246 190 14 246 190 14 246 190 14
40017 -246 190 14 246 190 14 246 190 14 246 190 14
40018 -246 190 14 232 195 16 98 70 6 2 2 6
40019 - 2 2 6 2 2 6 66 66 66 221 221 221
40020 -253 253 253 253 253 253 253 253 253 253 253 253
40021 -253 253 253 253 253 253 253 253 253 253 253 253
40022 -253 253 253 253 253 253 253 253 253 253 253 253
40023 -253 253 253 253 253 253 253 253 253 253 253 253
40024 -253 253 253 206 206 206 198 198 198 214 166 58
40025 -230 174 11 230 174 11 216 158 10 192 133 9
40026 -163 110 8 116 81 8 102 78 10 116 81 8
40027 -167 114 7 197 138 11 226 170 11 239 182 13
40028 -242 186 14 242 186 14 162 146 94 78 78 78
40029 - 34 34 34 14 14 14 6 6 6 0 0 0
40030 - 0 0 0 0 0 0 0 0 0 0 0 0
40031 - 0 0 0 0 0 0 0 0 0 0 0 0
40032 - 0 0 0 0 0 0 0 0 0 0 0 0
40033 - 0 0 0 0 0 0 0 0 0 6 6 6
40034 - 30 30 30 78 78 78 190 142 34 226 170 11
40035 -239 182 13 246 190 14 246 190 14 246 190 14
40036 -246 190 14 246 190 14 246 190 14 246 190 14
40037 -246 190 14 246 190 14 246 190 14 246 190 14
40038 -246 190 14 241 196 14 203 166 17 22 18 6
40039 - 2 2 6 2 2 6 2 2 6 38 38 38
40040 -218 218 218 253 253 253 253 253 253 253 253 253
40041 -253 253 253 253 253 253 253 253 253 253 253 253
40042 -253 253 253 253 253 253 253 253 253 253 253 253
40043 -253 253 253 253 253 253 253 253 253 253 253 253
40044 -250 250 250 206 206 206 198 198 198 202 162 69
40045 -226 170 11 236 178 12 224 166 10 210 150 10
40046 -200 144 11 197 138 11 192 133 9 197 138 11
40047 -210 150 10 226 170 11 242 186 14 246 190 14
40048 -246 190 14 246 186 14 225 175 15 124 112 88
40049 - 62 62 62 30 30 30 14 14 14 6 6 6
40050 - 0 0 0 0 0 0 0 0 0 0 0 0
40051 - 0 0 0 0 0 0 0 0 0 0 0 0
40052 - 0 0 0 0 0 0 0 0 0 0 0 0
40053 - 0 0 0 0 0 0 0 0 0 10 10 10
40054 - 30 30 30 78 78 78 174 135 50 224 166 10
40055 -239 182 13 246 190 14 246 190 14 246 190 14
40056 -246 190 14 246 190 14 246 190 14 246 190 14
40057 -246 190 14 246 190 14 246 190 14 246 190 14
40058 -246 190 14 246 190 14 241 196 14 139 102 15
40059 - 2 2 6 2 2 6 2 2 6 2 2 6
40060 - 78 78 78 250 250 250 253 253 253 253 253 253
40061 -253 253 253 253 253 253 253 253 253 253 253 253
40062 -253 253 253 253 253 253 253 253 253 253 253 253
40063 -253 253 253 253 253 253 253 253 253 253 253 253
40064 -250 250 250 214 214 214 198 198 198 190 150 46
40065 -219 162 10 236 178 12 234 174 13 224 166 10
40066 -216 158 10 213 154 11 213 154 11 216 158 10
40067 -226 170 11 239 182 13 246 190 14 246 190 14
40068 -246 190 14 246 190 14 242 186 14 206 162 42
40069 -101 101 101 58 58 58 30 30 30 14 14 14
40070 - 6 6 6 0 0 0 0 0 0 0 0 0
40071 - 0 0 0 0 0 0 0 0 0 0 0 0
40072 - 0 0 0 0 0 0 0 0 0 0 0 0
40073 - 0 0 0 0 0 0 0 0 0 10 10 10
40074 - 30 30 30 74 74 74 174 135 50 216 158 10
40075 -236 178 12 246 190 14 246 190 14 246 190 14
40076 -246 190 14 246 190 14 246 190 14 246 190 14
40077 -246 190 14 246 190 14 246 190 14 246 190 14
40078 -246 190 14 246 190 14 241 196 14 226 184 13
40079 - 61 42 6 2 2 6 2 2 6 2 2 6
40080 - 22 22 22 238 238 238 253 253 253 253 253 253
40081 -253 253 253 253 253 253 253 253 253 253 253 253
40082 -253 253 253 253 253 253 253 253 253 253 253 253
40083 -253 253 253 253 253 253 253 253 253 253 253 253
40084 -253 253 253 226 226 226 187 187 187 180 133 36
40085 -216 158 10 236 178 12 239 182 13 236 178 12
40086 -230 174 11 226 170 11 226 170 11 230 174 11
40087 -236 178 12 242 186 14 246 190 14 246 190 14
40088 -246 190 14 246 190 14 246 186 14 239 182 13
40089 -206 162 42 106 106 106 66 66 66 34 34 34
40090 - 14 14 14 6 6 6 0 0 0 0 0 0
40091 - 0 0 0 0 0 0 0 0 0 0 0 0
40092 - 0 0 0 0 0 0 0 0 0 0 0 0
40093 - 0 0 0 0 0 0 0 0 0 6 6 6
40094 - 26 26 26 70 70 70 163 133 67 213 154 11
40095 -236 178 12 246 190 14 246 190 14 246 190 14
40096 -246 190 14 246 190 14 246 190 14 246 190 14
40097 -246 190 14 246 190 14 246 190 14 246 190 14
40098 -246 190 14 246 190 14 246 190 14 241 196 14
40099 -190 146 13 18 14 6 2 2 6 2 2 6
40100 - 46 46 46 246 246 246 253 253 253 253 253 253
40101 -253 253 253 253 253 253 253 253 253 253 253 253
40102 -253 253 253 253 253 253 253 253 253 253 253 253
40103 -253 253 253 253 253 253 253 253 253 253 253 253
40104 -253 253 253 221 221 221 86 86 86 156 107 11
40105 -216 158 10 236 178 12 242 186 14 246 186 14
40106 -242 186 14 239 182 13 239 182 13 242 186 14
40107 -242 186 14 246 186 14 246 190 14 246 190 14
40108 -246 190 14 246 190 14 246 190 14 246 190 14
40109 -242 186 14 225 175 15 142 122 72 66 66 66
40110 - 30 30 30 10 10 10 0 0 0 0 0 0
40111 - 0 0 0 0 0 0 0 0 0 0 0 0
40112 - 0 0 0 0 0 0 0 0 0 0 0 0
40113 - 0 0 0 0 0 0 0 0 0 6 6 6
40114 - 26 26 26 70 70 70 163 133 67 210 150 10
40115 -236 178 12 246 190 14 246 190 14 246 190 14
40116 -246 190 14 246 190 14 246 190 14 246 190 14
40117 -246 190 14 246 190 14 246 190 14 246 190 14
40118 -246 190 14 246 190 14 246 190 14 246 190 14
40119 -232 195 16 121 92 8 34 34 34 106 106 106
40120 -221 221 221 253 253 253 253 253 253 253 253 253
40121 -253 253 253 253 253 253 253 253 253 253 253 253
40122 -253 253 253 253 253 253 253 253 253 253 253 253
40123 -253 253 253 253 253 253 253 253 253 253 253 253
40124 -242 242 242 82 82 82 18 14 6 163 110 8
40125 -216 158 10 236 178 12 242 186 14 246 190 14
40126 -246 190 14 246 190 14 246 190 14 246 190 14
40127 -246 190 14 246 190 14 246 190 14 246 190 14
40128 -246 190 14 246 190 14 246 190 14 246 190 14
40129 -246 190 14 246 190 14 242 186 14 163 133 67
40130 - 46 46 46 18 18 18 6 6 6 0 0 0
40131 - 0 0 0 0 0 0 0 0 0 0 0 0
40132 - 0 0 0 0 0 0 0 0 0 0 0 0
40133 - 0 0 0 0 0 0 0 0 0 10 10 10
40134 - 30 30 30 78 78 78 163 133 67 210 150 10
40135 -236 178 12 246 186 14 246 190 14 246 190 14
40136 -246 190 14 246 190 14 246 190 14 246 190 14
40137 -246 190 14 246 190 14 246 190 14 246 190 14
40138 -246 190 14 246 190 14 246 190 14 246 190 14
40139 -241 196 14 215 174 15 190 178 144 253 253 253
40140 -253 253 253 253 253 253 253 253 253 253 253 253
40141 -253 253 253 253 253 253 253 253 253 253 253 253
40142 -253 253 253 253 253 253 253 253 253 253 253 253
40143 -253 253 253 253 253 253 253 253 253 218 218 218
40144 - 58 58 58 2 2 6 22 18 6 167 114 7
40145 -216 158 10 236 178 12 246 186 14 246 190 14
40146 -246 190 14 246 190 14 246 190 14 246 190 14
40147 -246 190 14 246 190 14 246 190 14 246 190 14
40148 -246 190 14 246 190 14 246 190 14 246 190 14
40149 -246 190 14 246 186 14 242 186 14 190 150 46
40150 - 54 54 54 22 22 22 6 6 6 0 0 0
40151 - 0 0 0 0 0 0 0 0 0 0 0 0
40152 - 0 0 0 0 0 0 0 0 0 0 0 0
40153 - 0 0 0 0 0 0 0 0 0 14 14 14
40154 - 38 38 38 86 86 86 180 133 36 213 154 11
40155 -236 178 12 246 186 14 246 190 14 246 190 14
40156 -246 190 14 246 190 14 246 190 14 246 190 14
40157 -246 190 14 246 190 14 246 190 14 246 190 14
40158 -246 190 14 246 190 14 246 190 14 246 190 14
40159 -246 190 14 232 195 16 190 146 13 214 214 214
40160 -253 253 253 253 253 253 253 253 253 253 253 253
40161 -253 253 253 253 253 253 253 253 253 253 253 253
40162 -253 253 253 253 253 253 253 253 253 253 253 253
40163 -253 253 253 250 250 250 170 170 170 26 26 26
40164 - 2 2 6 2 2 6 37 26 9 163 110 8
40165 -219 162 10 239 182 13 246 186 14 246 190 14
40166 -246 190 14 246 190 14 246 190 14 246 190 14
40167 -246 190 14 246 190 14 246 190 14 246 190 14
40168 -246 190 14 246 190 14 246 190 14 246 190 14
40169 -246 186 14 236 178 12 224 166 10 142 122 72
40170 - 46 46 46 18 18 18 6 6 6 0 0 0
40171 - 0 0 0 0 0 0 0 0 0 0 0 0
40172 - 0 0 0 0 0 0 0 0 0 0 0 0
40173 - 0 0 0 0 0 0 6 6 6 18 18 18
40174 - 50 50 50 109 106 95 192 133 9 224 166 10
40175 -242 186 14 246 190 14 246 190 14 246 190 14
40176 -246 190 14 246 190 14 246 190 14 246 190 14
40177 -246 190 14 246 190 14 246 190 14 246 190 14
40178 -246 190 14 246 190 14 246 190 14 246 190 14
40179 -242 186 14 226 184 13 210 162 10 142 110 46
40180 -226 226 226 253 253 253 253 253 253 253 253 253
40181 -253 253 253 253 253 253 253 253 253 253 253 253
40182 -253 253 253 253 253 253 253 253 253 253 253 253
40183 -198 198 198 66 66 66 2 2 6 2 2 6
40184 - 2 2 6 2 2 6 50 34 6 156 107 11
40185 -219 162 10 239 182 13 246 186 14 246 190 14
40186 -246 190 14 246 190 14 246 190 14 246 190 14
40187 -246 190 14 246 190 14 246 190 14 246 190 14
40188 -246 190 14 246 190 14 246 190 14 242 186 14
40189 -234 174 13 213 154 11 154 122 46 66 66 66
40190 - 30 30 30 10 10 10 0 0 0 0 0 0
40191 - 0 0 0 0 0 0 0 0 0 0 0 0
40192 - 0 0 0 0 0 0 0 0 0 0 0 0
40193 - 0 0 0 0 0 0 6 6 6 22 22 22
40194 - 58 58 58 154 121 60 206 145 10 234 174 13
40195 -242 186 14 246 186 14 246 190 14 246 190 14
40196 -246 190 14 246 190 14 246 190 14 246 190 14
40197 -246 190 14 246 190 14 246 190 14 246 190 14
40198 -246 190 14 246 190 14 246 190 14 246 190 14
40199 -246 186 14 236 178 12 210 162 10 163 110 8
40200 - 61 42 6 138 138 138 218 218 218 250 250 250
40201 -253 253 253 253 253 253 253 253 253 250 250 250
40202 -242 242 242 210 210 210 144 144 144 66 66 66
40203 - 6 6 6 2 2 6 2 2 6 2 2 6
40204 - 2 2 6 2 2 6 61 42 6 163 110 8
40205 -216 158 10 236 178 12 246 190 14 246 190 14
40206 -246 190 14 246 190 14 246 190 14 246 190 14
40207 -246 190 14 246 190 14 246 190 14 246 190 14
40208 -246 190 14 239 182 13 230 174 11 216 158 10
40209 -190 142 34 124 112 88 70 70 70 38 38 38
40210 - 18 18 18 6 6 6 0 0 0 0 0 0
40211 - 0 0 0 0 0 0 0 0 0 0 0 0
40212 - 0 0 0 0 0 0 0 0 0 0 0 0
40213 - 0 0 0 0 0 0 6 6 6 22 22 22
40214 - 62 62 62 168 124 44 206 145 10 224 166 10
40215 -236 178 12 239 182 13 242 186 14 242 186 14
40216 -246 186 14 246 190 14 246 190 14 246 190 14
40217 -246 190 14 246 190 14 246 190 14 246 190 14
40218 -246 190 14 246 190 14 246 190 14 246 190 14
40219 -246 190 14 236 178 12 216 158 10 175 118 6
40220 - 80 54 7 2 2 6 6 6 6 30 30 30
40221 - 54 54 54 62 62 62 50 50 50 38 38 38
40222 - 14 14 14 2 2 6 2 2 6 2 2 6
40223 - 2 2 6 2 2 6 2 2 6 2 2 6
40224 - 2 2 6 6 6 6 80 54 7 167 114 7
40225 -213 154 11 236 178 12 246 190 14 246 190 14
40226 -246 190 14 246 190 14 246 190 14 246 190 14
40227 -246 190 14 242 186 14 239 182 13 239 182 13
40228 -230 174 11 210 150 10 174 135 50 124 112 88
40229 - 82 82 82 54 54 54 34 34 34 18 18 18
40230 - 6 6 6 0 0 0 0 0 0 0 0 0
40231 - 0 0 0 0 0 0 0 0 0 0 0 0
40232 - 0 0 0 0 0 0 0 0 0 0 0 0
40233 - 0 0 0 0 0 0 6 6 6 18 18 18
40234 - 50 50 50 158 118 36 192 133 9 200 144 11
40235 -216 158 10 219 162 10 224 166 10 226 170 11
40236 -230 174 11 236 178 12 239 182 13 239 182 13
40237 -242 186 14 246 186 14 246 190 14 246 190 14
40238 -246 190 14 246 190 14 246 190 14 246 190 14
40239 -246 186 14 230 174 11 210 150 10 163 110 8
40240 -104 69 6 10 10 10 2 2 6 2 2 6
40241 - 2 2 6 2 2 6 2 2 6 2 2 6
40242 - 2 2 6 2 2 6 2 2 6 2 2 6
40243 - 2 2 6 2 2 6 2 2 6 2 2 6
40244 - 2 2 6 6 6 6 91 60 6 167 114 7
40245 -206 145 10 230 174 11 242 186 14 246 190 14
40246 -246 190 14 246 190 14 246 186 14 242 186 14
40247 -239 182 13 230 174 11 224 166 10 213 154 11
40248 -180 133 36 124 112 88 86 86 86 58 58 58
40249 - 38 38 38 22 22 22 10 10 10 6 6 6
40250 - 0 0 0 0 0 0 0 0 0 0 0 0
40251 - 0 0 0 0 0 0 0 0 0 0 0 0
40252 - 0 0 0 0 0 0 0 0 0 0 0 0
40253 - 0 0 0 0 0 0 0 0 0 14 14 14
40254 - 34 34 34 70 70 70 138 110 50 158 118 36
40255 -167 114 7 180 123 7 192 133 9 197 138 11
40256 -200 144 11 206 145 10 213 154 11 219 162 10
40257 -224 166 10 230 174 11 239 182 13 242 186 14
40258 -246 186 14 246 186 14 246 186 14 246 186 14
40259 -239 182 13 216 158 10 185 133 11 152 99 6
40260 -104 69 6 18 14 6 2 2 6 2 2 6
40261 - 2 2 6 2 2 6 2 2 6 2 2 6
40262 - 2 2 6 2 2 6 2 2 6 2 2 6
40263 - 2 2 6 2 2 6 2 2 6 2 2 6
40264 - 2 2 6 6 6 6 80 54 7 152 99 6
40265 -192 133 9 219 162 10 236 178 12 239 182 13
40266 -246 186 14 242 186 14 239 182 13 236 178 12
40267 -224 166 10 206 145 10 192 133 9 154 121 60
40268 - 94 94 94 62 62 62 42 42 42 22 22 22
40269 - 14 14 14 6 6 6 0 0 0 0 0 0
40270 - 0 0 0 0 0 0 0 0 0 0 0 0
40271 - 0 0 0 0 0 0 0 0 0 0 0 0
40272 - 0 0 0 0 0 0 0 0 0 0 0 0
40273 - 0 0 0 0 0 0 0 0 0 6 6 6
40274 - 18 18 18 34 34 34 58 58 58 78 78 78
40275 -101 98 89 124 112 88 142 110 46 156 107 11
40276 -163 110 8 167 114 7 175 118 6 180 123 7
40277 -185 133 11 197 138 11 210 150 10 219 162 10
40278 -226 170 11 236 178 12 236 178 12 234 174 13
40279 -219 162 10 197 138 11 163 110 8 130 83 6
40280 - 91 60 6 10 10 10 2 2 6 2 2 6
40281 - 18 18 18 38 38 38 38 38 38 38 38 38
40282 - 38 38 38 38 38 38 38 38 38 38 38 38
40283 - 38 38 38 38 38 38 26 26 26 2 2 6
40284 - 2 2 6 6 6 6 70 47 6 137 92 6
40285 -175 118 6 200 144 11 219 162 10 230 174 11
40286 -234 174 13 230 174 11 219 162 10 210 150 10
40287 -192 133 9 163 110 8 124 112 88 82 82 82
40288 - 50 50 50 30 30 30 14 14 14 6 6 6
40289 - 0 0 0 0 0 0 0 0 0 0 0 0
40290 - 0 0 0 0 0 0 0 0 0 0 0 0
40291 - 0 0 0 0 0 0 0 0 0 0 0 0
40292 - 0 0 0 0 0 0 0 0 0 0 0 0
40293 - 0 0 0 0 0 0 0 0 0 0 0 0
40294 - 6 6 6 14 14 14 22 22 22 34 34 34
40295 - 42 42 42 58 58 58 74 74 74 86 86 86
40296 -101 98 89 122 102 70 130 98 46 121 87 25
40297 -137 92 6 152 99 6 163 110 8 180 123 7
40298 -185 133 11 197 138 11 206 145 10 200 144 11
40299 -180 123 7 156 107 11 130 83 6 104 69 6
40300 - 50 34 6 54 54 54 110 110 110 101 98 89
40301 - 86 86 86 82 82 82 78 78 78 78 78 78
40302 - 78 78 78 78 78 78 78 78 78 78 78 78
40303 - 78 78 78 82 82 82 86 86 86 94 94 94
40304 -106 106 106 101 101 101 86 66 34 124 80 6
40305 -156 107 11 180 123 7 192 133 9 200 144 11
40306 -206 145 10 200 144 11 192 133 9 175 118 6
40307 -139 102 15 109 106 95 70 70 70 42 42 42
40308 - 22 22 22 10 10 10 0 0 0 0 0 0
40309 - 0 0 0 0 0 0 0 0 0 0 0 0
40310 - 0 0 0 0 0 0 0 0 0 0 0 0
40311 - 0 0 0 0 0 0 0 0 0 0 0 0
40312 - 0 0 0 0 0 0 0 0 0 0 0 0
40313 - 0 0 0 0 0 0 0 0 0 0 0 0
40314 - 0 0 0 0 0 0 6 6 6 10 10 10
40315 - 14 14 14 22 22 22 30 30 30 38 38 38
40316 - 50 50 50 62 62 62 74 74 74 90 90 90
40317 -101 98 89 112 100 78 121 87 25 124 80 6
40318 -137 92 6 152 99 6 152 99 6 152 99 6
40319 -138 86 6 124 80 6 98 70 6 86 66 30
40320 -101 98 89 82 82 82 58 58 58 46 46 46
40321 - 38 38 38 34 34 34 34 34 34 34 34 34
40322 - 34 34 34 34 34 34 34 34 34 34 34 34
40323 - 34 34 34 34 34 34 38 38 38 42 42 42
40324 - 54 54 54 82 82 82 94 86 76 91 60 6
40325 -134 86 6 156 107 11 167 114 7 175 118 6
40326 -175 118 6 167 114 7 152 99 6 121 87 25
40327 -101 98 89 62 62 62 34 34 34 18 18 18
40328 - 6 6 6 0 0 0 0 0 0 0 0 0
40329 - 0 0 0 0 0 0 0 0 0 0 0 0
40330 - 0 0 0 0 0 0 0 0 0 0 0 0
40331 - 0 0 0 0 0 0 0 0 0 0 0 0
40332 - 0 0 0 0 0 0 0 0 0 0 0 0
40333 - 0 0 0 0 0 0 0 0 0 0 0 0
40334 - 0 0 0 0 0 0 0 0 0 0 0 0
40335 - 0 0 0 6 6 6 6 6 6 10 10 10
40336 - 18 18 18 22 22 22 30 30 30 42 42 42
40337 - 50 50 50 66 66 66 86 86 86 101 98 89
40338 -106 86 58 98 70 6 104 69 6 104 69 6
40339 -104 69 6 91 60 6 82 62 34 90 90 90
40340 - 62 62 62 38 38 38 22 22 22 14 14 14
40341 - 10 10 10 10 10 10 10 10 10 10 10 10
40342 - 10 10 10 10 10 10 6 6 6 10 10 10
40343 - 10 10 10 10 10 10 10 10 10 14 14 14
40344 - 22 22 22 42 42 42 70 70 70 89 81 66
40345 - 80 54 7 104 69 6 124 80 6 137 92 6
40346 -134 86 6 116 81 8 100 82 52 86 86 86
40347 - 58 58 58 30 30 30 14 14 14 6 6 6
40348 - 0 0 0 0 0 0 0 0 0 0 0 0
40349 - 0 0 0 0 0 0 0 0 0 0 0 0
40350 - 0 0 0 0 0 0 0 0 0 0 0 0
40351 - 0 0 0 0 0 0 0 0 0 0 0 0
40352 - 0 0 0 0 0 0 0 0 0 0 0 0
40353 - 0 0 0 0 0 0 0 0 0 0 0 0
40354 - 0 0 0 0 0 0 0 0 0 0 0 0
40355 - 0 0 0 0 0 0 0 0 0 0 0 0
40356 - 0 0 0 6 6 6 10 10 10 14 14 14
40357 - 18 18 18 26 26 26 38 38 38 54 54 54
40358 - 70 70 70 86 86 86 94 86 76 89 81 66
40359 - 89 81 66 86 86 86 74 74 74 50 50 50
40360 - 30 30 30 14 14 14 6 6 6 0 0 0
40361 - 0 0 0 0 0 0 0 0 0 0 0 0
40362 - 0 0 0 0 0 0 0 0 0 0 0 0
40363 - 0 0 0 0 0 0 0 0 0 0 0 0
40364 - 6 6 6 18 18 18 34 34 34 58 58 58
40365 - 82 82 82 89 81 66 89 81 66 89 81 66
40366 - 94 86 66 94 86 76 74 74 74 50 50 50
40367 - 26 26 26 14 14 14 6 6 6 0 0 0
40368 - 0 0 0 0 0 0 0 0 0 0 0 0
40369 - 0 0 0 0 0 0 0 0 0 0 0 0
40370 - 0 0 0 0 0 0 0 0 0 0 0 0
40371 - 0 0 0 0 0 0 0 0 0 0 0 0
40372 - 0 0 0 0 0 0 0 0 0 0 0 0
40373 - 0 0 0 0 0 0 0 0 0 0 0 0
40374 - 0 0 0 0 0 0 0 0 0 0 0 0
40375 - 0 0 0 0 0 0 0 0 0 0 0 0
40376 - 0 0 0 0 0 0 0 0 0 0 0 0
40377 - 6 6 6 6 6 6 14 14 14 18 18 18
40378 - 30 30 30 38 38 38 46 46 46 54 54 54
40379 - 50 50 50 42 42 42 30 30 30 18 18 18
40380 - 10 10 10 0 0 0 0 0 0 0 0 0
40381 - 0 0 0 0 0 0 0 0 0 0 0 0
40382 - 0 0 0 0 0 0 0 0 0 0 0 0
40383 - 0 0 0 0 0 0 0 0 0 0 0 0
40384 - 0 0 0 6 6 6 14 14 14 26 26 26
40385 - 38 38 38 50 50 50 58 58 58 58 58 58
40386 - 54 54 54 42 42 42 30 30 30 18 18 18
40387 - 10 10 10 0 0 0 0 0 0 0 0 0
40388 - 0 0 0 0 0 0 0 0 0 0 0 0
40389 - 0 0 0 0 0 0 0 0 0 0 0 0
40390 - 0 0 0 0 0 0 0 0 0 0 0 0
40391 - 0 0 0 0 0 0 0 0 0 0 0 0
40392 - 0 0 0 0 0 0 0 0 0 0 0 0
40393 - 0 0 0 0 0 0 0 0 0 0 0 0
40394 - 0 0 0 0 0 0 0 0 0 0 0 0
40395 - 0 0 0 0 0 0 0 0 0 0 0 0
40396 - 0 0 0 0 0 0 0 0 0 0 0 0
40397 - 0 0 0 0 0 0 0 0 0 6 6 6
40398 - 6 6 6 10 10 10 14 14 14 18 18 18
40399 - 18 18 18 14 14 14 10 10 10 6 6 6
40400 - 0 0 0 0 0 0 0 0 0 0 0 0
40401 - 0 0 0 0 0 0 0 0 0 0 0 0
40402 - 0 0 0 0 0 0 0 0 0 0 0 0
40403 - 0 0 0 0 0 0 0 0 0 0 0 0
40404 - 0 0 0 0 0 0 0 0 0 6 6 6
40405 - 14 14 14 18 18 18 22 22 22 22 22 22
40406 - 18 18 18 14 14 14 10 10 10 6 6 6
40407 - 0 0 0 0 0 0 0 0 0 0 0 0
40408 - 0 0 0 0 0 0 0 0 0 0 0 0
40409 - 0 0 0 0 0 0 0 0 0 0 0 0
40410 - 0 0 0 0 0 0 0 0 0 0 0 0
40411 - 0 0 0 0 0 0 0 0 0 0 0 0
40412 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40413 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40414 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40415 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40416 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40417 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40418 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40419 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40420 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40421 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40422 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40423 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40424 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40425 +4 4 4 4 4 4
40426 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40427 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40428 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40429 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40430 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40431 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40432 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40433 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40435 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40436 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40437 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40438 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40439 +4 4 4 4 4 4
40440 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40441 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40442 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40444 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40445 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40446 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40447 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40449 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40450 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40452 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40453 +4 4 4 4 4 4
40454 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40455 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40456 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40458 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40459 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40461 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40463 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40464 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40466 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40467 +4 4 4 4 4 4
40468 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40469 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40470 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40472 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40473 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40475 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40477 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40478 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40480 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40481 +4 4 4 4 4 4
40482 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40483 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40484 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40486 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40487 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40489 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40490 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40491 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40492 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40493 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40494 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40495 +4 4 4 4 4 4
40496 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40497 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40498 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40499 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40500 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40501 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40504 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40505 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
40506 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40507 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
40508 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40509 +4 4 4 4 4 4
40510 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40511 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40512 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40513 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40514 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40515 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40516 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40517 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40518 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40519 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40520 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40521 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40522 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40523 +4 4 4 4 4 4
40524 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40525 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40527 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40528 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40529 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40530 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40531 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40532 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40533 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40534 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40535 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40536 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40537 +4 4 4 4 4 4
40538 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40539 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40540 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40541 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40542 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40543 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40544 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40545 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40546 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40547 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40548 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40549 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40550 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40551 +4 4 4 4 4 4
40552 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40553 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40554 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40555 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40556 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40557 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40558 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40559 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40560 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40561 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40562 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40563 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40564 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40565 +4 4 4 4 4 4
40566 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40567 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40568 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40569 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40570 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40571 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40572 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40573 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40574 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40575 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40576 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40577 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40578 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40579 +4 4 4 4 4 4
40580 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40581 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40582 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40583 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40584 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40585 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40586 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40587 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40588 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40589 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40590 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40591 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40592 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40593 +4 4 4 4 4 4
40594 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40595 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40596 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40597 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40598 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40599 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40600 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40601 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40602 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40603 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40604 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40605 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40606 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40607 +4 4 4 4 4 4
40608 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40609 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40610 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40611 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40612 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40613 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40614 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40615 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40616 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40617 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40618 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40619 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40620 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40621 +4 4 4 4 4 4
40622 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40623 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40624 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40625 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40626 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40627 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40628 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40629 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40630 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40631 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40632 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40633 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40634 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40635 +4 4 4 4 4 4
40636 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40637 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40638 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40639 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40640 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40641 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40642 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40643 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40644 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40645 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40646 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40647 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40648 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40649 +4 4 4 4 4 4
40650 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40651 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40652 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40653 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40654 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40655 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40656 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40657 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40658 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40659 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40660 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40661 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40662 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40663 +0 0 0 4 4 4
40664 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40665 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40666 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40667 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40668 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40669 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40670 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40671 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40672 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40673 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40674 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40675 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40676 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40677 +2 0 0 0 0 0
40678 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40679 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40680 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40681 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40682 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40683 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40684 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40685 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40686 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40687 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40688 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40689 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40690 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40691 +37 38 37 0 0 0
40692 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40693 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40694 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40695 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40696 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40697 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40698 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40699 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40700 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40701 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40702 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40703 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40704 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40705 +85 115 134 4 0 0
40706 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40707 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40708 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40709 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40710 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40711 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40712 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40713 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40714 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40715 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40716 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40717 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40718 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40719 +60 73 81 4 0 0
40720 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40721 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40722 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40723 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40724 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40725 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40726 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40727 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40728 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40729 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40730 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40731 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40732 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40733 +16 19 21 4 0 0
40734 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40735 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40736 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40737 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40738 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40739 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40740 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40741 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40742 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40743 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40744 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40745 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40746 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40747 +4 0 0 4 3 3
40748 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40749 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40750 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40751 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40752 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40753 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40754 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40755 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40756 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40757 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40758 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40759 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40760 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40761 +3 2 2 4 4 4
40762 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40763 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40764 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40765 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40766 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40767 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40768 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40769 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40770 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40771 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40772 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40773 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40774 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40775 +4 4 4 4 4 4
40776 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40777 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40778 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40779 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40780 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40781 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40782 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40783 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40784 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40785 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40786 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40787 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40788 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40789 +4 4 4 4 4 4
40790 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40791 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40792 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40793 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40794 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40795 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40796 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40797 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40798 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40799 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40800 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40801 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40802 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40803 +5 5 5 5 5 5
40804 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40805 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40806 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40807 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40808 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40809 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40810 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40811 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40812 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40813 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40814 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40815 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40816 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40817 +5 5 5 4 4 4
40818 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40819 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40820 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40821 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40822 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40823 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40824 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40825 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40826 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40827 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40828 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40829 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40830 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40831 +4 4 4 4 4 4
40832 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40833 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40834 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40835 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40836 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40837 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40838 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40839 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40840 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40841 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40842 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40843 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40844 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40845 +4 4 4 4 4 4
40846 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40847 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40848 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40849 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40850 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40851 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40852 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40853 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40854 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40855 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40856 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40858 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40859 +4 4 4 4 4 4
40860 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40861 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40862 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40863 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40864 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40865 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40866 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40867 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40868 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40869 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40870 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40872 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40873 +4 4 4 4 4 4
40874 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40875 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40876 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40877 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40878 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40879 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40880 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40881 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40882 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40883 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40884 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40887 +4 4 4 4 4 4
40888 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40889 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40890 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40891 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40892 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40893 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40894 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40895 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40896 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40897 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40898 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40901 +4 4 4 4 4 4
40902 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40903 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40904 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40905 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40906 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40907 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40908 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40909 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40910 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40911 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40912 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40915 +4 4 4 4 4 4
40916 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40917 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40918 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40919 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40920 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40921 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40922 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40923 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40924 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40925 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40926 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40929 +4 4 4 4 4 4
40930 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40931 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40932 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40933 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40934 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40935 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40936 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40937 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40938 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40939 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40940 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40943 +4 4 4 4 4 4
40944 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40945 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40946 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40947 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40948 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40949 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40950 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40951 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40952 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40953 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40954 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40956 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40957 +4 4 4 4 4 4
40958 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40959 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40960 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40961 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40962 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40963 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40964 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40965 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40966 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40967 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40968 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40970 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40971 +4 4 4 4 4 4
40972 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40973 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40974 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40975 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40976 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40977 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40978 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40979 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40980 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40981 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40982 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40984 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40985 +4 4 4 4 4 4
40986 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40987 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40988 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40989 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40990 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40991 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40992 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40993 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40994 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40995 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40996 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40998 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40999 +4 4 4 4 4 4
41000 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
41001 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
41002 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
41003 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
41004 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
41005 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
41006 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
41007 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
41008 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41009 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41010 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41013 +4 4 4 4 4 4
41014 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
41015 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
41016 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41017 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
41018 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
41019 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
41020 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
41021 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
41022 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
41023 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41024 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41027 +4 4 4 4 4 4
41028 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41029 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
41030 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
41031 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
41032 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
41033 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
41034 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
41035 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
41036 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41037 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41038 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41041 +4 4 4 4 4 4
41042 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
41043 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
41044 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41045 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
41046 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
41047 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
41048 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
41049 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
41050 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
41051 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41052 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41055 +4 4 4 4 4 4
41056 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41057 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
41058 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
41059 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
41060 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
41061 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
41062 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
41063 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
41064 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41065 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41066 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41069 +4 4 4 4 4 4
41070 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41071 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
41072 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41073 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
41074 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
41075 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
41076 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
41077 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
41078 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41079 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41080 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41083 +4 4 4 4 4 4
41084 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41085 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
41086 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
41087 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
41088 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
41089 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
41090 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41091 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
41092 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41093 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41094 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41097 +4 4 4 4 4 4
41098 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41099 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
41100 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
41101 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41102 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
41103 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
41104 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41105 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
41106 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41107 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41108 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41109 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41111 +4 4 4 4 4 4
41112 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41113 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
41114 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
41115 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
41116 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
41117 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
41118 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
41119 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
41120 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
41121 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41122 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41125 +4 4 4 4 4 4
41126 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41127 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
41128 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
41129 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
41130 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
41131 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
41132 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
41133 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
41134 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
41135 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41136 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41137 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41139 +4 4 4 4 4 4
41140 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
41141 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
41142 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
41143 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
41144 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41145 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
41146 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
41147 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
41148 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
41149 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41150 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41151 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41153 +4 4 4 4 4 4
41154 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41155 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
41156 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
41157 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
41158 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
41159 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
41160 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
41161 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
41162 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
41163 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41164 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41165 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41167 +4 4 4 4 4 4
41168 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
41169 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
41170 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
41171 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
41172 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
41173 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
41174 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
41175 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
41176 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
41177 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
41178 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41179 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41181 +4 4 4 4 4 4
41182 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
41183 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41184 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
41185 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
41186 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
41187 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
41188 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
41189 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
41190 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
41191 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
41192 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41193 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41195 +4 4 4 4 4 4
41196 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
41197 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41198 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
41199 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
41200 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
41201 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
41202 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41203 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
41204 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
41205 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
41206 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41207 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41208 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41209 +4 4 4 4 4 4
41210 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
41211 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
41212 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
41213 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
41214 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
41215 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
41216 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
41217 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
41218 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
41219 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
41220 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41222 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41223 +4 4 4 4 4 4
41224 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
41225 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
41226 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41227 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
41228 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
41229 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
41230 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
41231 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
41232 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
41233 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
41234 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41236 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41237 +4 4 4 4 4 4
41238 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41239 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
41240 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
41241 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
41242 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
41243 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
41244 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
41245 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
41246 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
41247 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41251 +4 4 4 4 4 4
41252 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
41253 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
41254 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
41255 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
41256 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
41257 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
41258 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
41259 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
41260 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
41261 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41265 +4 4 4 4 4 4
41266 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
41267 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
41268 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
41269 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
41270 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
41271 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
41272 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
41273 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
41274 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41275 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41279 +4 4 4 4 4 4
41280 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
41281 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41282 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
41283 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41284 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
41285 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
41286 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
41287 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
41288 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
41289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41293 +4 4 4 4 4 4
41294 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
41295 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
41296 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
41297 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
41298 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
41299 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
41300 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
41301 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
41302 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
41303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41307 +4 4 4 4 4 4
41308 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41309 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
41310 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
41311 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
41312 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
41313 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
41314 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
41315 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
41316 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41321 +4 4 4 4 4 4
41322 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
41323 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
41324 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41325 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
41326 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
41327 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
41328 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
41329 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
41330 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41335 +4 4 4 4 4 4
41336 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41337 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
41338 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
41339 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
41340 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
41341 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
41342 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
41343 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41349 +4 4 4 4 4 4
41350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41351 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
41352 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41353 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
41354 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
41355 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
41356 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
41357 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
41358 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41363 +4 4 4 4 4 4
41364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41365 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
41366 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
41367 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
41368 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
41369 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
41370 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
41371 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
41372 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41373 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41374 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41375 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41376 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41377 +4 4 4 4 4 4
41378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41379 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41380 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
41381 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41382 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
41383 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
41384 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
41385 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41386 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41387 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41388 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41389 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41390 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41391 +4 4 4 4 4 4
41392 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41393 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41394 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41395 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
41396 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
41397 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
41398 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
41399 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41400 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41401 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41402 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41403 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41404 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41405 +4 4 4 4 4 4
41406 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41407 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41408 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41409 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41410 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
41411 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
41412 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
41413 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41414 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41415 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41416 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41417 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41418 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41419 +4 4 4 4 4 4
41420 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41421 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41422 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41423 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41424 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41425 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
41426 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
41427 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41428 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41429 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41430 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41431 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41432 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41433 +4 4 4 4 4 4
41434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41435 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41436 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41437 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41438 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41439 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41440 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
41441 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41442 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41444 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41445 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41446 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41447 +4 4 4 4 4 4
41448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41449 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41450 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41451 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
41452 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
41453 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
41454 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
41455 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41456 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41458 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41459 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41461 +4 4 4 4 4 4
41462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41463 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41464 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41466 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
41467 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41468 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41469 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41470 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41472 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41473 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41475 +4 4 4 4 4 4
41476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41477 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41478 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41480 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
41481 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
41482 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41483 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41484 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41486 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41487 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41489 +4 4 4 4 4 4
41490 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41491 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41492 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41493 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41494 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
41495 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
41496 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41497 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41498 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41499 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41500 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41501 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41503 +4 4 4 4 4 4
41504 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41505 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41506 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41507 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41508 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
41509 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
41510 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41511 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41512 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41513 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41514 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41515 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41516 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41517 +4 4 4 4 4 4
41518 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41519 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41520 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41521 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41522 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41523 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
41524 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41525 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41527 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41528 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41529 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41530 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41531 +4 4 4 4 4 4
41532 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
41533 index a40c05e..785c583 100644
41534 --- a/drivers/video/udlfb.c
41535 +++ b/drivers/video/udlfb.c
41536 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
41537 dlfb_urb_completion(urb);
41538
41539 error:
41540 - atomic_add(bytes_sent, &dev->bytes_sent);
41541 - atomic_add(bytes_identical, &dev->bytes_identical);
41542 - atomic_add(width*height*2, &dev->bytes_rendered);
41543 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41544 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41545 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
41546 end_cycles = get_cycles();
41547 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41548 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41549 >> 10)), /* Kcycles */
41550 &dev->cpu_kcycles_used);
41551
41552 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41553 dlfb_urb_completion(urb);
41554
41555 error:
41556 - atomic_add(bytes_sent, &dev->bytes_sent);
41557 - atomic_add(bytes_identical, &dev->bytes_identical);
41558 - atomic_add(bytes_rendered, &dev->bytes_rendered);
41559 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41560 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41561 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41562 end_cycles = get_cycles();
41563 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41564 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41565 >> 10)), /* Kcycles */
41566 &dev->cpu_kcycles_used);
41567 }
41568 @@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41569 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41570 struct dlfb_data *dev = fb_info->par;
41571 return snprintf(buf, PAGE_SIZE, "%u\n",
41572 - atomic_read(&dev->bytes_rendered));
41573 + atomic_read_unchecked(&dev->bytes_rendered));
41574 }
41575
41576 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41577 @@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41578 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41579 struct dlfb_data *dev = fb_info->par;
41580 return snprintf(buf, PAGE_SIZE, "%u\n",
41581 - atomic_read(&dev->bytes_identical));
41582 + atomic_read_unchecked(&dev->bytes_identical));
41583 }
41584
41585 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41586 @@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41587 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41588 struct dlfb_data *dev = fb_info->par;
41589 return snprintf(buf, PAGE_SIZE, "%u\n",
41590 - atomic_read(&dev->bytes_sent));
41591 + atomic_read_unchecked(&dev->bytes_sent));
41592 }
41593
41594 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41595 @@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41596 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41597 struct dlfb_data *dev = fb_info->par;
41598 return snprintf(buf, PAGE_SIZE, "%u\n",
41599 - atomic_read(&dev->cpu_kcycles_used));
41600 + atomic_read_unchecked(&dev->cpu_kcycles_used));
41601 }
41602
41603 static ssize_t edid_show(
41604 @@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41605 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41606 struct dlfb_data *dev = fb_info->par;
41607
41608 - atomic_set(&dev->bytes_rendered, 0);
41609 - atomic_set(&dev->bytes_identical, 0);
41610 - atomic_set(&dev->bytes_sent, 0);
41611 - atomic_set(&dev->cpu_kcycles_used, 0);
41612 + atomic_set_unchecked(&dev->bytes_rendered, 0);
41613 + atomic_set_unchecked(&dev->bytes_identical, 0);
41614 + atomic_set_unchecked(&dev->bytes_sent, 0);
41615 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41616
41617 return count;
41618 }
41619 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41620 index 8408543..357841c 100644
41621 --- a/drivers/video/uvesafb.c
41622 +++ b/drivers/video/uvesafb.c
41623 @@ -19,6 +19,7 @@
41624 #include <linux/io.h>
41625 #include <linux/mutex.h>
41626 #include <linux/slab.h>
41627 +#include <linux/moduleloader.h>
41628 #include <video/edid.h>
41629 #include <video/uvesafb.h>
41630 #ifdef CONFIG_X86
41631 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
41632 NULL,
41633 };
41634
41635 - return call_usermodehelper(v86d_path, argv, envp, 1);
41636 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
41637 }
41638
41639 /*
41640 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41641 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41642 par->pmi_setpal = par->ypan = 0;
41643 } else {
41644 +
41645 +#ifdef CONFIG_PAX_KERNEXEC
41646 +#ifdef CONFIG_MODULES
41647 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41648 +#endif
41649 + if (!par->pmi_code) {
41650 + par->pmi_setpal = par->ypan = 0;
41651 + return 0;
41652 + }
41653 +#endif
41654 +
41655 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41656 + task->t.regs.edi);
41657 +
41658 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41659 + pax_open_kernel();
41660 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41661 + pax_close_kernel();
41662 +
41663 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41664 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41665 +#else
41666 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41667 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41668 +#endif
41669 +
41670 printk(KERN_INFO "uvesafb: protected mode interface info at "
41671 "%04x:%04x\n",
41672 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41673 @@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
41674 par->ypan = ypan;
41675
41676 if (par->pmi_setpal || par->ypan) {
41677 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
41678 if (__supported_pte_mask & _PAGE_NX) {
41679 par->pmi_setpal = par->ypan = 0;
41680 printk(KERN_WARNING "uvesafb: NX protection is actively."
41681 "We have better not to use the PMI.\n");
41682 - } else {
41683 + } else
41684 +#endif
41685 uvesafb_vbe_getpmi(task, par);
41686 - }
41687 }
41688 #else
41689 /* The protected mode interface is not available on non-x86. */
41690 @@ -1828,6 +1852,11 @@ out:
41691 if (par->vbe_modes)
41692 kfree(par->vbe_modes);
41693
41694 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41695 + if (par->pmi_code)
41696 + module_free_exec(NULL, par->pmi_code);
41697 +#endif
41698 +
41699 framebuffer_release(info);
41700 return err;
41701 }
41702 @@ -1854,6 +1883,12 @@ static int uvesafb_remove(struct platform_device *dev)
41703 kfree(par->vbe_state_orig);
41704 if (par->vbe_state_saved)
41705 kfree(par->vbe_state_saved);
41706 +
41707 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41708 + if (par->pmi_code)
41709 + module_free_exec(NULL, par->pmi_code);
41710 +#endif
41711 +
41712 }
41713
41714 framebuffer_release(info);
41715 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41716 index 501b340..86bd4cf 100644
41717 --- a/drivers/video/vesafb.c
41718 +++ b/drivers/video/vesafb.c
41719 @@ -9,6 +9,7 @@
41720 */
41721
41722 #include <linux/module.h>
41723 +#include <linux/moduleloader.h>
41724 #include <linux/kernel.h>
41725 #include <linux/errno.h>
41726 #include <linux/string.h>
41727 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41728 static int vram_total __initdata; /* Set total amount of memory */
41729 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41730 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41731 -static void (*pmi_start)(void) __read_mostly;
41732 -static void (*pmi_pal) (void) __read_mostly;
41733 +static void (*pmi_start)(void) __read_only;
41734 +static void (*pmi_pal) (void) __read_only;
41735 static int depth __read_mostly;
41736 static int vga_compat __read_mostly;
41737 /* --------------------------------------------------------------------- */
41738 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41739 unsigned int size_vmode;
41740 unsigned int size_remap;
41741 unsigned int size_total;
41742 + void *pmi_code = NULL;
41743
41744 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41745 return -ENODEV;
41746 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41747 size_remap = size_total;
41748 vesafb_fix.smem_len = size_remap;
41749
41750 -#ifndef __i386__
41751 - screen_info.vesapm_seg = 0;
41752 -#endif
41753 -
41754 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41755 printk(KERN_WARNING
41756 "vesafb: cannot reserve video memory at 0x%lx\n",
41757 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41758 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41759 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41760
41761 +#ifdef __i386__
41762 +
41763 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41764 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
41765 + if (!pmi_code)
41766 +#elif !defined(CONFIG_PAX_KERNEXEC)
41767 + if (0)
41768 +#endif
41769 +
41770 +#endif
41771 + screen_info.vesapm_seg = 0;
41772 +
41773 if (screen_info.vesapm_seg) {
41774 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41775 - screen_info.vesapm_seg,screen_info.vesapm_off);
41776 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41777 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41778 }
41779
41780 if (screen_info.vesapm_seg < 0xc000)
41781 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41782
41783 if (ypan || pmi_setpal) {
41784 unsigned short *pmi_base;
41785 +
41786 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41787 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41788 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41789 +
41790 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41791 + pax_open_kernel();
41792 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41793 +#else
41794 + pmi_code = pmi_base;
41795 +#endif
41796 +
41797 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41798 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41799 +
41800 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41801 + pmi_start = ktva_ktla(pmi_start);
41802 + pmi_pal = ktva_ktla(pmi_pal);
41803 + pax_close_kernel();
41804 +#endif
41805 +
41806 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41807 if (pmi_base[3]) {
41808 printk(KERN_INFO "vesafb: pmi: ports = ");
41809 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41810 info->node, info->fix.id);
41811 return 0;
41812 err:
41813 +
41814 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41815 + module_free_exec(NULL, pmi_code);
41816 +#endif
41817 +
41818 if (info->screen_base)
41819 iounmap(info->screen_base);
41820 framebuffer_release(info);
41821 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41822 index 88714ae..16c2e11 100644
41823 --- a/drivers/video/via/via_clock.h
41824 +++ b/drivers/video/via/via_clock.h
41825 @@ -56,7 +56,7 @@ struct via_clock {
41826
41827 void (*set_engine_pll_state)(u8 state);
41828 void (*set_engine_pll)(struct via_pll_config config);
41829 -};
41830 +} __no_const;
41831
41832
41833 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41834 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41835 index e56c934..fc22f4b 100644
41836 --- a/drivers/xen/xen-pciback/conf_space.h
41837 +++ b/drivers/xen/xen-pciback/conf_space.h
41838 @@ -44,15 +44,15 @@ struct config_field {
41839 struct {
41840 conf_dword_write write;
41841 conf_dword_read read;
41842 - } dw;
41843 + } __no_const dw;
41844 struct {
41845 conf_word_write write;
41846 conf_word_read read;
41847 - } w;
41848 + } __no_const w;
41849 struct {
41850 conf_byte_write write;
41851 conf_byte_read read;
41852 - } b;
41853 + } __no_const b;
41854 } u;
41855 struct list_head list;
41856 };
41857 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41858 index 014c8dd..6f3dfe6 100644
41859 --- a/fs/9p/vfs_inode.c
41860 +++ b/fs/9p/vfs_inode.c
41861 @@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41862 void
41863 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41864 {
41865 - char *s = nd_get_link(nd);
41866 + const char *s = nd_get_link(nd);
41867
41868 p9_debug(P9_DEBUG_VFS, " %s %s\n",
41869 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
41870 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41871 index e95d1b6..3454244 100644
41872 --- a/fs/Kconfig.binfmt
41873 +++ b/fs/Kconfig.binfmt
41874 @@ -89,7 +89,7 @@ config HAVE_AOUT
41875
41876 config BINFMT_AOUT
41877 tristate "Kernel support for a.out and ECOFF binaries"
41878 - depends on HAVE_AOUT
41879 + depends on HAVE_AOUT && BROKEN
41880 ---help---
41881 A.out (Assembler.OUTput) is a set of formats for libraries and
41882 executables used in the earliest versions of UNIX. Linux used
41883 diff --git a/fs/aio.c b/fs/aio.c
41884 index b9d64d8..86cb1d5 100644
41885 --- a/fs/aio.c
41886 +++ b/fs/aio.c
41887 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41888 size += sizeof(struct io_event) * nr_events;
41889 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41890
41891 - if (nr_pages < 0)
41892 + if (nr_pages <= 0)
41893 return -EINVAL;
41894
41895 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41896 @@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41897 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41898 {
41899 ssize_t ret;
41900 + struct iovec iovstack;
41901
41902 #ifdef CONFIG_COMPAT
41903 if (compat)
41904 ret = compat_rw_copy_check_uvector(type,
41905 (struct compat_iovec __user *)kiocb->ki_buf,
41906 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41907 + kiocb->ki_nbytes, 1, &iovstack,
41908 &kiocb->ki_iovec, 1);
41909 else
41910 #endif
41911 ret = rw_copy_check_uvector(type,
41912 (struct iovec __user *)kiocb->ki_buf,
41913 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41914 + kiocb->ki_nbytes, 1, &iovstack,
41915 &kiocb->ki_iovec, 1);
41916 if (ret < 0)
41917 goto out;
41918
41919 + if (kiocb->ki_iovec == &iovstack) {
41920 + kiocb->ki_inline_vec = iovstack;
41921 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
41922 + }
41923 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41924 kiocb->ki_cur_seg = 0;
41925 /* ki_nbytes/left now reflect bytes instead of segs */
41926 diff --git a/fs/attr.c b/fs/attr.c
41927 index 95053ad..2cc93ca 100644
41928 --- a/fs/attr.c
41929 +++ b/fs/attr.c
41930 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41931 unsigned long limit;
41932
41933 limit = rlimit(RLIMIT_FSIZE);
41934 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41935 if (limit != RLIM_INFINITY && offset > limit)
41936 goto out_sig;
41937 if (offset > inode->i_sb->s_maxbytes)
41938 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41939 index 9c098db..c755da5 100644
41940 --- a/fs/autofs4/waitq.c
41941 +++ b/fs/autofs4/waitq.c
41942 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
41943 {
41944 unsigned long sigpipe, flags;
41945 mm_segment_t fs;
41946 - const char *data = (const char *)addr;
41947 + const char __user *data = (const char __force_user *)addr;
41948 ssize_t wr = 0;
41949
41950 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
41951 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41952 index 6e6d536..457113a 100644
41953 --- a/fs/befs/linuxvfs.c
41954 +++ b/fs/befs/linuxvfs.c
41955 @@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41956 {
41957 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41958 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41959 - char *link = nd_get_link(nd);
41960 + const char *link = nd_get_link(nd);
41961 if (!IS_ERR(link))
41962 kfree(link);
41963 }
41964 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41965 index 1ff9405..f1e376a 100644
41966 --- a/fs/binfmt_aout.c
41967 +++ b/fs/binfmt_aout.c
41968 @@ -16,6 +16,7 @@
41969 #include <linux/string.h>
41970 #include <linux/fs.h>
41971 #include <linux/file.h>
41972 +#include <linux/security.h>
41973 #include <linux/stat.h>
41974 #include <linux/fcntl.h>
41975 #include <linux/ptrace.h>
41976 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41977 #endif
41978 # define START_STACK(u) ((void __user *)u.start_stack)
41979
41980 + memset(&dump, 0, sizeof(dump));
41981 +
41982 fs = get_fs();
41983 set_fs(KERNEL_DS);
41984 has_dumped = 1;
41985 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41986
41987 /* If the size of the dump file exceeds the rlimit, then see what would happen
41988 if we wrote the stack, but not the data area. */
41989 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41990 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41991 dump.u_dsize = 0;
41992
41993 /* Make sure we have enough room to write the stack and data areas. */
41994 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41995 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41996 dump.u_ssize = 0;
41997
41998 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41999 rlim = rlimit(RLIMIT_DATA);
42000 if (rlim >= RLIM_INFINITY)
42001 rlim = ~0;
42002 +
42003 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
42004 if (ex.a_data + ex.a_bss > rlim)
42005 return -ENOMEM;
42006
42007 @@ -269,6 +276,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42008 install_exec_creds(bprm);
42009 current->flags &= ~PF_FORKNOEXEC;
42010
42011 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42012 + current->mm->pax_flags = 0UL;
42013 +#endif
42014 +
42015 +#ifdef CONFIG_PAX_PAGEEXEC
42016 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
42017 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
42018 +
42019 +#ifdef CONFIG_PAX_EMUTRAMP
42020 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
42021 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
42022 +#endif
42023 +
42024 +#ifdef CONFIG_PAX_MPROTECT
42025 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
42026 + current->mm->pax_flags |= MF_PAX_MPROTECT;
42027 +#endif
42028 +
42029 + }
42030 +#endif
42031 +
42032 if (N_MAGIC(ex) == OMAGIC) {
42033 unsigned long text_addr, map_size;
42034 loff_t pos;
42035 @@ -341,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42036
42037 down_write(&current->mm->mmap_sem);
42038 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
42039 - PROT_READ | PROT_WRITE | PROT_EXEC,
42040 + PROT_READ | PROT_WRITE,
42041 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
42042 fd_offset + ex.a_text);
42043 up_write(&current->mm->mmap_sem);
42044 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
42045 index 07d096c..851a18b 100644
42046 --- a/fs/binfmt_elf.c
42047 +++ b/fs/binfmt_elf.c
42048 @@ -32,6 +32,7 @@
42049 #include <linux/elf.h>
42050 #include <linux/utsname.h>
42051 #include <linux/coredump.h>
42052 +#include <linux/xattr.h>
42053 #include <asm/uaccess.h>
42054 #include <asm/param.h>
42055 #include <asm/page.h>
42056 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
42057 #define elf_core_dump NULL
42058 #endif
42059
42060 +#ifdef CONFIG_PAX_MPROTECT
42061 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
42062 +#endif
42063 +
42064 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
42065 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
42066 #else
42067 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
42068 .load_binary = load_elf_binary,
42069 .load_shlib = load_elf_library,
42070 .core_dump = elf_core_dump,
42071 +
42072 +#ifdef CONFIG_PAX_MPROTECT
42073 + .handle_mprotect= elf_handle_mprotect,
42074 +#endif
42075 +
42076 .min_coredump = ELF_EXEC_PAGESIZE,
42077 };
42078
42079 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
42080
42081 static int set_brk(unsigned long start, unsigned long end)
42082 {
42083 + unsigned long e = end;
42084 +
42085 start = ELF_PAGEALIGN(start);
42086 end = ELF_PAGEALIGN(end);
42087 if (end > start) {
42088 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
42089 if (BAD_ADDR(addr))
42090 return addr;
42091 }
42092 - current->mm->start_brk = current->mm->brk = end;
42093 + current->mm->start_brk = current->mm->brk = e;
42094 return 0;
42095 }
42096
42097 @@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42098 elf_addr_t __user *u_rand_bytes;
42099 const char *k_platform = ELF_PLATFORM;
42100 const char *k_base_platform = ELF_BASE_PLATFORM;
42101 - unsigned char k_rand_bytes[16];
42102 + u32 k_rand_bytes[4];
42103 int items;
42104 elf_addr_t *elf_info;
42105 int ei_index = 0;
42106 const struct cred *cred = current_cred();
42107 struct vm_area_struct *vma;
42108 + unsigned long saved_auxv[AT_VECTOR_SIZE];
42109
42110 /*
42111 * In some cases (e.g. Hyper-Threading), we want to avoid L1
42112 @@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42113 * Generate 16 random bytes for userspace PRNG seeding.
42114 */
42115 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
42116 - u_rand_bytes = (elf_addr_t __user *)
42117 - STACK_ALLOC(p, sizeof(k_rand_bytes));
42118 + srandom32(k_rand_bytes[0] ^ random32());
42119 + srandom32(k_rand_bytes[1] ^ random32());
42120 + srandom32(k_rand_bytes[2] ^ random32());
42121 + srandom32(k_rand_bytes[3] ^ random32());
42122 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
42123 + u_rand_bytes = (elf_addr_t __user *) p;
42124 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
42125 return -EFAULT;
42126
42127 @@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42128 return -EFAULT;
42129 current->mm->env_end = p;
42130
42131 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
42132 +
42133 /* Put the elf_info on the stack in the right place. */
42134 sp = (elf_addr_t __user *)envp + 1;
42135 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
42136 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
42137 return -EFAULT;
42138 return 0;
42139 }
42140 @@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42141 {
42142 struct elf_phdr *elf_phdata;
42143 struct elf_phdr *eppnt;
42144 - unsigned long load_addr = 0;
42145 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
42146 int load_addr_set = 0;
42147 unsigned long last_bss = 0, elf_bss = 0;
42148 - unsigned long error = ~0UL;
42149 + unsigned long error = -EINVAL;
42150 unsigned long total_size;
42151 int retval, i, size;
42152
42153 @@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42154 goto out_close;
42155 }
42156
42157 +#ifdef CONFIG_PAX_SEGMEXEC
42158 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
42159 + pax_task_size = SEGMEXEC_TASK_SIZE;
42160 +#endif
42161 +
42162 eppnt = elf_phdata;
42163 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
42164 if (eppnt->p_type == PT_LOAD) {
42165 @@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42166 k = load_addr + eppnt->p_vaddr;
42167 if (BAD_ADDR(k) ||
42168 eppnt->p_filesz > eppnt->p_memsz ||
42169 - eppnt->p_memsz > TASK_SIZE ||
42170 - TASK_SIZE - eppnt->p_memsz < k) {
42171 + eppnt->p_memsz > pax_task_size ||
42172 + pax_task_size - eppnt->p_memsz < k) {
42173 error = -ENOMEM;
42174 goto out_close;
42175 }
42176 @@ -528,6 +552,351 @@ out:
42177 return error;
42178 }
42179
42180 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42181 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
42182 +{
42183 + unsigned long pax_flags = 0UL;
42184 +
42185 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42186 +
42187 +#ifdef CONFIG_PAX_PAGEEXEC
42188 + if (elf_phdata->p_flags & PF_PAGEEXEC)
42189 + pax_flags |= MF_PAX_PAGEEXEC;
42190 +#endif
42191 +
42192 +#ifdef CONFIG_PAX_SEGMEXEC
42193 + if (elf_phdata->p_flags & PF_SEGMEXEC)
42194 + pax_flags |= MF_PAX_SEGMEXEC;
42195 +#endif
42196 +
42197 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42198 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42199 + if ((__supported_pte_mask & _PAGE_NX))
42200 + pax_flags &= ~MF_PAX_SEGMEXEC;
42201 + else
42202 + pax_flags &= ~MF_PAX_PAGEEXEC;
42203 + }
42204 +#endif
42205 +
42206 +#ifdef CONFIG_PAX_EMUTRAMP
42207 + if (elf_phdata->p_flags & PF_EMUTRAMP)
42208 + pax_flags |= MF_PAX_EMUTRAMP;
42209 +#endif
42210 +
42211 +#ifdef CONFIG_PAX_MPROTECT
42212 + if (elf_phdata->p_flags & PF_MPROTECT)
42213 + pax_flags |= MF_PAX_MPROTECT;
42214 +#endif
42215 +
42216 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42217 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42218 + pax_flags |= MF_PAX_RANDMMAP;
42219 +#endif
42220 +
42221 +#endif
42222 +
42223 + return pax_flags;
42224 +}
42225 +
42226 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
42227 +{
42228 + unsigned long pax_flags = 0UL;
42229 +
42230 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42231 +
42232 +#ifdef CONFIG_PAX_PAGEEXEC
42233 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42234 + pax_flags |= MF_PAX_PAGEEXEC;
42235 +#endif
42236 +
42237 +#ifdef CONFIG_PAX_SEGMEXEC
42238 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42239 + pax_flags |= MF_PAX_SEGMEXEC;
42240 +#endif
42241 +
42242 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42243 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42244 + if ((__supported_pte_mask & _PAGE_NX))
42245 + pax_flags &= ~MF_PAX_SEGMEXEC;
42246 + else
42247 + pax_flags &= ~MF_PAX_PAGEEXEC;
42248 + }
42249 +#endif
42250 +
42251 +#ifdef CONFIG_PAX_EMUTRAMP
42252 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42253 + pax_flags |= MF_PAX_EMUTRAMP;
42254 +#endif
42255 +
42256 +#ifdef CONFIG_PAX_MPROTECT
42257 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42258 + pax_flags |= MF_PAX_MPROTECT;
42259 +#endif
42260 +
42261 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42262 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42263 + pax_flags |= MF_PAX_RANDMMAP;
42264 +#endif
42265 +
42266 +#endif
42267 +
42268 + return pax_flags;
42269 +}
42270 +
42271 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
42272 +{
42273 + unsigned long pax_flags = 0UL;
42274 +
42275 +#ifdef CONFIG_PAX_EI_PAX
42276 +
42277 +#ifdef CONFIG_PAX_PAGEEXEC
42278 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
42279 + pax_flags |= MF_PAX_PAGEEXEC;
42280 +#endif
42281 +
42282 +#ifdef CONFIG_PAX_SEGMEXEC
42283 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
42284 + pax_flags |= MF_PAX_SEGMEXEC;
42285 +#endif
42286 +
42287 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42288 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42289 + if ((__supported_pte_mask & _PAGE_NX))
42290 + pax_flags &= ~MF_PAX_SEGMEXEC;
42291 + else
42292 + pax_flags &= ~MF_PAX_PAGEEXEC;
42293 + }
42294 +#endif
42295 +
42296 +#ifdef CONFIG_PAX_EMUTRAMP
42297 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
42298 + pax_flags |= MF_PAX_EMUTRAMP;
42299 +#endif
42300 +
42301 +#ifdef CONFIG_PAX_MPROTECT
42302 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
42303 + pax_flags |= MF_PAX_MPROTECT;
42304 +#endif
42305 +
42306 +#ifdef CONFIG_PAX_ASLR
42307 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
42308 + pax_flags |= MF_PAX_RANDMMAP;
42309 +#endif
42310 +
42311 +#else
42312 +
42313 +#ifdef CONFIG_PAX_PAGEEXEC
42314 + pax_flags |= MF_PAX_PAGEEXEC;
42315 +#endif
42316 +
42317 +#ifdef CONFIG_PAX_MPROTECT
42318 + pax_flags |= MF_PAX_MPROTECT;
42319 +#endif
42320 +
42321 +#ifdef CONFIG_PAX_RANDMMAP
42322 + pax_flags |= MF_PAX_RANDMMAP;
42323 +#endif
42324 +
42325 +#ifdef CONFIG_PAX_SEGMEXEC
42326 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
42327 + pax_flags &= ~MF_PAX_PAGEEXEC;
42328 + pax_flags |= MF_PAX_SEGMEXEC;
42329 + }
42330 +#endif
42331 +
42332 +#endif
42333 +
42334 + return pax_flags;
42335 +}
42336 +
42337 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
42338 +{
42339 +
42340 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42341 + unsigned long i;
42342 +
42343 + for (i = 0UL; i < elf_ex->e_phnum; i++)
42344 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
42345 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
42346 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
42347 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
42348 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
42349 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
42350 + return ~0UL;
42351 +
42352 +#ifdef CONFIG_PAX_SOFTMODE
42353 + if (pax_softmode)
42354 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
42355 + else
42356 +#endif
42357 +
42358 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
42359 + break;
42360 + }
42361 +#endif
42362 +
42363 + return ~0UL;
42364 +}
42365 +
42366 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42367 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
42368 +{
42369 + unsigned long pax_flags = 0UL;
42370 +
42371 +#ifdef CONFIG_PAX_PAGEEXEC
42372 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
42373 + pax_flags |= MF_PAX_PAGEEXEC;
42374 +#endif
42375 +
42376 +#ifdef CONFIG_PAX_SEGMEXEC
42377 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
42378 + pax_flags |= MF_PAX_SEGMEXEC;
42379 +#endif
42380 +
42381 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42382 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42383 + if ((__supported_pte_mask & _PAGE_NX))
42384 + pax_flags &= ~MF_PAX_SEGMEXEC;
42385 + else
42386 + pax_flags &= ~MF_PAX_PAGEEXEC;
42387 + }
42388 +#endif
42389 +
42390 +#ifdef CONFIG_PAX_EMUTRAMP
42391 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
42392 + pax_flags |= MF_PAX_EMUTRAMP;
42393 +#endif
42394 +
42395 +#ifdef CONFIG_PAX_MPROTECT
42396 + if (pax_flags_softmode & MF_PAX_MPROTECT)
42397 + pax_flags |= MF_PAX_MPROTECT;
42398 +#endif
42399 +
42400 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42401 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
42402 + pax_flags |= MF_PAX_RANDMMAP;
42403 +#endif
42404 +
42405 + return pax_flags;
42406 +}
42407 +
42408 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
42409 +{
42410 + unsigned long pax_flags = 0UL;
42411 +
42412 +#ifdef CONFIG_PAX_PAGEEXEC
42413 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
42414 + pax_flags |= MF_PAX_PAGEEXEC;
42415 +#endif
42416 +
42417 +#ifdef CONFIG_PAX_SEGMEXEC
42418 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
42419 + pax_flags |= MF_PAX_SEGMEXEC;
42420 +#endif
42421 +
42422 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42423 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42424 + if ((__supported_pte_mask & _PAGE_NX))
42425 + pax_flags &= ~MF_PAX_SEGMEXEC;
42426 + else
42427 + pax_flags &= ~MF_PAX_PAGEEXEC;
42428 + }
42429 +#endif
42430 +
42431 +#ifdef CONFIG_PAX_EMUTRAMP
42432 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
42433 + pax_flags |= MF_PAX_EMUTRAMP;
42434 +#endif
42435 +
42436 +#ifdef CONFIG_PAX_MPROTECT
42437 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
42438 + pax_flags |= MF_PAX_MPROTECT;
42439 +#endif
42440 +
42441 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42442 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
42443 + pax_flags |= MF_PAX_RANDMMAP;
42444 +#endif
42445 +
42446 + return pax_flags;
42447 +}
42448 +#endif
42449 +
42450 +static unsigned long pax_parse_xattr_pax(struct file * const file)
42451 +{
42452 +
42453 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42454 + ssize_t xattr_size, i;
42455 + unsigned char xattr_value[5];
42456 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
42457 +
42458 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
42459 + if (xattr_size <= 0)
42460 + return ~0UL;
42461 +
42462 + for (i = 0; i < xattr_size; i++)
42463 + switch (xattr_value[i]) {
42464 + default:
42465 + return ~0UL;
42466 +
42467 +#define parse_flag(option1, option2, flag) \
42468 + case option1: \
42469 + pax_flags_hardmode |= MF_PAX_##flag; \
42470 + break; \
42471 + case option2: \
42472 + pax_flags_softmode |= MF_PAX_##flag; \
42473 + break;
42474 +
42475 + parse_flag('p', 'P', PAGEEXEC);
42476 + parse_flag('e', 'E', EMUTRAMP);
42477 + parse_flag('m', 'M', MPROTECT);
42478 + parse_flag('r', 'R', RANDMMAP);
42479 + parse_flag('s', 'S', SEGMEXEC);
42480 +
42481 +#undef parse_flag
42482 + }
42483 +
42484 + if (pax_flags_hardmode & pax_flags_softmode)
42485 + return ~0UL;
42486 +
42487 +#ifdef CONFIG_PAX_SOFTMODE
42488 + if (pax_softmode)
42489 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
42490 + else
42491 +#endif
42492 +
42493 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
42494 +#else
42495 + return ~0UL;
42496 +#endif
42497 +
42498 +}
42499 +
42500 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
42501 +{
42502 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
42503 +
42504 + pax_flags = pax_parse_ei_pax(elf_ex);
42505 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
42506 + xattr_pax_flags = pax_parse_xattr_pax(file);
42507 +
42508 + if (pt_pax_flags == ~0UL)
42509 + pt_pax_flags = xattr_pax_flags;
42510 + else if (xattr_pax_flags == ~0UL)
42511 + xattr_pax_flags = pt_pax_flags;
42512 + if (pt_pax_flags != xattr_pax_flags)
42513 + return -EINVAL;
42514 + if (pt_pax_flags != ~0UL)
42515 + pax_flags = pt_pax_flags;
42516 +
42517 + if (0 > pax_check_flags(&pax_flags))
42518 + return -EINVAL;
42519 +
42520 + current->mm->pax_flags = pax_flags;
42521 + return 0;
42522 +}
42523 +#endif
42524 +
42525 /*
42526 * These are the functions used to load ELF style executables and shared
42527 * libraries. There is no binary dependent code anywhere else.
42528 @@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42529 {
42530 unsigned int random_variable = 0;
42531
42532 +#ifdef CONFIG_PAX_RANDUSTACK
42533 + if (randomize_va_space)
42534 + return stack_top - current->mm->delta_stack;
42535 +#endif
42536 +
42537 if ((current->flags & PF_RANDOMIZE) &&
42538 !(current->personality & ADDR_NO_RANDOMIZE)) {
42539 random_variable = get_random_int() & STACK_RND_MASK;
42540 @@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42541 unsigned long load_addr = 0, load_bias = 0;
42542 int load_addr_set = 0;
42543 char * elf_interpreter = NULL;
42544 - unsigned long error;
42545 + unsigned long error = 0;
42546 struct elf_phdr *elf_ppnt, *elf_phdata;
42547 unsigned long elf_bss, elf_brk;
42548 int retval, i;
42549 @@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42550 unsigned long start_code, end_code, start_data, end_data;
42551 unsigned long reloc_func_desc __maybe_unused = 0;
42552 int executable_stack = EXSTACK_DEFAULT;
42553 - unsigned long def_flags = 0;
42554 struct {
42555 struct elfhdr elf_ex;
42556 struct elfhdr interp_elf_ex;
42557 } *loc;
42558 + unsigned long pax_task_size = TASK_SIZE;
42559
42560 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42561 if (!loc) {
42562 @@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42563
42564 /* OK, This is the point of no return */
42565 current->flags &= ~PF_FORKNOEXEC;
42566 - current->mm->def_flags = def_flags;
42567 +
42568 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42569 + current->mm->pax_flags = 0UL;
42570 +#endif
42571 +
42572 +#ifdef CONFIG_PAX_DLRESOLVE
42573 + current->mm->call_dl_resolve = 0UL;
42574 +#endif
42575 +
42576 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42577 + current->mm->call_syscall = 0UL;
42578 +#endif
42579 +
42580 +#ifdef CONFIG_PAX_ASLR
42581 + current->mm->delta_mmap = 0UL;
42582 + current->mm->delta_stack = 0UL;
42583 +#endif
42584 +
42585 + current->mm->def_flags = 0;
42586 +
42587 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42588 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
42589 + send_sig(SIGKILL, current, 0);
42590 + goto out_free_dentry;
42591 + }
42592 +#endif
42593 +
42594 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42595 + pax_set_initial_flags(bprm);
42596 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42597 + if (pax_set_initial_flags_func)
42598 + (pax_set_initial_flags_func)(bprm);
42599 +#endif
42600 +
42601 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42602 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42603 + current->mm->context.user_cs_limit = PAGE_SIZE;
42604 + current->mm->def_flags |= VM_PAGEEXEC;
42605 + }
42606 +#endif
42607 +
42608 +#ifdef CONFIG_PAX_SEGMEXEC
42609 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42610 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42611 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42612 + pax_task_size = SEGMEXEC_TASK_SIZE;
42613 + current->mm->def_flags |= VM_NOHUGEPAGE;
42614 + }
42615 +#endif
42616 +
42617 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42618 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42619 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42620 + put_cpu();
42621 + }
42622 +#endif
42623
42624 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42625 may depend on the personality. */
42626 SET_PERSONALITY(loc->elf_ex);
42627 +
42628 +#ifdef CONFIG_PAX_ASLR
42629 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42630 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42631 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42632 + }
42633 +#endif
42634 +
42635 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42636 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42637 + executable_stack = EXSTACK_DISABLE_X;
42638 + current->personality &= ~READ_IMPLIES_EXEC;
42639 + } else
42640 +#endif
42641 +
42642 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42643 current->personality |= READ_IMPLIES_EXEC;
42644
42645 @@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42646 #else
42647 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42648 #endif
42649 +
42650 +#ifdef CONFIG_PAX_RANDMMAP
42651 + /* PaX: randomize base address at the default exe base if requested */
42652 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42653 +#ifdef CONFIG_SPARC64
42654 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42655 +#else
42656 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42657 +#endif
42658 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42659 + elf_flags |= MAP_FIXED;
42660 + }
42661 +#endif
42662 +
42663 }
42664
42665 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42666 @@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42667 * allowed task size. Note that p_filesz must always be
42668 * <= p_memsz so it is only necessary to check p_memsz.
42669 */
42670 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42671 - elf_ppnt->p_memsz > TASK_SIZE ||
42672 - TASK_SIZE - elf_ppnt->p_memsz < k) {
42673 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42674 + elf_ppnt->p_memsz > pax_task_size ||
42675 + pax_task_size - elf_ppnt->p_memsz < k) {
42676 /* set_brk can never work. Avoid overflows. */
42677 send_sig(SIGKILL, current, 0);
42678 retval = -EINVAL;
42679 @@ -881,11 +1339,36 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42680 goto out_free_dentry;
42681 }
42682 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42683 - send_sig(SIGSEGV, current, 0);
42684 - retval = -EFAULT; /* Nobody gets to see this, but.. */
42685 - goto out_free_dentry;
42686 + /*
42687 + * This bss-zeroing can fail if the ELF
42688 + * file specifies odd protections. So
42689 + * we don't check the return value
42690 + */
42691 }
42692
42693 +#ifdef CONFIG_PAX_RANDMMAP
42694 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42695 + unsigned long start, size;
42696 +
42697 + start = ELF_PAGEALIGN(elf_brk);
42698 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
42699 + down_write(&current->mm->mmap_sem);
42700 + retval = -ENOMEM;
42701 + if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
42702 + current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
42703 + start = do_mmap(NULL, start, size, PROT_NONE, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
42704 + retval = IS_ERR_VALUE(start) ? start : 0;
42705 + }
42706 + up_write(&current->mm->mmap_sem);
42707 + if (retval == 0)
42708 + retval = set_brk(start + size, start + size + PAGE_SIZE);
42709 + if (retval < 0) {
42710 + send_sig(SIGKILL, current, 0);
42711 + goto out_free_dentry;
42712 + }
42713 + }
42714 +#endif
42715 +
42716 if (elf_interpreter) {
42717 unsigned long uninitialized_var(interp_map_addr);
42718
42719 @@ -1098,7 +1581,7 @@ out:
42720 * Decide what to dump of a segment, part, all or none.
42721 */
42722 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42723 - unsigned long mm_flags)
42724 + unsigned long mm_flags, long signr)
42725 {
42726 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42727
42728 @@ -1132,7 +1615,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42729 if (vma->vm_file == NULL)
42730 return 0;
42731
42732 - if (FILTER(MAPPED_PRIVATE))
42733 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42734 goto whole;
42735
42736 /*
42737 @@ -1354,9 +1837,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42738 {
42739 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42740 int i = 0;
42741 - do
42742 + do {
42743 i += 2;
42744 - while (auxv[i - 2] != AT_NULL);
42745 + } while (auxv[i - 2] != AT_NULL);
42746 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42747 }
42748
42749 @@ -1862,14 +2345,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42750 }
42751
42752 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42753 - unsigned long mm_flags)
42754 + struct coredump_params *cprm)
42755 {
42756 struct vm_area_struct *vma;
42757 size_t size = 0;
42758
42759 for (vma = first_vma(current, gate_vma); vma != NULL;
42760 vma = next_vma(vma, gate_vma))
42761 - size += vma_dump_size(vma, mm_flags);
42762 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42763 return size;
42764 }
42765
42766 @@ -1963,7 +2446,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42767
42768 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42769
42770 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42771 + offset += elf_core_vma_data_size(gate_vma, cprm);
42772 offset += elf_core_extra_data_size();
42773 e_shoff = offset;
42774
42775 @@ -1977,10 +2460,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42776 offset = dataoff;
42777
42778 size += sizeof(*elf);
42779 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42780 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42781 goto end_coredump;
42782
42783 size += sizeof(*phdr4note);
42784 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42785 if (size > cprm->limit
42786 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42787 goto end_coredump;
42788 @@ -1994,7 +2479,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42789 phdr.p_offset = offset;
42790 phdr.p_vaddr = vma->vm_start;
42791 phdr.p_paddr = 0;
42792 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42793 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42794 phdr.p_memsz = vma->vm_end - vma->vm_start;
42795 offset += phdr.p_filesz;
42796 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42797 @@ -2005,6 +2490,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42798 phdr.p_align = ELF_EXEC_PAGESIZE;
42799
42800 size += sizeof(phdr);
42801 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42802 if (size > cprm->limit
42803 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42804 goto end_coredump;
42805 @@ -2029,7 +2515,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42806 unsigned long addr;
42807 unsigned long end;
42808
42809 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42810 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42811
42812 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42813 struct page *page;
42814 @@ -2038,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42815 page = get_dump_page(addr);
42816 if (page) {
42817 void *kaddr = kmap(page);
42818 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42819 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42820 !dump_write(cprm->file, kaddr,
42821 PAGE_SIZE);
42822 @@ -2055,6 +2542,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42823
42824 if (e_phnum == PN_XNUM) {
42825 size += sizeof(*shdr4extnum);
42826 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42827 if (size > cprm->limit
42828 || !dump_write(cprm->file, shdr4extnum,
42829 sizeof(*shdr4extnum)))
42830 @@ -2075,6 +2563,97 @@ out:
42831
42832 #endif /* CONFIG_ELF_CORE */
42833
42834 +#ifdef CONFIG_PAX_MPROTECT
42835 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
42836 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42837 + * we'll remove VM_MAYWRITE for good on RELRO segments.
42838 + *
42839 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42840 + * basis because we want to allow the common case and not the special ones.
42841 + */
42842 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42843 +{
42844 + struct elfhdr elf_h;
42845 + struct elf_phdr elf_p;
42846 + unsigned long i;
42847 + unsigned long oldflags;
42848 + bool is_textrel_rw, is_textrel_rx, is_relro;
42849 +
42850 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42851 + return;
42852 +
42853 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42854 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42855 +
42856 +#ifdef CONFIG_PAX_ELFRELOCS
42857 + /* possible TEXTREL */
42858 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42859 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42860 +#else
42861 + is_textrel_rw = false;
42862 + is_textrel_rx = false;
42863 +#endif
42864 +
42865 + /* possible RELRO */
42866 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42867 +
42868 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42869 + return;
42870 +
42871 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42872 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42873 +
42874 +#ifdef CONFIG_PAX_ETEXECRELOCS
42875 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42876 +#else
42877 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42878 +#endif
42879 +
42880 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42881 + !elf_check_arch(&elf_h) ||
42882 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42883 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42884 + return;
42885 +
42886 + for (i = 0UL; i < elf_h.e_phnum; i++) {
42887 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42888 + return;
42889 + switch (elf_p.p_type) {
42890 + case PT_DYNAMIC:
42891 + if (!is_textrel_rw && !is_textrel_rx)
42892 + continue;
42893 + i = 0UL;
42894 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42895 + elf_dyn dyn;
42896 +
42897 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42898 + return;
42899 + if (dyn.d_tag == DT_NULL)
42900 + return;
42901 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42902 + gr_log_textrel(vma);
42903 + if (is_textrel_rw)
42904 + vma->vm_flags |= VM_MAYWRITE;
42905 + else
42906 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42907 + vma->vm_flags &= ~VM_MAYWRITE;
42908 + return;
42909 + }
42910 + i++;
42911 + }
42912 + return;
42913 +
42914 + case PT_GNU_RELRO:
42915 + if (!is_relro)
42916 + continue;
42917 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42918 + vma->vm_flags &= ~VM_MAYWRITE;
42919 + return;
42920 + }
42921 + }
42922 +}
42923 +#endif
42924 +
42925 static int __init init_elf_binfmt(void)
42926 {
42927 return register_binfmt(&elf_format);
42928 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42929 index 1bffbe0..c8c283e 100644
42930 --- a/fs/binfmt_flat.c
42931 +++ b/fs/binfmt_flat.c
42932 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42933 realdatastart = (unsigned long) -ENOMEM;
42934 printk("Unable to allocate RAM for process data, errno %d\n",
42935 (int)-realdatastart);
42936 + down_write(&current->mm->mmap_sem);
42937 do_munmap(current->mm, textpos, text_len);
42938 + up_write(&current->mm->mmap_sem);
42939 ret = realdatastart;
42940 goto err;
42941 }
42942 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42943 }
42944 if (IS_ERR_VALUE(result)) {
42945 printk("Unable to read data+bss, errno %d\n", (int)-result);
42946 + down_write(&current->mm->mmap_sem);
42947 do_munmap(current->mm, textpos, text_len);
42948 do_munmap(current->mm, realdatastart, len);
42949 + up_write(&current->mm->mmap_sem);
42950 ret = result;
42951 goto err;
42952 }
42953 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42954 }
42955 if (IS_ERR_VALUE(result)) {
42956 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42957 + down_write(&current->mm->mmap_sem);
42958 do_munmap(current->mm, textpos, text_len + data_len + extra +
42959 MAX_SHARED_LIBS * sizeof(unsigned long));
42960 + up_write(&current->mm->mmap_sem);
42961 ret = result;
42962 goto err;
42963 }
42964 diff --git a/fs/bio.c b/fs/bio.c
42965 index b980ecd..74800bf 100644
42966 --- a/fs/bio.c
42967 +++ b/fs/bio.c
42968 @@ -833,7 +833,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
42969 /*
42970 * Overflow, abort
42971 */
42972 - if (end < start)
42973 + if (end < start || end - start > INT_MAX - nr_pages)
42974 return ERR_PTR(-EINVAL);
42975
42976 nr_pages += end - start;
42977 @@ -1229,7 +1229,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42978 const int read = bio_data_dir(bio) == READ;
42979 struct bio_map_data *bmd = bio->bi_private;
42980 int i;
42981 - char *p = bmd->sgvecs[0].iov_base;
42982 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42983
42984 __bio_for_each_segment(bvec, bio, i, 0) {
42985 char *addr = page_address(bvec->bv_page);
42986 diff --git a/fs/block_dev.c b/fs/block_dev.c
42987 index 5e9f198..6bf9b1c 100644
42988 --- a/fs/block_dev.c
42989 +++ b/fs/block_dev.c
42990 @@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42991 else if (bdev->bd_contains == bdev)
42992 return true; /* is a whole device which isn't held */
42993
42994 - else if (whole->bd_holder == bd_may_claim)
42995 + else if (whole->bd_holder == (void *)bd_may_claim)
42996 return true; /* is a partition of a device that is being partitioned */
42997 else if (whole->bd_holder != NULL)
42998 return false; /* is a partition of a held device */
42999 diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
43000 index d986824..af1befd 100644
43001 --- a/fs/btrfs/check-integrity.c
43002 +++ b/fs/btrfs/check-integrity.c
43003 @@ -157,7 +157,7 @@ struct btrfsic_block {
43004 union {
43005 bio_end_io_t *bio;
43006 bh_end_io_t *bh;
43007 - } orig_bio_bh_end_io;
43008 + } __no_const orig_bio_bh_end_io;
43009 int submit_bio_bh_rw;
43010 u64 flush_gen; /* only valid if !never_written */
43011 };
43012 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
43013 index 0639a55..7d9e07f 100644
43014 --- a/fs/btrfs/ctree.c
43015 +++ b/fs/btrfs/ctree.c
43016 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
43017 free_extent_buffer(buf);
43018 add_root_to_dirty_list(root);
43019 } else {
43020 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
43021 - parent_start = parent->start;
43022 - else
43023 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
43024 + if (parent)
43025 + parent_start = parent->start;
43026 + else
43027 + parent_start = 0;
43028 + } else
43029 parent_start = 0;
43030
43031 WARN_ON(trans->transid != btrfs_header_generation(parent));
43032 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
43033 index 892b347..b3db246 100644
43034 --- a/fs/btrfs/inode.c
43035 +++ b/fs/btrfs/inode.c
43036 @@ -6930,7 +6930,7 @@ fail:
43037 return -ENOMEM;
43038 }
43039
43040 -static int btrfs_getattr(struct vfsmount *mnt,
43041 +int btrfs_getattr(struct vfsmount *mnt,
43042 struct dentry *dentry, struct kstat *stat)
43043 {
43044 struct inode *inode = dentry->d_inode;
43045 @@ -6944,6 +6944,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
43046 return 0;
43047 }
43048
43049 +EXPORT_SYMBOL(btrfs_getattr);
43050 +
43051 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
43052 +{
43053 + return BTRFS_I(inode)->root->anon_dev;
43054 +}
43055 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
43056 +
43057 /*
43058 * If a file is moved, it will inherit the cow and compression flags of the new
43059 * directory.
43060 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
43061 index 1b36f19..5ac7360 100644
43062 --- a/fs/btrfs/ioctl.c
43063 +++ b/fs/btrfs/ioctl.c
43064 @@ -2783,9 +2783,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43065 for (i = 0; i < num_types; i++) {
43066 struct btrfs_space_info *tmp;
43067
43068 + /* Don't copy in more than we allocated */
43069 if (!slot_count)
43070 break;
43071
43072 + slot_count--;
43073 +
43074 info = NULL;
43075 rcu_read_lock();
43076 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
43077 @@ -2807,15 +2810,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43078 memcpy(dest, &space, sizeof(space));
43079 dest++;
43080 space_args.total_spaces++;
43081 - slot_count--;
43082 }
43083 - if (!slot_count)
43084 - break;
43085 }
43086 up_read(&info->groups_sem);
43087 }
43088
43089 - user_dest = (struct btrfs_ioctl_space_info *)
43090 + user_dest = (struct btrfs_ioctl_space_info __user *)
43091 (arg + sizeof(struct btrfs_ioctl_space_args));
43092
43093 if (copy_to_user(user_dest, dest_orig, alloc_size))
43094 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
43095 index 8c1aae2..1e46446 100644
43096 --- a/fs/btrfs/relocation.c
43097 +++ b/fs/btrfs/relocation.c
43098 @@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
43099 }
43100 spin_unlock(&rc->reloc_root_tree.lock);
43101
43102 - BUG_ON((struct btrfs_root *)node->data != root);
43103 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
43104
43105 if (!del) {
43106 spin_lock(&rc->reloc_root_tree.lock);
43107 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
43108 index 622f469..e8d2d55 100644
43109 --- a/fs/cachefiles/bind.c
43110 +++ b/fs/cachefiles/bind.c
43111 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
43112 args);
43113
43114 /* start by checking things over */
43115 - ASSERT(cache->fstop_percent >= 0 &&
43116 - cache->fstop_percent < cache->fcull_percent &&
43117 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
43118 cache->fcull_percent < cache->frun_percent &&
43119 cache->frun_percent < 100);
43120
43121 - ASSERT(cache->bstop_percent >= 0 &&
43122 - cache->bstop_percent < cache->bcull_percent &&
43123 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
43124 cache->bcull_percent < cache->brun_percent &&
43125 cache->brun_percent < 100);
43126
43127 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
43128 index 0a1467b..6a53245 100644
43129 --- a/fs/cachefiles/daemon.c
43130 +++ b/fs/cachefiles/daemon.c
43131 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
43132 if (n > buflen)
43133 return -EMSGSIZE;
43134
43135 - if (copy_to_user(_buffer, buffer, n) != 0)
43136 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
43137 return -EFAULT;
43138
43139 return n;
43140 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
43141 if (test_bit(CACHEFILES_DEAD, &cache->flags))
43142 return -EIO;
43143
43144 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
43145 + if (datalen > PAGE_SIZE - 1)
43146 return -EOPNOTSUPP;
43147
43148 /* drag the command string into the kernel so we can parse it */
43149 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
43150 if (args[0] != '%' || args[1] != '\0')
43151 return -EINVAL;
43152
43153 - if (fstop < 0 || fstop >= cache->fcull_percent)
43154 + if (fstop >= cache->fcull_percent)
43155 return cachefiles_daemon_range_error(cache, args);
43156
43157 cache->fstop_percent = fstop;
43158 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
43159 if (args[0] != '%' || args[1] != '\0')
43160 return -EINVAL;
43161
43162 - if (bstop < 0 || bstop >= cache->bcull_percent)
43163 + if (bstop >= cache->bcull_percent)
43164 return cachefiles_daemon_range_error(cache, args);
43165
43166 cache->bstop_percent = bstop;
43167 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
43168 index bd6bc1b..b627b53 100644
43169 --- a/fs/cachefiles/internal.h
43170 +++ b/fs/cachefiles/internal.h
43171 @@ -57,7 +57,7 @@ struct cachefiles_cache {
43172 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
43173 struct rb_root active_nodes; /* active nodes (can't be culled) */
43174 rwlock_t active_lock; /* lock for active_nodes */
43175 - atomic_t gravecounter; /* graveyard uniquifier */
43176 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
43177 unsigned frun_percent; /* when to stop culling (% files) */
43178 unsigned fcull_percent; /* when to start culling (% files) */
43179 unsigned fstop_percent; /* when to stop allocating (% files) */
43180 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
43181 * proc.c
43182 */
43183 #ifdef CONFIG_CACHEFILES_HISTOGRAM
43184 -extern atomic_t cachefiles_lookup_histogram[HZ];
43185 -extern atomic_t cachefiles_mkdir_histogram[HZ];
43186 -extern atomic_t cachefiles_create_histogram[HZ];
43187 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43188 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43189 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
43190
43191 extern int __init cachefiles_proc_init(void);
43192 extern void cachefiles_proc_cleanup(void);
43193 static inline
43194 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
43195 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
43196 {
43197 unsigned long jif = jiffies - start_jif;
43198 if (jif >= HZ)
43199 jif = HZ - 1;
43200 - atomic_inc(&histogram[jif]);
43201 + atomic_inc_unchecked(&histogram[jif]);
43202 }
43203
43204 #else
43205 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
43206 index a0358c2..d6137f2 100644
43207 --- a/fs/cachefiles/namei.c
43208 +++ b/fs/cachefiles/namei.c
43209 @@ -318,7 +318,7 @@ try_again:
43210 /* first step is to make up a grave dentry in the graveyard */
43211 sprintf(nbuffer, "%08x%08x",
43212 (uint32_t) get_seconds(),
43213 - (uint32_t) atomic_inc_return(&cache->gravecounter));
43214 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
43215
43216 /* do the multiway lock magic */
43217 trap = lock_rename(cache->graveyard, dir);
43218 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
43219 index eccd339..4c1d995 100644
43220 --- a/fs/cachefiles/proc.c
43221 +++ b/fs/cachefiles/proc.c
43222 @@ -14,9 +14,9 @@
43223 #include <linux/seq_file.h>
43224 #include "internal.h"
43225
43226 -atomic_t cachefiles_lookup_histogram[HZ];
43227 -atomic_t cachefiles_mkdir_histogram[HZ];
43228 -atomic_t cachefiles_create_histogram[HZ];
43229 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43230 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43231 +atomic_unchecked_t cachefiles_create_histogram[HZ];
43232
43233 /*
43234 * display the latency histogram
43235 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
43236 return 0;
43237 default:
43238 index = (unsigned long) v - 3;
43239 - x = atomic_read(&cachefiles_lookup_histogram[index]);
43240 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
43241 - z = atomic_read(&cachefiles_create_histogram[index]);
43242 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
43243 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
43244 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
43245 if (x == 0 && y == 0 && z == 0)
43246 return 0;
43247
43248 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
43249 index 0e3c092..818480e 100644
43250 --- a/fs/cachefiles/rdwr.c
43251 +++ b/fs/cachefiles/rdwr.c
43252 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
43253 old_fs = get_fs();
43254 set_fs(KERNEL_DS);
43255 ret = file->f_op->write(
43256 - file, (const void __user *) data, len, &pos);
43257 + file, (const void __force_user *) data, len, &pos);
43258 set_fs(old_fs);
43259 kunmap(page);
43260 if (ret != len)
43261 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
43262 index 3e8094b..cb3ff3d 100644
43263 --- a/fs/ceph/dir.c
43264 +++ b/fs/ceph/dir.c
43265 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
43266 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
43267 struct ceph_mds_client *mdsc = fsc->mdsc;
43268 unsigned frag = fpos_frag(filp->f_pos);
43269 - int off = fpos_off(filp->f_pos);
43270 + unsigned int off = fpos_off(filp->f_pos);
43271 int err;
43272 u32 ftype;
43273 struct ceph_mds_reply_info_parsed *rinfo;
43274 @@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
43275 if (nd &&
43276 (nd->flags & LOOKUP_OPEN) &&
43277 !(nd->intent.open.flags & O_CREAT)) {
43278 - int mode = nd->intent.open.create_mode & ~current->fs->umask;
43279 + int mode = nd->intent.open.create_mode & ~current_umask();
43280 return ceph_lookup_open(dir, dentry, nd, mode, 1);
43281 }
43282
43283 diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
43284 index cfd1ce3..6b13a74 100644
43285 --- a/fs/cifs/asn1.c
43286 +++ b/fs/cifs/asn1.c
43287 @@ -416,6 +416,9 @@ asn1_subid_decode(struct asn1_ctx *ctx, unsigned long *subid)
43288
43289 static int
43290 asn1_oid_decode(struct asn1_ctx *ctx,
43291 + unsigned char *eoc, unsigned long **oid, unsigned int *len) __size_overflow(2);
43292 +static int
43293 +asn1_oid_decode(struct asn1_ctx *ctx,
43294 unsigned char *eoc, unsigned long **oid, unsigned int *len)
43295 {
43296 unsigned long subid;
43297 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
43298 index 24b3dfc..3cd5454 100644
43299 --- a/fs/cifs/cifs_debug.c
43300 +++ b/fs/cifs/cifs_debug.c
43301 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43302
43303 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
43304 #ifdef CONFIG_CIFS_STATS2
43305 - atomic_set(&totBufAllocCount, 0);
43306 - atomic_set(&totSmBufAllocCount, 0);
43307 + atomic_set_unchecked(&totBufAllocCount, 0);
43308 + atomic_set_unchecked(&totSmBufAllocCount, 0);
43309 #endif /* CONFIG_CIFS_STATS2 */
43310 spin_lock(&cifs_tcp_ses_lock);
43311 list_for_each(tmp1, &cifs_tcp_ses_list) {
43312 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43313 tcon = list_entry(tmp3,
43314 struct cifs_tcon,
43315 tcon_list);
43316 - atomic_set(&tcon->num_smbs_sent, 0);
43317 - atomic_set(&tcon->num_writes, 0);
43318 - atomic_set(&tcon->num_reads, 0);
43319 - atomic_set(&tcon->num_oplock_brks, 0);
43320 - atomic_set(&tcon->num_opens, 0);
43321 - atomic_set(&tcon->num_posixopens, 0);
43322 - atomic_set(&tcon->num_posixmkdirs, 0);
43323 - atomic_set(&tcon->num_closes, 0);
43324 - atomic_set(&tcon->num_deletes, 0);
43325 - atomic_set(&tcon->num_mkdirs, 0);
43326 - atomic_set(&tcon->num_rmdirs, 0);
43327 - atomic_set(&tcon->num_renames, 0);
43328 - atomic_set(&tcon->num_t2renames, 0);
43329 - atomic_set(&tcon->num_ffirst, 0);
43330 - atomic_set(&tcon->num_fnext, 0);
43331 - atomic_set(&tcon->num_fclose, 0);
43332 - atomic_set(&tcon->num_hardlinks, 0);
43333 - atomic_set(&tcon->num_symlinks, 0);
43334 - atomic_set(&tcon->num_locks, 0);
43335 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
43336 + atomic_set_unchecked(&tcon->num_writes, 0);
43337 + atomic_set_unchecked(&tcon->num_reads, 0);
43338 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
43339 + atomic_set_unchecked(&tcon->num_opens, 0);
43340 + atomic_set_unchecked(&tcon->num_posixopens, 0);
43341 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
43342 + atomic_set_unchecked(&tcon->num_closes, 0);
43343 + atomic_set_unchecked(&tcon->num_deletes, 0);
43344 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
43345 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
43346 + atomic_set_unchecked(&tcon->num_renames, 0);
43347 + atomic_set_unchecked(&tcon->num_t2renames, 0);
43348 + atomic_set_unchecked(&tcon->num_ffirst, 0);
43349 + atomic_set_unchecked(&tcon->num_fnext, 0);
43350 + atomic_set_unchecked(&tcon->num_fclose, 0);
43351 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
43352 + atomic_set_unchecked(&tcon->num_symlinks, 0);
43353 + atomic_set_unchecked(&tcon->num_locks, 0);
43354 }
43355 }
43356 }
43357 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43358 smBufAllocCount.counter, cifs_min_small);
43359 #ifdef CONFIG_CIFS_STATS2
43360 seq_printf(m, "Total Large %d Small %d Allocations\n",
43361 - atomic_read(&totBufAllocCount),
43362 - atomic_read(&totSmBufAllocCount));
43363 + atomic_read_unchecked(&totBufAllocCount),
43364 + atomic_read_unchecked(&totSmBufAllocCount));
43365 #endif /* CONFIG_CIFS_STATS2 */
43366
43367 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
43368 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43369 if (tcon->need_reconnect)
43370 seq_puts(m, "\tDISCONNECTED ");
43371 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
43372 - atomic_read(&tcon->num_smbs_sent),
43373 - atomic_read(&tcon->num_oplock_brks));
43374 + atomic_read_unchecked(&tcon->num_smbs_sent),
43375 + atomic_read_unchecked(&tcon->num_oplock_brks));
43376 seq_printf(m, "\nReads: %d Bytes: %lld",
43377 - atomic_read(&tcon->num_reads),
43378 + atomic_read_unchecked(&tcon->num_reads),
43379 (long long)(tcon->bytes_read));
43380 seq_printf(m, "\nWrites: %d Bytes: %lld",
43381 - atomic_read(&tcon->num_writes),
43382 + atomic_read_unchecked(&tcon->num_writes),
43383 (long long)(tcon->bytes_written));
43384 seq_printf(m, "\nFlushes: %d",
43385 - atomic_read(&tcon->num_flushes));
43386 + atomic_read_unchecked(&tcon->num_flushes));
43387 seq_printf(m, "\nLocks: %d HardLinks: %d "
43388 "Symlinks: %d",
43389 - atomic_read(&tcon->num_locks),
43390 - atomic_read(&tcon->num_hardlinks),
43391 - atomic_read(&tcon->num_symlinks));
43392 + atomic_read_unchecked(&tcon->num_locks),
43393 + atomic_read_unchecked(&tcon->num_hardlinks),
43394 + atomic_read_unchecked(&tcon->num_symlinks));
43395 seq_printf(m, "\nOpens: %d Closes: %d "
43396 "Deletes: %d",
43397 - atomic_read(&tcon->num_opens),
43398 - atomic_read(&tcon->num_closes),
43399 - atomic_read(&tcon->num_deletes));
43400 + atomic_read_unchecked(&tcon->num_opens),
43401 + atomic_read_unchecked(&tcon->num_closes),
43402 + atomic_read_unchecked(&tcon->num_deletes));
43403 seq_printf(m, "\nPosix Opens: %d "
43404 "Posix Mkdirs: %d",
43405 - atomic_read(&tcon->num_posixopens),
43406 - atomic_read(&tcon->num_posixmkdirs));
43407 + atomic_read_unchecked(&tcon->num_posixopens),
43408 + atomic_read_unchecked(&tcon->num_posixmkdirs));
43409 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
43410 - atomic_read(&tcon->num_mkdirs),
43411 - atomic_read(&tcon->num_rmdirs));
43412 + atomic_read_unchecked(&tcon->num_mkdirs),
43413 + atomic_read_unchecked(&tcon->num_rmdirs));
43414 seq_printf(m, "\nRenames: %d T2 Renames %d",
43415 - atomic_read(&tcon->num_renames),
43416 - atomic_read(&tcon->num_t2renames));
43417 + atomic_read_unchecked(&tcon->num_renames),
43418 + atomic_read_unchecked(&tcon->num_t2renames));
43419 seq_printf(m, "\nFindFirst: %d FNext %d "
43420 "FClose %d",
43421 - atomic_read(&tcon->num_ffirst),
43422 - atomic_read(&tcon->num_fnext),
43423 - atomic_read(&tcon->num_fclose));
43424 + atomic_read_unchecked(&tcon->num_ffirst),
43425 + atomic_read_unchecked(&tcon->num_fnext),
43426 + atomic_read_unchecked(&tcon->num_fclose));
43427 }
43428 }
43429 }
43430 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
43431 index 6ee1cb4..8443157 100644
43432 --- a/fs/cifs/cifsfs.c
43433 +++ b/fs/cifs/cifsfs.c
43434 @@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
43435 cifs_req_cachep = kmem_cache_create("cifs_request",
43436 CIFSMaxBufSize +
43437 MAX_CIFS_HDR_SIZE, 0,
43438 - SLAB_HWCACHE_ALIGN, NULL);
43439 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
43440 if (cifs_req_cachep == NULL)
43441 return -ENOMEM;
43442
43443 @@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
43444 efficient to alloc 1 per page off the slab compared to 17K (5page)
43445 alloc of large cifs buffers even when page debugging is on */
43446 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
43447 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
43448 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
43449 NULL);
43450 if (cifs_sm_req_cachep == NULL) {
43451 mempool_destroy(cifs_req_poolp);
43452 @@ -1101,8 +1101,8 @@ init_cifs(void)
43453 atomic_set(&bufAllocCount, 0);
43454 atomic_set(&smBufAllocCount, 0);
43455 #ifdef CONFIG_CIFS_STATS2
43456 - atomic_set(&totBufAllocCount, 0);
43457 - atomic_set(&totSmBufAllocCount, 0);
43458 + atomic_set_unchecked(&totBufAllocCount, 0);
43459 + atomic_set_unchecked(&totSmBufAllocCount, 0);
43460 #endif /* CONFIG_CIFS_STATS2 */
43461
43462 atomic_set(&midCount, 0);
43463 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
43464 index d47d20a..77e8b33 100644
43465 --- a/fs/cifs/cifsglob.h
43466 +++ b/fs/cifs/cifsglob.h
43467 @@ -388,28 +388,28 @@ struct cifs_tcon {
43468 __u16 Flags; /* optional support bits */
43469 enum statusEnum tidStatus;
43470 #ifdef CONFIG_CIFS_STATS
43471 - atomic_t num_smbs_sent;
43472 - atomic_t num_writes;
43473 - atomic_t num_reads;
43474 - atomic_t num_flushes;
43475 - atomic_t num_oplock_brks;
43476 - atomic_t num_opens;
43477 - atomic_t num_closes;
43478 - atomic_t num_deletes;
43479 - atomic_t num_mkdirs;
43480 - atomic_t num_posixopens;
43481 - atomic_t num_posixmkdirs;
43482 - atomic_t num_rmdirs;
43483 - atomic_t num_renames;
43484 - atomic_t num_t2renames;
43485 - atomic_t num_ffirst;
43486 - atomic_t num_fnext;
43487 - atomic_t num_fclose;
43488 - atomic_t num_hardlinks;
43489 - atomic_t num_symlinks;
43490 - atomic_t num_locks;
43491 - atomic_t num_acl_get;
43492 - atomic_t num_acl_set;
43493 + atomic_unchecked_t num_smbs_sent;
43494 + atomic_unchecked_t num_writes;
43495 + atomic_unchecked_t num_reads;
43496 + atomic_unchecked_t num_flushes;
43497 + atomic_unchecked_t num_oplock_brks;
43498 + atomic_unchecked_t num_opens;
43499 + atomic_unchecked_t num_closes;
43500 + atomic_unchecked_t num_deletes;
43501 + atomic_unchecked_t num_mkdirs;
43502 + atomic_unchecked_t num_posixopens;
43503 + atomic_unchecked_t num_posixmkdirs;
43504 + atomic_unchecked_t num_rmdirs;
43505 + atomic_unchecked_t num_renames;
43506 + atomic_unchecked_t num_t2renames;
43507 + atomic_unchecked_t num_ffirst;
43508 + atomic_unchecked_t num_fnext;
43509 + atomic_unchecked_t num_fclose;
43510 + atomic_unchecked_t num_hardlinks;
43511 + atomic_unchecked_t num_symlinks;
43512 + atomic_unchecked_t num_locks;
43513 + atomic_unchecked_t num_acl_get;
43514 + atomic_unchecked_t num_acl_set;
43515 #ifdef CONFIG_CIFS_STATS2
43516 unsigned long long time_writes;
43517 unsigned long long time_reads;
43518 @@ -624,7 +624,7 @@ convert_delimiter(char *path, char delim)
43519 }
43520
43521 #ifdef CONFIG_CIFS_STATS
43522 -#define cifs_stats_inc atomic_inc
43523 +#define cifs_stats_inc atomic_inc_unchecked
43524
43525 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
43526 unsigned int bytes)
43527 @@ -983,8 +983,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43528 /* Various Debug counters */
43529 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43530 #ifdef CONFIG_CIFS_STATS2
43531 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43532 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43533 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43534 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43535 #endif
43536 GLOBAL_EXTERN atomic_t smBufAllocCount;
43537 GLOBAL_EXTERN atomic_t midCount;
43538 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
43539 index 6b0e064..94e6c3c 100644
43540 --- a/fs/cifs/link.c
43541 +++ b/fs/cifs/link.c
43542 @@ -600,7 +600,7 @@ symlink_exit:
43543
43544 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43545 {
43546 - char *p = nd_get_link(nd);
43547 + const char *p = nd_get_link(nd);
43548 if (!IS_ERR(p))
43549 kfree(p);
43550 }
43551 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
43552 index 703ef5c..2a44ed5 100644
43553 --- a/fs/cifs/misc.c
43554 +++ b/fs/cifs/misc.c
43555 @@ -156,7 +156,7 @@ cifs_buf_get(void)
43556 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43557 atomic_inc(&bufAllocCount);
43558 #ifdef CONFIG_CIFS_STATS2
43559 - atomic_inc(&totBufAllocCount);
43560 + atomic_inc_unchecked(&totBufAllocCount);
43561 #endif /* CONFIG_CIFS_STATS2 */
43562 }
43563
43564 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43565 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43566 atomic_inc(&smBufAllocCount);
43567 #ifdef CONFIG_CIFS_STATS2
43568 - atomic_inc(&totSmBufAllocCount);
43569 + atomic_inc_unchecked(&totSmBufAllocCount);
43570 #endif /* CONFIG_CIFS_STATS2 */
43571
43572 }
43573 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43574 index 6901578..d402eb5 100644
43575 --- a/fs/coda/cache.c
43576 +++ b/fs/coda/cache.c
43577 @@ -24,7 +24,7 @@
43578 #include "coda_linux.h"
43579 #include "coda_cache.h"
43580
43581 -static atomic_t permission_epoch = ATOMIC_INIT(0);
43582 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43583
43584 /* replace or extend an acl cache hit */
43585 void coda_cache_enter(struct inode *inode, int mask)
43586 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43587 struct coda_inode_info *cii = ITOC(inode);
43588
43589 spin_lock(&cii->c_lock);
43590 - cii->c_cached_epoch = atomic_read(&permission_epoch);
43591 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43592 if (cii->c_uid != current_fsuid()) {
43593 cii->c_uid = current_fsuid();
43594 cii->c_cached_perm = mask;
43595 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43596 {
43597 struct coda_inode_info *cii = ITOC(inode);
43598 spin_lock(&cii->c_lock);
43599 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43600 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43601 spin_unlock(&cii->c_lock);
43602 }
43603
43604 /* remove all acl caches */
43605 void coda_cache_clear_all(struct super_block *sb)
43606 {
43607 - atomic_inc(&permission_epoch);
43608 + atomic_inc_unchecked(&permission_epoch);
43609 }
43610
43611
43612 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43613 spin_lock(&cii->c_lock);
43614 hit = (mask & cii->c_cached_perm) == mask &&
43615 cii->c_uid == current_fsuid() &&
43616 - cii->c_cached_epoch == atomic_read(&permission_epoch);
43617 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43618 spin_unlock(&cii->c_lock);
43619
43620 return hit;
43621 diff --git a/fs/compat.c b/fs/compat.c
43622 index 07880ba..3fb2862 100644
43623 --- a/fs/compat.c
43624 +++ b/fs/compat.c
43625 @@ -491,7 +491,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43626
43627 set_fs(KERNEL_DS);
43628 /* The __user pointer cast is valid because of the set_fs() */
43629 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43630 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43631 set_fs(oldfs);
43632 /* truncating is ok because it's a user address */
43633 if (!ret)
43634 @@ -549,7 +549,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43635 goto out;
43636
43637 ret = -EINVAL;
43638 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43639 + if (nr_segs > UIO_MAXIOV)
43640 goto out;
43641 if (nr_segs > fast_segs) {
43642 ret = -ENOMEM;
43643 @@ -832,6 +832,7 @@ struct compat_old_linux_dirent {
43644
43645 struct compat_readdir_callback {
43646 struct compat_old_linux_dirent __user *dirent;
43647 + struct file * file;
43648 int result;
43649 };
43650
43651 @@ -849,6 +850,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43652 buf->result = -EOVERFLOW;
43653 return -EOVERFLOW;
43654 }
43655 +
43656 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43657 + return 0;
43658 +
43659 buf->result++;
43660 dirent = buf->dirent;
43661 if (!access_ok(VERIFY_WRITE, dirent,
43662 @@ -881,6 +886,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43663
43664 buf.result = 0;
43665 buf.dirent = dirent;
43666 + buf.file = file;
43667
43668 error = vfs_readdir(file, compat_fillonedir, &buf);
43669 if (buf.result)
43670 @@ -901,6 +907,7 @@ struct compat_linux_dirent {
43671 struct compat_getdents_callback {
43672 struct compat_linux_dirent __user *current_dir;
43673 struct compat_linux_dirent __user *previous;
43674 + struct file * file;
43675 int count;
43676 int error;
43677 };
43678 @@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43679 buf->error = -EOVERFLOW;
43680 return -EOVERFLOW;
43681 }
43682 +
43683 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43684 + return 0;
43685 +
43686 dirent = buf->previous;
43687 if (dirent) {
43688 if (__put_user(offset, &dirent->d_off))
43689 @@ -969,6 +980,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43690 buf.previous = NULL;
43691 buf.count = count;
43692 buf.error = 0;
43693 + buf.file = file;
43694
43695 error = vfs_readdir(file, compat_filldir, &buf);
43696 if (error >= 0)
43697 @@ -990,6 +1002,7 @@ out:
43698 struct compat_getdents_callback64 {
43699 struct linux_dirent64 __user *current_dir;
43700 struct linux_dirent64 __user *previous;
43701 + struct file * file;
43702 int count;
43703 int error;
43704 };
43705 @@ -1006,6 +1019,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43706 buf->error = -EINVAL; /* only used if we fail.. */
43707 if (reclen > buf->count)
43708 return -EINVAL;
43709 +
43710 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43711 + return 0;
43712 +
43713 dirent = buf->previous;
43714
43715 if (dirent) {
43716 @@ -1057,13 +1074,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43717 buf.previous = NULL;
43718 buf.count = count;
43719 buf.error = 0;
43720 + buf.file = file;
43721
43722 error = vfs_readdir(file, compat_filldir64, &buf);
43723 if (error >= 0)
43724 error = buf.error;
43725 lastdirent = buf.previous;
43726 if (lastdirent) {
43727 - typeof(lastdirent->d_off) d_off = file->f_pos;
43728 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43729 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43730 error = -EFAULT;
43731 else
43732 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43733 index 112e45a..b59845b 100644
43734 --- a/fs/compat_binfmt_elf.c
43735 +++ b/fs/compat_binfmt_elf.c
43736 @@ -30,11 +30,13 @@
43737 #undef elf_phdr
43738 #undef elf_shdr
43739 #undef elf_note
43740 +#undef elf_dyn
43741 #undef elf_addr_t
43742 #define elfhdr elf32_hdr
43743 #define elf_phdr elf32_phdr
43744 #define elf_shdr elf32_shdr
43745 #define elf_note elf32_note
43746 +#define elf_dyn Elf32_Dyn
43747 #define elf_addr_t Elf32_Addr
43748
43749 /*
43750 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43751 index a26bea1..ae23e72 100644
43752 --- a/fs/compat_ioctl.c
43753 +++ b/fs/compat_ioctl.c
43754 @@ -211,6 +211,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43755
43756 err = get_user(palp, &up->palette);
43757 err |= get_user(length, &up->length);
43758 + if (err)
43759 + return -EFAULT;
43760
43761 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43762 err = put_user(compat_ptr(palp), &up_native->palette);
43763 @@ -622,7 +624,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43764 return -EFAULT;
43765 if (__get_user(udata, &ss32->iomem_base))
43766 return -EFAULT;
43767 - ss.iomem_base = compat_ptr(udata);
43768 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43769 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43770 __get_user(ss.port_high, &ss32->port_high))
43771 return -EFAULT;
43772 @@ -797,7 +799,7 @@ static int compat_ioctl_preallocate(struct file *file,
43773 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43774 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43775 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43776 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43777 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43778 return -EFAULT;
43779
43780 return ioctl_preallocate(file, p);
43781 @@ -1611,8 +1613,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43782 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43783 {
43784 unsigned int a, b;
43785 - a = *(unsigned int *)p;
43786 - b = *(unsigned int *)q;
43787 + a = *(const unsigned int *)p;
43788 + b = *(const unsigned int *)q;
43789 if (a > b)
43790 return 1;
43791 if (a < b)
43792 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43793 index 5ddd7eb..c18bf04 100644
43794 --- a/fs/configfs/dir.c
43795 +++ b/fs/configfs/dir.c
43796 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43797 }
43798 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43799 struct configfs_dirent *next;
43800 - const char * name;
43801 + const unsigned char * name;
43802 + char d_name[sizeof(next->s_dentry->d_iname)];
43803 int len;
43804 struct inode *inode = NULL;
43805
43806 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43807 continue;
43808
43809 name = configfs_get_name(next);
43810 - len = strlen(name);
43811 + if (next->s_dentry && name == next->s_dentry->d_iname) {
43812 + len = next->s_dentry->d_name.len;
43813 + memcpy(d_name, name, len);
43814 + name = d_name;
43815 + } else
43816 + len = strlen(name);
43817
43818 /*
43819 * We'll have a dentry and an inode for
43820 diff --git a/fs/configfs/file.c b/fs/configfs/file.c
43821 index 2b6cb23..d76e879 100644
43822 --- a/fs/configfs/file.c
43823 +++ b/fs/configfs/file.c
43824 @@ -135,6 +135,8 @@ out:
43825 */
43826
43827 static int
43828 +fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size_t count) __size_overflow(3);
43829 +static int
43830 fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size_t count)
43831 {
43832 int error;
43833 diff --git a/fs/dcache.c b/fs/dcache.c
43834 index 2576d14..0cec38d 100644
43835 --- a/fs/dcache.c
43836 +++ b/fs/dcache.c
43837 @@ -105,10 +105,10 @@ static unsigned int d_hash_shift __read_mostly;
43838 static struct hlist_bl_head *dentry_hashtable __read_mostly;
43839
43840 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
43841 - unsigned long hash)
43842 + unsigned int hash)
43843 {
43844 - hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
43845 - hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
43846 + hash += (unsigned long) parent / L1_CACHE_BYTES;
43847 + hash = hash + (hash >> D_HASHBITS);
43848 return dentry_hashtable + (hash & D_HASHMASK);
43849 }
43850
43851 @@ -3067,7 +3067,7 @@ void __init vfs_caches_init(unsigned long mempages)
43852 mempages -= reserve;
43853
43854 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43855 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43856 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43857
43858 dcache_init();
43859 inode_init();
43860 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
43861 index 956d5dd..e755e04 100644
43862 --- a/fs/debugfs/inode.c
43863 +++ b/fs/debugfs/inode.c
43864 @@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
43865 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
43866 {
43867 return debugfs_create_file(name,
43868 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43869 + S_IFDIR | S_IRWXU,
43870 +#else
43871 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43872 +#endif
43873 parent, NULL, NULL);
43874 }
43875 EXPORT_SYMBOL_GPL(debugfs_create_dir);
43876 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43877 index ab35b11..b30af66 100644
43878 --- a/fs/ecryptfs/inode.c
43879 +++ b/fs/ecryptfs/inode.c
43880 @@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43881 old_fs = get_fs();
43882 set_fs(get_ds());
43883 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43884 - (char __user *)lower_buf,
43885 + (char __force_user *)lower_buf,
43886 lower_bufsiz);
43887 set_fs(old_fs);
43888 if (rc < 0)
43889 @@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43890 }
43891 old_fs = get_fs();
43892 set_fs(get_ds());
43893 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43894 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43895 set_fs(old_fs);
43896 if (rc < 0) {
43897 kfree(buf);
43898 @@ -733,7 +733,7 @@ out:
43899 static void
43900 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43901 {
43902 - char *buf = nd_get_link(nd);
43903 + const char *buf = nd_get_link(nd);
43904 if (!IS_ERR(buf)) {
43905 /* Free the char* */
43906 kfree(buf);
43907 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43908 index 3a06f40..f7af544 100644
43909 --- a/fs/ecryptfs/miscdev.c
43910 +++ b/fs/ecryptfs/miscdev.c
43911 @@ -345,7 +345,7 @@ check_list:
43912 goto out_unlock_msg_ctx;
43913 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
43914 if (msg_ctx->msg) {
43915 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
43916 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43917 goto out_unlock_msg_ctx;
43918 i += packet_length_size;
43919 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43920 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43921 index b2a34a1..162fa69 100644
43922 --- a/fs/ecryptfs/read_write.c
43923 +++ b/fs/ecryptfs/read_write.c
43924 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43925 return -EIO;
43926 fs_save = get_fs();
43927 set_fs(get_ds());
43928 - rc = vfs_write(lower_file, data, size, &offset);
43929 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43930 set_fs(fs_save);
43931 mark_inode_dirty_sync(ecryptfs_inode);
43932 return rc;
43933 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43934 return -EIO;
43935 fs_save = get_fs();
43936 set_fs(get_ds());
43937 - rc = vfs_read(lower_file, data, size, &offset);
43938 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43939 set_fs(fs_save);
43940 return rc;
43941 }
43942 diff --git a/fs/exec.c b/fs/exec.c
43943 index 153dee1..ab4ebe9 100644
43944 --- a/fs/exec.c
43945 +++ b/fs/exec.c
43946 @@ -55,6 +55,13 @@
43947 #include <linux/pipe_fs_i.h>
43948 #include <linux/oom.h>
43949 #include <linux/compat.h>
43950 +#include <linux/random.h>
43951 +#include <linux/seq_file.h>
43952 +
43953 +#ifdef CONFIG_PAX_REFCOUNT
43954 +#include <linux/kallsyms.h>
43955 +#include <linux/kdebug.h>
43956 +#endif
43957
43958 #include <asm/uaccess.h>
43959 #include <asm/mmu_context.h>
43960 @@ -63,6 +70,15 @@
43961 #include <trace/events/task.h>
43962 #include "internal.h"
43963
43964 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
43965 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
43966 +#endif
43967 +
43968 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43969 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43970 +EXPORT_SYMBOL(pax_set_initial_flags_func);
43971 +#endif
43972 +
43973 int core_uses_pid;
43974 char core_pattern[CORENAME_MAX_SIZE] = "core";
43975 unsigned int core_pipe_limit;
43976 @@ -72,7 +88,7 @@ struct core_name {
43977 char *corename;
43978 int used, size;
43979 };
43980 -static atomic_t call_count = ATOMIC_INIT(1);
43981 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43982
43983 /* The maximal length of core_pattern is also specified in sysctl.c */
43984
43985 @@ -190,18 +206,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43986 int write)
43987 {
43988 struct page *page;
43989 - int ret;
43990
43991 -#ifdef CONFIG_STACK_GROWSUP
43992 - if (write) {
43993 - ret = expand_downwards(bprm->vma, pos);
43994 - if (ret < 0)
43995 - return NULL;
43996 - }
43997 -#endif
43998 - ret = get_user_pages(current, bprm->mm, pos,
43999 - 1, write, 1, &page, NULL);
44000 - if (ret <= 0)
44001 + if (0 > expand_downwards(bprm->vma, pos))
44002 + return NULL;
44003 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
44004 return NULL;
44005
44006 if (write) {
44007 @@ -217,6 +225,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
44008 if (size <= ARG_MAX)
44009 return page;
44010
44011 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44012 + // only allow 512KB for argv+env on suid/sgid binaries
44013 + // to prevent easy ASLR exhaustion
44014 + if (((bprm->cred->euid != current_euid()) ||
44015 + (bprm->cred->egid != current_egid())) &&
44016 + (size > (512 * 1024))) {
44017 + put_page(page);
44018 + return NULL;
44019 + }
44020 +#endif
44021 +
44022 /*
44023 * Limit to 1/4-th the stack size for the argv+env strings.
44024 * This ensures that:
44025 @@ -276,6 +295,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44026 vma->vm_end = STACK_TOP_MAX;
44027 vma->vm_start = vma->vm_end - PAGE_SIZE;
44028 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
44029 +
44030 +#ifdef CONFIG_PAX_SEGMEXEC
44031 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
44032 +#endif
44033 +
44034 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
44035 INIT_LIST_HEAD(&vma->anon_vma_chain);
44036
44037 @@ -290,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44038 mm->stack_vm = mm->total_vm = 1;
44039 up_write(&mm->mmap_sem);
44040 bprm->p = vma->vm_end - sizeof(void *);
44041 +
44042 +#ifdef CONFIG_PAX_RANDUSTACK
44043 + if (randomize_va_space)
44044 + bprm->p ^= random32() & ~PAGE_MASK;
44045 +#endif
44046 +
44047 return 0;
44048 err:
44049 up_write(&mm->mmap_sem);
44050 @@ -398,19 +428,7 @@ err:
44051 return err;
44052 }
44053
44054 -struct user_arg_ptr {
44055 -#ifdef CONFIG_COMPAT
44056 - bool is_compat;
44057 -#endif
44058 - union {
44059 - const char __user *const __user *native;
44060 -#ifdef CONFIG_COMPAT
44061 - compat_uptr_t __user *compat;
44062 -#endif
44063 - } ptr;
44064 -};
44065 -
44066 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44067 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44068 {
44069 const char __user *native;
44070
44071 @@ -419,14 +437,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44072 compat_uptr_t compat;
44073
44074 if (get_user(compat, argv.ptr.compat + nr))
44075 - return ERR_PTR(-EFAULT);
44076 + return (const char __force_user *)ERR_PTR(-EFAULT);
44077
44078 return compat_ptr(compat);
44079 }
44080 #endif
44081
44082 if (get_user(native, argv.ptr.native + nr))
44083 - return ERR_PTR(-EFAULT);
44084 + return (const char __force_user *)ERR_PTR(-EFAULT);
44085
44086 return native;
44087 }
44088 @@ -445,7 +463,7 @@ static int count(struct user_arg_ptr argv, int max)
44089 if (!p)
44090 break;
44091
44092 - if (IS_ERR(p))
44093 + if (IS_ERR((const char __force_kernel *)p))
44094 return -EFAULT;
44095
44096 if (i++ >= max)
44097 @@ -479,7 +497,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
44098
44099 ret = -EFAULT;
44100 str = get_user_arg_ptr(argv, argc);
44101 - if (IS_ERR(str))
44102 + if (IS_ERR((const char __force_kernel *)str))
44103 goto out;
44104
44105 len = strnlen_user(str, MAX_ARG_STRLEN);
44106 @@ -561,7 +579,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
44107 int r;
44108 mm_segment_t oldfs = get_fs();
44109 struct user_arg_ptr argv = {
44110 - .ptr.native = (const char __user *const __user *)__argv,
44111 + .ptr.native = (const char __force_user *const __force_user *)__argv,
44112 };
44113
44114 set_fs(KERNEL_DS);
44115 @@ -596,7 +614,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44116 unsigned long new_end = old_end - shift;
44117 struct mmu_gather tlb;
44118
44119 - BUG_ON(new_start > new_end);
44120 + if (new_start >= new_end || new_start < mmap_min_addr)
44121 + return -ENOMEM;
44122
44123 /*
44124 * ensure there are no vmas between where we want to go
44125 @@ -605,6 +624,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44126 if (vma != find_vma(mm, new_start))
44127 return -EFAULT;
44128
44129 +#ifdef CONFIG_PAX_SEGMEXEC
44130 + BUG_ON(pax_find_mirror_vma(vma));
44131 +#endif
44132 +
44133 /*
44134 * cover the whole range: [new_start, old_end)
44135 */
44136 @@ -685,10 +708,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44137 stack_top = arch_align_stack(stack_top);
44138 stack_top = PAGE_ALIGN(stack_top);
44139
44140 - if (unlikely(stack_top < mmap_min_addr) ||
44141 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
44142 - return -ENOMEM;
44143 -
44144 stack_shift = vma->vm_end - stack_top;
44145
44146 bprm->p -= stack_shift;
44147 @@ -700,8 +719,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
44148 bprm->exec -= stack_shift;
44149
44150 down_write(&mm->mmap_sem);
44151 +
44152 + /* Move stack pages down in memory. */
44153 + if (stack_shift) {
44154 + ret = shift_arg_pages(vma, stack_shift);
44155 + if (ret)
44156 + goto out_unlock;
44157 + }
44158 +
44159 vm_flags = VM_STACK_FLAGS;
44160
44161 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44162 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44163 + vm_flags &= ~VM_EXEC;
44164 +
44165 +#ifdef CONFIG_PAX_MPROTECT
44166 + if (mm->pax_flags & MF_PAX_MPROTECT)
44167 + vm_flags &= ~VM_MAYEXEC;
44168 +#endif
44169 +
44170 + }
44171 +#endif
44172 +
44173 /*
44174 * Adjust stack execute permissions; explicitly enable for
44175 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
44176 @@ -720,13 +759,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44177 goto out_unlock;
44178 BUG_ON(prev != vma);
44179
44180 - /* Move stack pages down in memory. */
44181 - if (stack_shift) {
44182 - ret = shift_arg_pages(vma, stack_shift);
44183 - if (ret)
44184 - goto out_unlock;
44185 - }
44186 -
44187 /* mprotect_fixup is overkill to remove the temporary stack flags */
44188 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
44189
44190 @@ -807,7 +839,7 @@ int kernel_read(struct file *file, loff_t offset,
44191 old_fs = get_fs();
44192 set_fs(get_ds());
44193 /* The cast to a user pointer is valid due to the set_fs() */
44194 - result = vfs_read(file, (void __user *)addr, count, &pos);
44195 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
44196 set_fs(old_fs);
44197 return result;
44198 }
44199 @@ -1252,7 +1284,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
44200 }
44201 rcu_read_unlock();
44202
44203 - if (p->fs->users > n_fs) {
44204 + if (atomic_read(&p->fs->users) > n_fs) {
44205 bprm->unsafe |= LSM_UNSAFE_SHARE;
44206 } else {
44207 res = -EAGAIN;
44208 @@ -1447,6 +1479,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
44209
44210 EXPORT_SYMBOL(search_binary_handler);
44211
44212 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44213 +static DEFINE_PER_CPU(u64, exec_counter);
44214 +static int __init init_exec_counters(void)
44215 +{
44216 + unsigned int cpu;
44217 +
44218 + for_each_possible_cpu(cpu) {
44219 + per_cpu(exec_counter, cpu) = (u64)cpu;
44220 + }
44221 +
44222 + return 0;
44223 +}
44224 +early_initcall(init_exec_counters);
44225 +static inline void increment_exec_counter(void)
44226 +{
44227 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
44228 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
44229 +}
44230 +#else
44231 +static inline void increment_exec_counter(void) {}
44232 +#endif
44233 +
44234 /*
44235 * sys_execve() executes a new program.
44236 */
44237 @@ -1455,6 +1509,11 @@ static int do_execve_common(const char *filename,
44238 struct user_arg_ptr envp,
44239 struct pt_regs *regs)
44240 {
44241 +#ifdef CONFIG_GRKERNSEC
44242 + struct file *old_exec_file;
44243 + struct acl_subject_label *old_acl;
44244 + struct rlimit old_rlim[RLIM_NLIMITS];
44245 +#endif
44246 struct linux_binprm *bprm;
44247 struct file *file;
44248 struct files_struct *displaced;
44249 @@ -1462,6 +1521,8 @@ static int do_execve_common(const char *filename,
44250 int retval;
44251 const struct cred *cred = current_cred();
44252
44253 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
44254 +
44255 /*
44256 * We move the actual failure in case of RLIMIT_NPROC excess from
44257 * set*uid() to execve() because too many poorly written programs
44258 @@ -1502,12 +1563,27 @@ static int do_execve_common(const char *filename,
44259 if (IS_ERR(file))
44260 goto out_unmark;
44261
44262 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
44263 + retval = -EPERM;
44264 + goto out_file;
44265 + }
44266 +
44267 sched_exec();
44268
44269 bprm->file = file;
44270 bprm->filename = filename;
44271 bprm->interp = filename;
44272
44273 + if (gr_process_user_ban()) {
44274 + retval = -EPERM;
44275 + goto out_file;
44276 + }
44277 +
44278 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
44279 + retval = -EACCES;
44280 + goto out_file;
44281 + }
44282 +
44283 retval = bprm_mm_init(bprm);
44284 if (retval)
44285 goto out_file;
44286 @@ -1524,24 +1600,65 @@ static int do_execve_common(const char *filename,
44287 if (retval < 0)
44288 goto out;
44289
44290 +#ifdef CONFIG_GRKERNSEC
44291 + old_acl = current->acl;
44292 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
44293 + old_exec_file = current->exec_file;
44294 + get_file(file);
44295 + current->exec_file = file;
44296 +#endif
44297 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44298 + /* limit suid stack to 8MB
44299 + we saved the old limits above and will restore them if this exec fails
44300 + */
44301 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
44302 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
44303 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
44304 +#endif
44305 +
44306 + if (!gr_tpe_allow(file)) {
44307 + retval = -EACCES;
44308 + goto out_fail;
44309 + }
44310 +
44311 + if (gr_check_crash_exec(file)) {
44312 + retval = -EACCES;
44313 + goto out_fail;
44314 + }
44315 +
44316 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
44317 + bprm->unsafe);
44318 + if (retval < 0)
44319 + goto out_fail;
44320 +
44321 retval = copy_strings_kernel(1, &bprm->filename, bprm);
44322 if (retval < 0)
44323 - goto out;
44324 + goto out_fail;
44325
44326 bprm->exec = bprm->p;
44327 retval = copy_strings(bprm->envc, envp, bprm);
44328 if (retval < 0)
44329 - goto out;
44330 + goto out_fail;
44331
44332 retval = copy_strings(bprm->argc, argv, bprm);
44333 if (retval < 0)
44334 - goto out;
44335 + goto out_fail;
44336 +
44337 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
44338 +
44339 + gr_handle_exec_args(bprm, argv);
44340
44341 retval = search_binary_handler(bprm,regs);
44342 if (retval < 0)
44343 - goto out;
44344 + goto out_fail;
44345 +#ifdef CONFIG_GRKERNSEC
44346 + if (old_exec_file)
44347 + fput(old_exec_file);
44348 +#endif
44349
44350 /* execve succeeded */
44351 +
44352 + increment_exec_counter();
44353 current->fs->in_exec = 0;
44354 current->in_execve = 0;
44355 acct_update_integrals(current);
44356 @@ -1550,6 +1667,14 @@ static int do_execve_common(const char *filename,
44357 put_files_struct(displaced);
44358 return retval;
44359
44360 +out_fail:
44361 +#ifdef CONFIG_GRKERNSEC
44362 + current->acl = old_acl;
44363 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
44364 + fput(current->exec_file);
44365 + current->exec_file = old_exec_file;
44366 +#endif
44367 +
44368 out:
44369 if (bprm->mm) {
44370 acct_arg_size(bprm, 0);
44371 @@ -1623,7 +1748,7 @@ static int expand_corename(struct core_name *cn)
44372 {
44373 char *old_corename = cn->corename;
44374
44375 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
44376 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
44377 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
44378
44379 if (!cn->corename) {
44380 @@ -1720,7 +1845,7 @@ static int format_corename(struct core_name *cn, long signr)
44381 int pid_in_pattern = 0;
44382 int err = 0;
44383
44384 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
44385 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
44386 cn->corename = kmalloc(cn->size, GFP_KERNEL);
44387 cn->used = 0;
44388
44389 @@ -1817,6 +1942,228 @@ out:
44390 return ispipe;
44391 }
44392
44393 +int pax_check_flags(unsigned long *flags)
44394 +{
44395 + int retval = 0;
44396 +
44397 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
44398 + if (*flags & MF_PAX_SEGMEXEC)
44399 + {
44400 + *flags &= ~MF_PAX_SEGMEXEC;
44401 + retval = -EINVAL;
44402 + }
44403 +#endif
44404 +
44405 + if ((*flags & MF_PAX_PAGEEXEC)
44406 +
44407 +#ifdef CONFIG_PAX_PAGEEXEC
44408 + && (*flags & MF_PAX_SEGMEXEC)
44409 +#endif
44410 +
44411 + )
44412 + {
44413 + *flags &= ~MF_PAX_PAGEEXEC;
44414 + retval = -EINVAL;
44415 + }
44416 +
44417 + if ((*flags & MF_PAX_MPROTECT)
44418 +
44419 +#ifdef CONFIG_PAX_MPROTECT
44420 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44421 +#endif
44422 +
44423 + )
44424 + {
44425 + *flags &= ~MF_PAX_MPROTECT;
44426 + retval = -EINVAL;
44427 + }
44428 +
44429 + if ((*flags & MF_PAX_EMUTRAMP)
44430 +
44431 +#ifdef CONFIG_PAX_EMUTRAMP
44432 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44433 +#endif
44434 +
44435 + )
44436 + {
44437 + *flags &= ~MF_PAX_EMUTRAMP;
44438 + retval = -EINVAL;
44439 + }
44440 +
44441 + return retval;
44442 +}
44443 +
44444 +EXPORT_SYMBOL(pax_check_flags);
44445 +
44446 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44447 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
44448 +{
44449 + struct task_struct *tsk = current;
44450 + struct mm_struct *mm = current->mm;
44451 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
44452 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
44453 + char *path_exec = NULL;
44454 + char *path_fault = NULL;
44455 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
44456 +
44457 + if (buffer_exec && buffer_fault) {
44458 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
44459 +
44460 + down_read(&mm->mmap_sem);
44461 + vma = mm->mmap;
44462 + while (vma && (!vma_exec || !vma_fault)) {
44463 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
44464 + vma_exec = vma;
44465 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
44466 + vma_fault = vma;
44467 + vma = vma->vm_next;
44468 + }
44469 + if (vma_exec) {
44470 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
44471 + if (IS_ERR(path_exec))
44472 + path_exec = "<path too long>";
44473 + else {
44474 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
44475 + if (path_exec) {
44476 + *path_exec = 0;
44477 + path_exec = buffer_exec;
44478 + } else
44479 + path_exec = "<path too long>";
44480 + }
44481 + }
44482 + if (vma_fault) {
44483 + start = vma_fault->vm_start;
44484 + end = vma_fault->vm_end;
44485 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
44486 + if (vma_fault->vm_file) {
44487 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
44488 + if (IS_ERR(path_fault))
44489 + path_fault = "<path too long>";
44490 + else {
44491 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
44492 + if (path_fault) {
44493 + *path_fault = 0;
44494 + path_fault = buffer_fault;
44495 + } else
44496 + path_fault = "<path too long>";
44497 + }
44498 + } else
44499 + path_fault = "<anonymous mapping>";
44500 + }
44501 + up_read(&mm->mmap_sem);
44502 + }
44503 + if (tsk->signal->curr_ip)
44504 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
44505 + else
44506 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
44507 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
44508 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
44509 + task_uid(tsk), task_euid(tsk), pc, sp);
44510 + free_page((unsigned long)buffer_exec);
44511 + free_page((unsigned long)buffer_fault);
44512 + pax_report_insns(regs, pc, sp);
44513 + do_coredump(SIGKILL, SIGKILL, regs);
44514 +}
44515 +#endif
44516 +
44517 +#ifdef CONFIG_PAX_REFCOUNT
44518 +void pax_report_refcount_overflow(struct pt_regs *regs)
44519 +{
44520 + if (current->signal->curr_ip)
44521 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44522 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
44523 + else
44524 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44525 + current->comm, task_pid_nr(current), current_uid(), current_euid());
44526 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
44527 + show_regs(regs);
44528 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
44529 +}
44530 +#endif
44531 +
44532 +#ifdef CONFIG_PAX_USERCOPY
44533 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
44534 +int object_is_on_stack(const void *obj, unsigned long len)
44535 +{
44536 + const void * const stack = task_stack_page(current);
44537 + const void * const stackend = stack + THREAD_SIZE;
44538 +
44539 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44540 + const void *frame = NULL;
44541 + const void *oldframe;
44542 +#endif
44543 +
44544 + if (obj + len < obj)
44545 + return -1;
44546 +
44547 + if (obj + len <= stack || stackend <= obj)
44548 + return 0;
44549 +
44550 + if (obj < stack || stackend < obj + len)
44551 + return -1;
44552 +
44553 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44554 + oldframe = __builtin_frame_address(1);
44555 + if (oldframe)
44556 + frame = __builtin_frame_address(2);
44557 + /*
44558 + low ----------------------------------------------> high
44559 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
44560 + ^----------------^
44561 + allow copies only within here
44562 + */
44563 + while (stack <= frame && frame < stackend) {
44564 + /* if obj + len extends past the last frame, this
44565 + check won't pass and the next frame will be 0,
44566 + causing us to bail out and correctly report
44567 + the copy as invalid
44568 + */
44569 + if (obj + len <= frame)
44570 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44571 + oldframe = frame;
44572 + frame = *(const void * const *)frame;
44573 + }
44574 + return -1;
44575 +#else
44576 + return 1;
44577 +#endif
44578 +}
44579 +
44580 +__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
44581 +{
44582 + if (current->signal->curr_ip)
44583 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44584 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44585 + else
44586 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44587 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44588 + dump_stack();
44589 + gr_handle_kernel_exploit();
44590 + do_group_exit(SIGKILL);
44591 +}
44592 +#endif
44593 +
44594 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44595 +void pax_track_stack(void)
44596 +{
44597 + unsigned long sp = (unsigned long)&sp;
44598 + if (sp < current_thread_info()->lowest_stack &&
44599 + sp > (unsigned long)task_stack_page(current))
44600 + current_thread_info()->lowest_stack = sp;
44601 +}
44602 +EXPORT_SYMBOL(pax_track_stack);
44603 +#endif
44604 +
44605 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
44606 +void report_size_overflow(const char *file, unsigned int line, const char *func)
44607 +{
44608 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
44609 + dump_stack();
44610 + do_group_exit(SIGKILL);
44611 +}
44612 +EXPORT_SYMBOL(report_size_overflow);
44613 +#endif
44614 +
44615 static int zap_process(struct task_struct *start, int exit_code)
44616 {
44617 struct task_struct *t;
44618 @@ -2014,17 +2361,17 @@ static void wait_for_dump_helpers(struct file *file)
44619 pipe = file->f_path.dentry->d_inode->i_pipe;
44620
44621 pipe_lock(pipe);
44622 - pipe->readers++;
44623 - pipe->writers--;
44624 + atomic_inc(&pipe->readers);
44625 + atomic_dec(&pipe->writers);
44626
44627 - while ((pipe->readers > 1) && (!signal_pending(current))) {
44628 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44629 wake_up_interruptible_sync(&pipe->wait);
44630 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44631 pipe_wait(pipe);
44632 }
44633
44634 - pipe->readers--;
44635 - pipe->writers++;
44636 + atomic_dec(&pipe->readers);
44637 + atomic_inc(&pipe->writers);
44638 pipe_unlock(pipe);
44639
44640 }
44641 @@ -2085,7 +2432,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44642 int retval = 0;
44643 int flag = 0;
44644 int ispipe;
44645 - static atomic_t core_dump_count = ATOMIC_INIT(0);
44646 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44647 struct coredump_params cprm = {
44648 .signr = signr,
44649 .regs = regs,
44650 @@ -2100,6 +2447,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44651
44652 audit_core_dumps(signr);
44653
44654 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44655 + gr_handle_brute_attach(current, cprm.mm_flags);
44656 +
44657 binfmt = mm->binfmt;
44658 if (!binfmt || !binfmt->core_dump)
44659 goto fail;
44660 @@ -2167,7 +2517,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44661 }
44662 cprm.limit = RLIM_INFINITY;
44663
44664 - dump_count = atomic_inc_return(&core_dump_count);
44665 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
44666 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44667 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44668 task_tgid_vnr(current), current->comm);
44669 @@ -2194,6 +2544,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44670 } else {
44671 struct inode *inode;
44672
44673 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44674 +
44675 if (cprm.limit < binfmt->min_coredump)
44676 goto fail_unlock;
44677
44678 @@ -2237,7 +2589,7 @@ close_fail:
44679 filp_close(cprm.file, NULL);
44680 fail_dropcount:
44681 if (ispipe)
44682 - atomic_dec(&core_dump_count);
44683 + atomic_dec_unchecked(&core_dump_count);
44684 fail_unlock:
44685 kfree(cn.corename);
44686 fail_corename:
44687 @@ -2256,7 +2608,7 @@ fail:
44688 */
44689 int dump_write(struct file *file, const void *addr, int nr)
44690 {
44691 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44692 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44693 }
44694 EXPORT_SYMBOL(dump_write);
44695
44696 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44697 index a8cbe1b..fed04cb 100644
44698 --- a/fs/ext2/balloc.c
44699 +++ b/fs/ext2/balloc.c
44700 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44701
44702 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44703 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44704 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44705 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44706 sbi->s_resuid != current_fsuid() &&
44707 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44708 return 0;
44709 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44710 index a203892..4e64db5 100644
44711 --- a/fs/ext3/balloc.c
44712 +++ b/fs/ext3/balloc.c
44713 @@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
44714
44715 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44716 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44717 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44718 + if (free_blocks < root_blocks + 1 &&
44719 !use_reservation && sbi->s_resuid != current_fsuid() &&
44720 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44721 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
44722 + !capable_nolog(CAP_SYS_RESOURCE)) {
44723 return 0;
44724 }
44725 return 1;
44726 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44727 index f9e2cd8..bfdc476 100644
44728 --- a/fs/ext4/balloc.c
44729 +++ b/fs/ext4/balloc.c
44730 @@ -438,8 +438,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
44731 /* Hm, nope. Are (enough) root reserved clusters available? */
44732 if (sbi->s_resuid == current_fsuid() ||
44733 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44734 - capable(CAP_SYS_RESOURCE) ||
44735 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44736 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44737 + capable_nolog(CAP_SYS_RESOURCE)) {
44738
44739 if (free_clusters >= (nclusters + dirty_clusters))
44740 return 1;
44741 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44742 index 9983ba8..2a5272c 100644
44743 --- a/fs/ext4/ext4.h
44744 +++ b/fs/ext4/ext4.h
44745 @@ -1217,19 +1217,19 @@ struct ext4_sb_info {
44746 unsigned long s_mb_last_start;
44747
44748 /* stats for buddy allocator */
44749 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44750 - atomic_t s_bal_success; /* we found long enough chunks */
44751 - atomic_t s_bal_allocated; /* in blocks */
44752 - atomic_t s_bal_ex_scanned; /* total extents scanned */
44753 - atomic_t s_bal_goals; /* goal hits */
44754 - atomic_t s_bal_breaks; /* too long searches */
44755 - atomic_t s_bal_2orders; /* 2^order hits */
44756 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44757 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44758 + atomic_unchecked_t s_bal_allocated; /* in blocks */
44759 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44760 + atomic_unchecked_t s_bal_goals; /* goal hits */
44761 + atomic_unchecked_t s_bal_breaks; /* too long searches */
44762 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44763 spinlock_t s_bal_lock;
44764 unsigned long s_mb_buddies_generated;
44765 unsigned long long s_mb_generation_time;
44766 - atomic_t s_mb_lost_chunks;
44767 - atomic_t s_mb_preallocated;
44768 - atomic_t s_mb_discarded;
44769 + atomic_unchecked_t s_mb_lost_chunks;
44770 + atomic_unchecked_t s_mb_preallocated;
44771 + atomic_unchecked_t s_mb_discarded;
44772 atomic_t s_lock_busy;
44773
44774 /* locality groups */
44775 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44776 index cb990b2..4820141 100644
44777 --- a/fs/ext4/mballoc.c
44778 +++ b/fs/ext4/mballoc.c
44779 @@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44780 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44781
44782 if (EXT4_SB(sb)->s_mb_stats)
44783 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44784 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44785
44786 break;
44787 }
44788 @@ -2088,7 +2088,7 @@ repeat:
44789 ac->ac_status = AC_STATUS_CONTINUE;
44790 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44791 cr = 3;
44792 - atomic_inc(&sbi->s_mb_lost_chunks);
44793 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44794 goto repeat;
44795 }
44796 }
44797 @@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
44798 if (sbi->s_mb_stats) {
44799 ext4_msg(sb, KERN_INFO,
44800 "mballoc: %u blocks %u reqs (%u success)",
44801 - atomic_read(&sbi->s_bal_allocated),
44802 - atomic_read(&sbi->s_bal_reqs),
44803 - atomic_read(&sbi->s_bal_success));
44804 + atomic_read_unchecked(&sbi->s_bal_allocated),
44805 + atomic_read_unchecked(&sbi->s_bal_reqs),
44806 + atomic_read_unchecked(&sbi->s_bal_success));
44807 ext4_msg(sb, KERN_INFO,
44808 "mballoc: %u extents scanned, %u goal hits, "
44809 "%u 2^N hits, %u breaks, %u lost",
44810 - atomic_read(&sbi->s_bal_ex_scanned),
44811 - atomic_read(&sbi->s_bal_goals),
44812 - atomic_read(&sbi->s_bal_2orders),
44813 - atomic_read(&sbi->s_bal_breaks),
44814 - atomic_read(&sbi->s_mb_lost_chunks));
44815 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44816 + atomic_read_unchecked(&sbi->s_bal_goals),
44817 + atomic_read_unchecked(&sbi->s_bal_2orders),
44818 + atomic_read_unchecked(&sbi->s_bal_breaks),
44819 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44820 ext4_msg(sb, KERN_INFO,
44821 "mballoc: %lu generated and it took %Lu",
44822 sbi->s_mb_buddies_generated,
44823 sbi->s_mb_generation_time);
44824 ext4_msg(sb, KERN_INFO,
44825 "mballoc: %u preallocated, %u discarded",
44826 - atomic_read(&sbi->s_mb_preallocated),
44827 - atomic_read(&sbi->s_mb_discarded));
44828 + atomic_read_unchecked(&sbi->s_mb_preallocated),
44829 + atomic_read_unchecked(&sbi->s_mb_discarded));
44830 }
44831
44832 free_percpu(sbi->s_locality_groups);
44833 @@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44834 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44835
44836 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44837 - atomic_inc(&sbi->s_bal_reqs);
44838 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44839 + atomic_inc_unchecked(&sbi->s_bal_reqs);
44840 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44841 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44842 - atomic_inc(&sbi->s_bal_success);
44843 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44844 + atomic_inc_unchecked(&sbi->s_bal_success);
44845 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44846 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44847 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44848 - atomic_inc(&sbi->s_bal_goals);
44849 + atomic_inc_unchecked(&sbi->s_bal_goals);
44850 if (ac->ac_found > sbi->s_mb_max_to_scan)
44851 - atomic_inc(&sbi->s_bal_breaks);
44852 + atomic_inc_unchecked(&sbi->s_bal_breaks);
44853 }
44854
44855 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44856 @@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44857 trace_ext4_mb_new_inode_pa(ac, pa);
44858
44859 ext4_mb_use_inode_pa(ac, pa);
44860 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
44861 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
44862
44863 ei = EXT4_I(ac->ac_inode);
44864 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44865 @@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44866 trace_ext4_mb_new_group_pa(ac, pa);
44867
44868 ext4_mb_use_group_pa(ac, pa);
44869 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44870 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44871
44872 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44873 lg = ac->ac_lg;
44874 @@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44875 * from the bitmap and continue.
44876 */
44877 }
44878 - atomic_add(free, &sbi->s_mb_discarded);
44879 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
44880
44881 return err;
44882 }
44883 @@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44884 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44885 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44886 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44887 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44888 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44889 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44890
44891 return 0;
44892 diff --git a/fs/fcntl.c b/fs/fcntl.c
44893 index 22764c7..86372c9 100644
44894 --- a/fs/fcntl.c
44895 +++ b/fs/fcntl.c
44896 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44897 if (err)
44898 return err;
44899
44900 + if (gr_handle_chroot_fowner(pid, type))
44901 + return -ENOENT;
44902 + if (gr_check_protected_task_fowner(pid, type))
44903 + return -EACCES;
44904 +
44905 f_modown(filp, pid, type, force);
44906 return 0;
44907 }
44908 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44909
44910 static int f_setown_ex(struct file *filp, unsigned long arg)
44911 {
44912 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44913 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44914 struct f_owner_ex owner;
44915 struct pid *pid;
44916 int type;
44917 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44918
44919 static int f_getown_ex(struct file *filp, unsigned long arg)
44920 {
44921 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44922 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44923 struct f_owner_ex owner;
44924 int ret = 0;
44925
44926 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44927 switch (cmd) {
44928 case F_DUPFD:
44929 case F_DUPFD_CLOEXEC:
44930 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44931 if (arg >= rlimit(RLIMIT_NOFILE))
44932 break;
44933 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44934 diff --git a/fs/fifo.c b/fs/fifo.c
44935 index b1a524d..4ee270e 100644
44936 --- a/fs/fifo.c
44937 +++ b/fs/fifo.c
44938 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44939 */
44940 filp->f_op = &read_pipefifo_fops;
44941 pipe->r_counter++;
44942 - if (pipe->readers++ == 0)
44943 + if (atomic_inc_return(&pipe->readers) == 1)
44944 wake_up_partner(inode);
44945
44946 - if (!pipe->writers) {
44947 + if (!atomic_read(&pipe->writers)) {
44948 if ((filp->f_flags & O_NONBLOCK)) {
44949 /* suppress POLLHUP until we have
44950 * seen a writer */
44951 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44952 * errno=ENXIO when there is no process reading the FIFO.
44953 */
44954 ret = -ENXIO;
44955 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44956 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44957 goto err;
44958
44959 filp->f_op = &write_pipefifo_fops;
44960 pipe->w_counter++;
44961 - if (!pipe->writers++)
44962 + if (atomic_inc_return(&pipe->writers) == 1)
44963 wake_up_partner(inode);
44964
44965 - if (!pipe->readers) {
44966 + if (!atomic_read(&pipe->readers)) {
44967 wait_for_partner(inode, &pipe->r_counter);
44968 if (signal_pending(current))
44969 goto err_wr;
44970 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44971 */
44972 filp->f_op = &rdwr_pipefifo_fops;
44973
44974 - pipe->readers++;
44975 - pipe->writers++;
44976 + atomic_inc(&pipe->readers);
44977 + atomic_inc(&pipe->writers);
44978 pipe->r_counter++;
44979 pipe->w_counter++;
44980 - if (pipe->readers == 1 || pipe->writers == 1)
44981 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44982 wake_up_partner(inode);
44983 break;
44984
44985 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44986 return 0;
44987
44988 err_rd:
44989 - if (!--pipe->readers)
44990 + if (atomic_dec_and_test(&pipe->readers))
44991 wake_up_interruptible(&pipe->wait);
44992 ret = -ERESTARTSYS;
44993 goto err;
44994
44995 err_wr:
44996 - if (!--pipe->writers)
44997 + if (atomic_dec_and_test(&pipe->writers))
44998 wake_up_interruptible(&pipe->wait);
44999 ret = -ERESTARTSYS;
45000 goto err;
45001
45002 err:
45003 - if (!pipe->readers && !pipe->writers)
45004 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
45005 free_pipe_info(inode);
45006
45007 err_nocleanup:
45008 diff --git a/fs/file.c b/fs/file.c
45009 index 4c6992d..104cdea 100644
45010 --- a/fs/file.c
45011 +++ b/fs/file.c
45012 @@ -15,6 +15,7 @@
45013 #include <linux/slab.h>
45014 #include <linux/vmalloc.h>
45015 #include <linux/file.h>
45016 +#include <linux/security.h>
45017 #include <linux/fdtable.h>
45018 #include <linux/bitops.h>
45019 #include <linux/interrupt.h>
45020 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
45021 * N.B. For clone tasks sharing a files structure, this test
45022 * will limit the total number of files that can be opened.
45023 */
45024 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
45025 if (nr >= rlimit(RLIMIT_NOFILE))
45026 return -EMFILE;
45027
45028 diff --git a/fs/filesystems.c b/fs/filesystems.c
45029 index 96f2428..f5eeb8e 100644
45030 --- a/fs/filesystems.c
45031 +++ b/fs/filesystems.c
45032 @@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
45033 int len = dot ? dot - name : strlen(name);
45034
45035 fs = __get_fs_type(name, len);
45036 +
45037 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
45038 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
45039 +#else
45040 if (!fs && (request_module("%.*s", len, name) == 0))
45041 +#endif
45042 fs = __get_fs_type(name, len);
45043
45044 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
45045 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
45046 index 78b519c..a8b4979 100644
45047 --- a/fs/fs_struct.c
45048 +++ b/fs/fs_struct.c
45049 @@ -4,6 +4,7 @@
45050 #include <linux/path.h>
45051 #include <linux/slab.h>
45052 #include <linux/fs_struct.h>
45053 +#include <linux/grsecurity.h>
45054 #include "internal.h"
45055
45056 static inline void path_get_longterm(struct path *path)
45057 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
45058 old_root = fs->root;
45059 fs->root = *path;
45060 path_get_longterm(path);
45061 + gr_set_chroot_entries(current, path);
45062 write_seqcount_end(&fs->seq);
45063 spin_unlock(&fs->lock);
45064 if (old_root.dentry)
45065 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
45066 && fs->root.mnt == old_root->mnt) {
45067 path_get_longterm(new_root);
45068 fs->root = *new_root;
45069 + gr_set_chroot_entries(p, new_root);
45070 count++;
45071 }
45072 if (fs->pwd.dentry == old_root->dentry
45073 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
45074 spin_lock(&fs->lock);
45075 write_seqcount_begin(&fs->seq);
45076 tsk->fs = NULL;
45077 - kill = !--fs->users;
45078 + gr_clear_chroot_entries(tsk);
45079 + kill = !atomic_dec_return(&fs->users);
45080 write_seqcount_end(&fs->seq);
45081 spin_unlock(&fs->lock);
45082 task_unlock(tsk);
45083 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45084 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
45085 /* We don't need to lock fs - think why ;-) */
45086 if (fs) {
45087 - fs->users = 1;
45088 + atomic_set(&fs->users, 1);
45089 fs->in_exec = 0;
45090 spin_lock_init(&fs->lock);
45091 seqcount_init(&fs->seq);
45092 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45093 spin_lock(&old->lock);
45094 fs->root = old->root;
45095 path_get_longterm(&fs->root);
45096 + /* instead of calling gr_set_chroot_entries here,
45097 + we call it from every caller of this function
45098 + */
45099 fs->pwd = old->pwd;
45100 path_get_longterm(&fs->pwd);
45101 spin_unlock(&old->lock);
45102 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
45103
45104 task_lock(current);
45105 spin_lock(&fs->lock);
45106 - kill = !--fs->users;
45107 + kill = !atomic_dec_return(&fs->users);
45108 current->fs = new_fs;
45109 + gr_set_chroot_entries(current, &new_fs->root);
45110 spin_unlock(&fs->lock);
45111 task_unlock(current);
45112
45113 @@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
45114
45115 int current_umask(void)
45116 {
45117 - return current->fs->umask;
45118 + return current->fs->umask | gr_acl_umask();
45119 }
45120 EXPORT_SYMBOL(current_umask);
45121
45122 /* to be mentioned only in INIT_TASK */
45123 struct fs_struct init_fs = {
45124 - .users = 1,
45125 + .users = ATOMIC_INIT(1),
45126 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
45127 .seq = SEQCNT_ZERO,
45128 .umask = 0022,
45129 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
45130 task_lock(current);
45131
45132 spin_lock(&init_fs.lock);
45133 - init_fs.users++;
45134 + atomic_inc(&init_fs.users);
45135 spin_unlock(&init_fs.lock);
45136
45137 spin_lock(&fs->lock);
45138 current->fs = &init_fs;
45139 - kill = !--fs->users;
45140 + gr_set_chroot_entries(current, &current->fs->root);
45141 + kill = !atomic_dec_return(&fs->users);
45142 spin_unlock(&fs->lock);
45143
45144 task_unlock(current);
45145 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
45146 index 9905350..02eaec4 100644
45147 --- a/fs/fscache/cookie.c
45148 +++ b/fs/fscache/cookie.c
45149 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
45150 parent ? (char *) parent->def->name : "<no-parent>",
45151 def->name, netfs_data);
45152
45153 - fscache_stat(&fscache_n_acquires);
45154 + fscache_stat_unchecked(&fscache_n_acquires);
45155
45156 /* if there's no parent cookie, then we don't create one here either */
45157 if (!parent) {
45158 - fscache_stat(&fscache_n_acquires_null);
45159 + fscache_stat_unchecked(&fscache_n_acquires_null);
45160 _leave(" [no parent]");
45161 return NULL;
45162 }
45163 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
45164 /* allocate and initialise a cookie */
45165 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
45166 if (!cookie) {
45167 - fscache_stat(&fscache_n_acquires_oom);
45168 + fscache_stat_unchecked(&fscache_n_acquires_oom);
45169 _leave(" [ENOMEM]");
45170 return NULL;
45171 }
45172 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45173
45174 switch (cookie->def->type) {
45175 case FSCACHE_COOKIE_TYPE_INDEX:
45176 - fscache_stat(&fscache_n_cookie_index);
45177 + fscache_stat_unchecked(&fscache_n_cookie_index);
45178 break;
45179 case FSCACHE_COOKIE_TYPE_DATAFILE:
45180 - fscache_stat(&fscache_n_cookie_data);
45181 + fscache_stat_unchecked(&fscache_n_cookie_data);
45182 break;
45183 default:
45184 - fscache_stat(&fscache_n_cookie_special);
45185 + fscache_stat_unchecked(&fscache_n_cookie_special);
45186 break;
45187 }
45188
45189 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45190 if (fscache_acquire_non_index_cookie(cookie) < 0) {
45191 atomic_dec(&parent->n_children);
45192 __fscache_cookie_put(cookie);
45193 - fscache_stat(&fscache_n_acquires_nobufs);
45194 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
45195 _leave(" = NULL");
45196 return NULL;
45197 }
45198 }
45199
45200 - fscache_stat(&fscache_n_acquires_ok);
45201 + fscache_stat_unchecked(&fscache_n_acquires_ok);
45202 _leave(" = %p", cookie);
45203 return cookie;
45204 }
45205 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
45206 cache = fscache_select_cache_for_object(cookie->parent);
45207 if (!cache) {
45208 up_read(&fscache_addremove_sem);
45209 - fscache_stat(&fscache_n_acquires_no_cache);
45210 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
45211 _leave(" = -ENOMEDIUM [no cache]");
45212 return -ENOMEDIUM;
45213 }
45214 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
45215 object = cache->ops->alloc_object(cache, cookie);
45216 fscache_stat_d(&fscache_n_cop_alloc_object);
45217 if (IS_ERR(object)) {
45218 - fscache_stat(&fscache_n_object_no_alloc);
45219 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
45220 ret = PTR_ERR(object);
45221 goto error;
45222 }
45223
45224 - fscache_stat(&fscache_n_object_alloc);
45225 + fscache_stat_unchecked(&fscache_n_object_alloc);
45226
45227 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
45228
45229 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
45230 struct fscache_object *object;
45231 struct hlist_node *_p;
45232
45233 - fscache_stat(&fscache_n_updates);
45234 + fscache_stat_unchecked(&fscache_n_updates);
45235
45236 if (!cookie) {
45237 - fscache_stat(&fscache_n_updates_null);
45238 + fscache_stat_unchecked(&fscache_n_updates_null);
45239 _leave(" [no cookie]");
45240 return;
45241 }
45242 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45243 struct fscache_object *object;
45244 unsigned long event;
45245
45246 - fscache_stat(&fscache_n_relinquishes);
45247 + fscache_stat_unchecked(&fscache_n_relinquishes);
45248 if (retire)
45249 - fscache_stat(&fscache_n_relinquishes_retire);
45250 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
45251
45252 if (!cookie) {
45253 - fscache_stat(&fscache_n_relinquishes_null);
45254 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
45255 _leave(" [no cookie]");
45256 return;
45257 }
45258 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45259
45260 /* wait for the cookie to finish being instantiated (or to fail) */
45261 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
45262 - fscache_stat(&fscache_n_relinquishes_waitcrt);
45263 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
45264 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
45265 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
45266 }
45267 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
45268 index f6aad48..88dcf26 100644
45269 --- a/fs/fscache/internal.h
45270 +++ b/fs/fscache/internal.h
45271 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
45272 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
45273 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
45274
45275 -extern atomic_t fscache_n_op_pend;
45276 -extern atomic_t fscache_n_op_run;
45277 -extern atomic_t fscache_n_op_enqueue;
45278 -extern atomic_t fscache_n_op_deferred_release;
45279 -extern atomic_t fscache_n_op_release;
45280 -extern atomic_t fscache_n_op_gc;
45281 -extern atomic_t fscache_n_op_cancelled;
45282 -extern atomic_t fscache_n_op_rejected;
45283 +extern atomic_unchecked_t fscache_n_op_pend;
45284 +extern atomic_unchecked_t fscache_n_op_run;
45285 +extern atomic_unchecked_t fscache_n_op_enqueue;
45286 +extern atomic_unchecked_t fscache_n_op_deferred_release;
45287 +extern atomic_unchecked_t fscache_n_op_release;
45288 +extern atomic_unchecked_t fscache_n_op_gc;
45289 +extern atomic_unchecked_t fscache_n_op_cancelled;
45290 +extern atomic_unchecked_t fscache_n_op_rejected;
45291
45292 -extern atomic_t fscache_n_attr_changed;
45293 -extern atomic_t fscache_n_attr_changed_ok;
45294 -extern atomic_t fscache_n_attr_changed_nobufs;
45295 -extern atomic_t fscache_n_attr_changed_nomem;
45296 -extern atomic_t fscache_n_attr_changed_calls;
45297 +extern atomic_unchecked_t fscache_n_attr_changed;
45298 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
45299 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
45300 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
45301 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
45302
45303 -extern atomic_t fscache_n_allocs;
45304 -extern atomic_t fscache_n_allocs_ok;
45305 -extern atomic_t fscache_n_allocs_wait;
45306 -extern atomic_t fscache_n_allocs_nobufs;
45307 -extern atomic_t fscache_n_allocs_intr;
45308 -extern atomic_t fscache_n_allocs_object_dead;
45309 -extern atomic_t fscache_n_alloc_ops;
45310 -extern atomic_t fscache_n_alloc_op_waits;
45311 +extern atomic_unchecked_t fscache_n_allocs;
45312 +extern atomic_unchecked_t fscache_n_allocs_ok;
45313 +extern atomic_unchecked_t fscache_n_allocs_wait;
45314 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
45315 +extern atomic_unchecked_t fscache_n_allocs_intr;
45316 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
45317 +extern atomic_unchecked_t fscache_n_alloc_ops;
45318 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
45319
45320 -extern atomic_t fscache_n_retrievals;
45321 -extern atomic_t fscache_n_retrievals_ok;
45322 -extern atomic_t fscache_n_retrievals_wait;
45323 -extern atomic_t fscache_n_retrievals_nodata;
45324 -extern atomic_t fscache_n_retrievals_nobufs;
45325 -extern atomic_t fscache_n_retrievals_intr;
45326 -extern atomic_t fscache_n_retrievals_nomem;
45327 -extern atomic_t fscache_n_retrievals_object_dead;
45328 -extern atomic_t fscache_n_retrieval_ops;
45329 -extern atomic_t fscache_n_retrieval_op_waits;
45330 +extern atomic_unchecked_t fscache_n_retrievals;
45331 +extern atomic_unchecked_t fscache_n_retrievals_ok;
45332 +extern atomic_unchecked_t fscache_n_retrievals_wait;
45333 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
45334 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
45335 +extern atomic_unchecked_t fscache_n_retrievals_intr;
45336 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
45337 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
45338 +extern atomic_unchecked_t fscache_n_retrieval_ops;
45339 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
45340
45341 -extern atomic_t fscache_n_stores;
45342 -extern atomic_t fscache_n_stores_ok;
45343 -extern atomic_t fscache_n_stores_again;
45344 -extern atomic_t fscache_n_stores_nobufs;
45345 -extern atomic_t fscache_n_stores_oom;
45346 -extern atomic_t fscache_n_store_ops;
45347 -extern atomic_t fscache_n_store_calls;
45348 -extern atomic_t fscache_n_store_pages;
45349 -extern atomic_t fscache_n_store_radix_deletes;
45350 -extern atomic_t fscache_n_store_pages_over_limit;
45351 +extern atomic_unchecked_t fscache_n_stores;
45352 +extern atomic_unchecked_t fscache_n_stores_ok;
45353 +extern atomic_unchecked_t fscache_n_stores_again;
45354 +extern atomic_unchecked_t fscache_n_stores_nobufs;
45355 +extern atomic_unchecked_t fscache_n_stores_oom;
45356 +extern atomic_unchecked_t fscache_n_store_ops;
45357 +extern atomic_unchecked_t fscache_n_store_calls;
45358 +extern atomic_unchecked_t fscache_n_store_pages;
45359 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
45360 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
45361
45362 -extern atomic_t fscache_n_store_vmscan_not_storing;
45363 -extern atomic_t fscache_n_store_vmscan_gone;
45364 -extern atomic_t fscache_n_store_vmscan_busy;
45365 -extern atomic_t fscache_n_store_vmscan_cancelled;
45366 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45367 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
45368 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
45369 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45370
45371 -extern atomic_t fscache_n_marks;
45372 -extern atomic_t fscache_n_uncaches;
45373 +extern atomic_unchecked_t fscache_n_marks;
45374 +extern atomic_unchecked_t fscache_n_uncaches;
45375
45376 -extern atomic_t fscache_n_acquires;
45377 -extern atomic_t fscache_n_acquires_null;
45378 -extern atomic_t fscache_n_acquires_no_cache;
45379 -extern atomic_t fscache_n_acquires_ok;
45380 -extern atomic_t fscache_n_acquires_nobufs;
45381 -extern atomic_t fscache_n_acquires_oom;
45382 +extern atomic_unchecked_t fscache_n_acquires;
45383 +extern atomic_unchecked_t fscache_n_acquires_null;
45384 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
45385 +extern atomic_unchecked_t fscache_n_acquires_ok;
45386 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
45387 +extern atomic_unchecked_t fscache_n_acquires_oom;
45388
45389 -extern atomic_t fscache_n_updates;
45390 -extern atomic_t fscache_n_updates_null;
45391 -extern atomic_t fscache_n_updates_run;
45392 +extern atomic_unchecked_t fscache_n_updates;
45393 +extern atomic_unchecked_t fscache_n_updates_null;
45394 +extern atomic_unchecked_t fscache_n_updates_run;
45395
45396 -extern atomic_t fscache_n_relinquishes;
45397 -extern atomic_t fscache_n_relinquishes_null;
45398 -extern atomic_t fscache_n_relinquishes_waitcrt;
45399 -extern atomic_t fscache_n_relinquishes_retire;
45400 +extern atomic_unchecked_t fscache_n_relinquishes;
45401 +extern atomic_unchecked_t fscache_n_relinquishes_null;
45402 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45403 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
45404
45405 -extern atomic_t fscache_n_cookie_index;
45406 -extern atomic_t fscache_n_cookie_data;
45407 -extern atomic_t fscache_n_cookie_special;
45408 +extern atomic_unchecked_t fscache_n_cookie_index;
45409 +extern atomic_unchecked_t fscache_n_cookie_data;
45410 +extern atomic_unchecked_t fscache_n_cookie_special;
45411
45412 -extern atomic_t fscache_n_object_alloc;
45413 -extern atomic_t fscache_n_object_no_alloc;
45414 -extern atomic_t fscache_n_object_lookups;
45415 -extern atomic_t fscache_n_object_lookups_negative;
45416 -extern atomic_t fscache_n_object_lookups_positive;
45417 -extern atomic_t fscache_n_object_lookups_timed_out;
45418 -extern atomic_t fscache_n_object_created;
45419 -extern atomic_t fscache_n_object_avail;
45420 -extern atomic_t fscache_n_object_dead;
45421 +extern atomic_unchecked_t fscache_n_object_alloc;
45422 +extern atomic_unchecked_t fscache_n_object_no_alloc;
45423 +extern atomic_unchecked_t fscache_n_object_lookups;
45424 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
45425 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
45426 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
45427 +extern atomic_unchecked_t fscache_n_object_created;
45428 +extern atomic_unchecked_t fscache_n_object_avail;
45429 +extern atomic_unchecked_t fscache_n_object_dead;
45430
45431 -extern atomic_t fscache_n_checkaux_none;
45432 -extern atomic_t fscache_n_checkaux_okay;
45433 -extern atomic_t fscache_n_checkaux_update;
45434 -extern atomic_t fscache_n_checkaux_obsolete;
45435 +extern atomic_unchecked_t fscache_n_checkaux_none;
45436 +extern atomic_unchecked_t fscache_n_checkaux_okay;
45437 +extern atomic_unchecked_t fscache_n_checkaux_update;
45438 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
45439
45440 extern atomic_t fscache_n_cop_alloc_object;
45441 extern atomic_t fscache_n_cop_lookup_object;
45442 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
45443 atomic_inc(stat);
45444 }
45445
45446 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
45447 +{
45448 + atomic_inc_unchecked(stat);
45449 +}
45450 +
45451 static inline void fscache_stat_d(atomic_t *stat)
45452 {
45453 atomic_dec(stat);
45454 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
45455
45456 #define __fscache_stat(stat) (NULL)
45457 #define fscache_stat(stat) do {} while (0)
45458 +#define fscache_stat_unchecked(stat) do {} while (0)
45459 #define fscache_stat_d(stat) do {} while (0)
45460 #endif
45461
45462 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
45463 index b6b897c..0ffff9c 100644
45464 --- a/fs/fscache/object.c
45465 +++ b/fs/fscache/object.c
45466 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45467 /* update the object metadata on disk */
45468 case FSCACHE_OBJECT_UPDATING:
45469 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
45470 - fscache_stat(&fscache_n_updates_run);
45471 + fscache_stat_unchecked(&fscache_n_updates_run);
45472 fscache_stat(&fscache_n_cop_update_object);
45473 object->cache->ops->update_object(object);
45474 fscache_stat_d(&fscache_n_cop_update_object);
45475 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45476 spin_lock(&object->lock);
45477 object->state = FSCACHE_OBJECT_DEAD;
45478 spin_unlock(&object->lock);
45479 - fscache_stat(&fscache_n_object_dead);
45480 + fscache_stat_unchecked(&fscache_n_object_dead);
45481 goto terminal_transit;
45482
45483 /* handle the parent cache of this object being withdrawn from
45484 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45485 spin_lock(&object->lock);
45486 object->state = FSCACHE_OBJECT_DEAD;
45487 spin_unlock(&object->lock);
45488 - fscache_stat(&fscache_n_object_dead);
45489 + fscache_stat_unchecked(&fscache_n_object_dead);
45490 goto terminal_transit;
45491
45492 /* complain about the object being woken up once it is
45493 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45494 parent->cookie->def->name, cookie->def->name,
45495 object->cache->tag->name);
45496
45497 - fscache_stat(&fscache_n_object_lookups);
45498 + fscache_stat_unchecked(&fscache_n_object_lookups);
45499 fscache_stat(&fscache_n_cop_lookup_object);
45500 ret = object->cache->ops->lookup_object(object);
45501 fscache_stat_d(&fscache_n_cop_lookup_object);
45502 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45503 if (ret == -ETIMEDOUT) {
45504 /* probably stuck behind another object, so move this one to
45505 * the back of the queue */
45506 - fscache_stat(&fscache_n_object_lookups_timed_out);
45507 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
45508 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45509 }
45510
45511 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
45512
45513 spin_lock(&object->lock);
45514 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45515 - fscache_stat(&fscache_n_object_lookups_negative);
45516 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
45517
45518 /* transit here to allow write requests to begin stacking up
45519 * and read requests to begin returning ENODATA */
45520 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
45521 * result, in which case there may be data available */
45522 spin_lock(&object->lock);
45523 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45524 - fscache_stat(&fscache_n_object_lookups_positive);
45525 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45526
45527 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45528
45529 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
45530 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45531 } else {
45532 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45533 - fscache_stat(&fscache_n_object_created);
45534 + fscache_stat_unchecked(&fscache_n_object_created);
45535
45536 object->state = FSCACHE_OBJECT_AVAILABLE;
45537 spin_unlock(&object->lock);
45538 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
45539 fscache_enqueue_dependents(object);
45540
45541 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45542 - fscache_stat(&fscache_n_object_avail);
45543 + fscache_stat_unchecked(&fscache_n_object_avail);
45544
45545 _leave("");
45546 }
45547 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45548 enum fscache_checkaux result;
45549
45550 if (!object->cookie->def->check_aux) {
45551 - fscache_stat(&fscache_n_checkaux_none);
45552 + fscache_stat_unchecked(&fscache_n_checkaux_none);
45553 return FSCACHE_CHECKAUX_OKAY;
45554 }
45555
45556 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45557 switch (result) {
45558 /* entry okay as is */
45559 case FSCACHE_CHECKAUX_OKAY:
45560 - fscache_stat(&fscache_n_checkaux_okay);
45561 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
45562 break;
45563
45564 /* entry requires update */
45565 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45566 - fscache_stat(&fscache_n_checkaux_update);
45567 + fscache_stat_unchecked(&fscache_n_checkaux_update);
45568 break;
45569
45570 /* entry requires deletion */
45571 case FSCACHE_CHECKAUX_OBSOLETE:
45572 - fscache_stat(&fscache_n_checkaux_obsolete);
45573 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45574 break;
45575
45576 default:
45577 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45578 index 30afdfa..2256596 100644
45579 --- a/fs/fscache/operation.c
45580 +++ b/fs/fscache/operation.c
45581 @@ -17,7 +17,7 @@
45582 #include <linux/slab.h>
45583 #include "internal.h"
45584
45585 -atomic_t fscache_op_debug_id;
45586 +atomic_unchecked_t fscache_op_debug_id;
45587 EXPORT_SYMBOL(fscache_op_debug_id);
45588
45589 /**
45590 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
45591 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45592 ASSERTCMP(atomic_read(&op->usage), >, 0);
45593
45594 - fscache_stat(&fscache_n_op_enqueue);
45595 + fscache_stat_unchecked(&fscache_n_op_enqueue);
45596 switch (op->flags & FSCACHE_OP_TYPE) {
45597 case FSCACHE_OP_ASYNC:
45598 _debug("queue async");
45599 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45600 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45601 if (op->processor)
45602 fscache_enqueue_operation(op);
45603 - fscache_stat(&fscache_n_op_run);
45604 + fscache_stat_unchecked(&fscache_n_op_run);
45605 }
45606
45607 /*
45608 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45609 if (object->n_ops > 1) {
45610 atomic_inc(&op->usage);
45611 list_add_tail(&op->pend_link, &object->pending_ops);
45612 - fscache_stat(&fscache_n_op_pend);
45613 + fscache_stat_unchecked(&fscache_n_op_pend);
45614 } else if (!list_empty(&object->pending_ops)) {
45615 atomic_inc(&op->usage);
45616 list_add_tail(&op->pend_link, &object->pending_ops);
45617 - fscache_stat(&fscache_n_op_pend);
45618 + fscache_stat_unchecked(&fscache_n_op_pend);
45619 fscache_start_operations(object);
45620 } else {
45621 ASSERTCMP(object->n_in_progress, ==, 0);
45622 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45623 object->n_exclusive++; /* reads and writes must wait */
45624 atomic_inc(&op->usage);
45625 list_add_tail(&op->pend_link, &object->pending_ops);
45626 - fscache_stat(&fscache_n_op_pend);
45627 + fscache_stat_unchecked(&fscache_n_op_pend);
45628 ret = 0;
45629 } else {
45630 /* not allowed to submit ops in any other state */
45631 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45632 if (object->n_exclusive > 0) {
45633 atomic_inc(&op->usage);
45634 list_add_tail(&op->pend_link, &object->pending_ops);
45635 - fscache_stat(&fscache_n_op_pend);
45636 + fscache_stat_unchecked(&fscache_n_op_pend);
45637 } else if (!list_empty(&object->pending_ops)) {
45638 atomic_inc(&op->usage);
45639 list_add_tail(&op->pend_link, &object->pending_ops);
45640 - fscache_stat(&fscache_n_op_pend);
45641 + fscache_stat_unchecked(&fscache_n_op_pend);
45642 fscache_start_operations(object);
45643 } else {
45644 ASSERTCMP(object->n_exclusive, ==, 0);
45645 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45646 object->n_ops++;
45647 atomic_inc(&op->usage);
45648 list_add_tail(&op->pend_link, &object->pending_ops);
45649 - fscache_stat(&fscache_n_op_pend);
45650 + fscache_stat_unchecked(&fscache_n_op_pend);
45651 ret = 0;
45652 } else if (object->state == FSCACHE_OBJECT_DYING ||
45653 object->state == FSCACHE_OBJECT_LC_DYING ||
45654 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45655 - fscache_stat(&fscache_n_op_rejected);
45656 + fscache_stat_unchecked(&fscache_n_op_rejected);
45657 ret = -ENOBUFS;
45658 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45659 fscache_report_unexpected_submission(object, op, ostate);
45660 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45661
45662 ret = -EBUSY;
45663 if (!list_empty(&op->pend_link)) {
45664 - fscache_stat(&fscache_n_op_cancelled);
45665 + fscache_stat_unchecked(&fscache_n_op_cancelled);
45666 list_del_init(&op->pend_link);
45667 object->n_ops--;
45668 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45669 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45670 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45671 BUG();
45672
45673 - fscache_stat(&fscache_n_op_release);
45674 + fscache_stat_unchecked(&fscache_n_op_release);
45675
45676 if (op->release) {
45677 op->release(op);
45678 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45679 * lock, and defer it otherwise */
45680 if (!spin_trylock(&object->lock)) {
45681 _debug("defer put");
45682 - fscache_stat(&fscache_n_op_deferred_release);
45683 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
45684
45685 cache = object->cache;
45686 spin_lock(&cache->op_gc_list_lock);
45687 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45688
45689 _debug("GC DEFERRED REL OBJ%x OP%x",
45690 object->debug_id, op->debug_id);
45691 - fscache_stat(&fscache_n_op_gc);
45692 + fscache_stat_unchecked(&fscache_n_op_gc);
45693
45694 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45695
45696 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45697 index 3f7a59b..cf196cc 100644
45698 --- a/fs/fscache/page.c
45699 +++ b/fs/fscache/page.c
45700 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45701 val = radix_tree_lookup(&cookie->stores, page->index);
45702 if (!val) {
45703 rcu_read_unlock();
45704 - fscache_stat(&fscache_n_store_vmscan_not_storing);
45705 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45706 __fscache_uncache_page(cookie, page);
45707 return true;
45708 }
45709 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45710 spin_unlock(&cookie->stores_lock);
45711
45712 if (xpage) {
45713 - fscache_stat(&fscache_n_store_vmscan_cancelled);
45714 - fscache_stat(&fscache_n_store_radix_deletes);
45715 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45716 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45717 ASSERTCMP(xpage, ==, page);
45718 } else {
45719 - fscache_stat(&fscache_n_store_vmscan_gone);
45720 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45721 }
45722
45723 wake_up_bit(&cookie->flags, 0);
45724 @@ -107,7 +107,7 @@ page_busy:
45725 /* we might want to wait here, but that could deadlock the allocator as
45726 * the work threads writing to the cache may all end up sleeping
45727 * on memory allocation */
45728 - fscache_stat(&fscache_n_store_vmscan_busy);
45729 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45730 return false;
45731 }
45732 EXPORT_SYMBOL(__fscache_maybe_release_page);
45733 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45734 FSCACHE_COOKIE_STORING_TAG);
45735 if (!radix_tree_tag_get(&cookie->stores, page->index,
45736 FSCACHE_COOKIE_PENDING_TAG)) {
45737 - fscache_stat(&fscache_n_store_radix_deletes);
45738 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45739 xpage = radix_tree_delete(&cookie->stores, page->index);
45740 }
45741 spin_unlock(&cookie->stores_lock);
45742 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45743
45744 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45745
45746 - fscache_stat(&fscache_n_attr_changed_calls);
45747 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45748
45749 if (fscache_object_is_active(object)) {
45750 fscache_stat(&fscache_n_cop_attr_changed);
45751 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45752
45753 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45754
45755 - fscache_stat(&fscache_n_attr_changed);
45756 + fscache_stat_unchecked(&fscache_n_attr_changed);
45757
45758 op = kzalloc(sizeof(*op), GFP_KERNEL);
45759 if (!op) {
45760 - fscache_stat(&fscache_n_attr_changed_nomem);
45761 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45762 _leave(" = -ENOMEM");
45763 return -ENOMEM;
45764 }
45765 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45766 if (fscache_submit_exclusive_op(object, op) < 0)
45767 goto nobufs;
45768 spin_unlock(&cookie->lock);
45769 - fscache_stat(&fscache_n_attr_changed_ok);
45770 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45771 fscache_put_operation(op);
45772 _leave(" = 0");
45773 return 0;
45774 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45775 nobufs:
45776 spin_unlock(&cookie->lock);
45777 kfree(op);
45778 - fscache_stat(&fscache_n_attr_changed_nobufs);
45779 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45780 _leave(" = %d", -ENOBUFS);
45781 return -ENOBUFS;
45782 }
45783 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45784 /* allocate a retrieval operation and attempt to submit it */
45785 op = kzalloc(sizeof(*op), GFP_NOIO);
45786 if (!op) {
45787 - fscache_stat(&fscache_n_retrievals_nomem);
45788 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45789 return NULL;
45790 }
45791
45792 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45793 return 0;
45794 }
45795
45796 - fscache_stat(&fscache_n_retrievals_wait);
45797 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
45798
45799 jif = jiffies;
45800 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45801 fscache_wait_bit_interruptible,
45802 TASK_INTERRUPTIBLE) != 0) {
45803 - fscache_stat(&fscache_n_retrievals_intr);
45804 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45805 _leave(" = -ERESTARTSYS");
45806 return -ERESTARTSYS;
45807 }
45808 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45809 */
45810 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45811 struct fscache_retrieval *op,
45812 - atomic_t *stat_op_waits,
45813 - atomic_t *stat_object_dead)
45814 + atomic_unchecked_t *stat_op_waits,
45815 + atomic_unchecked_t *stat_object_dead)
45816 {
45817 int ret;
45818
45819 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45820 goto check_if_dead;
45821
45822 _debug(">>> WT");
45823 - fscache_stat(stat_op_waits);
45824 + fscache_stat_unchecked(stat_op_waits);
45825 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45826 fscache_wait_bit_interruptible,
45827 TASK_INTERRUPTIBLE) < 0) {
45828 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45829
45830 check_if_dead:
45831 if (unlikely(fscache_object_is_dead(object))) {
45832 - fscache_stat(stat_object_dead);
45833 + fscache_stat_unchecked(stat_object_dead);
45834 return -ENOBUFS;
45835 }
45836 return 0;
45837 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45838
45839 _enter("%p,%p,,,", cookie, page);
45840
45841 - fscache_stat(&fscache_n_retrievals);
45842 + fscache_stat_unchecked(&fscache_n_retrievals);
45843
45844 if (hlist_empty(&cookie->backing_objects))
45845 goto nobufs;
45846 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45847 goto nobufs_unlock;
45848 spin_unlock(&cookie->lock);
45849
45850 - fscache_stat(&fscache_n_retrieval_ops);
45851 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45852
45853 /* pin the netfs read context in case we need to do the actual netfs
45854 * read because we've encountered a cache read failure */
45855 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45856
45857 error:
45858 if (ret == -ENOMEM)
45859 - fscache_stat(&fscache_n_retrievals_nomem);
45860 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45861 else if (ret == -ERESTARTSYS)
45862 - fscache_stat(&fscache_n_retrievals_intr);
45863 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45864 else if (ret == -ENODATA)
45865 - fscache_stat(&fscache_n_retrievals_nodata);
45866 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45867 else if (ret < 0)
45868 - fscache_stat(&fscache_n_retrievals_nobufs);
45869 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45870 else
45871 - fscache_stat(&fscache_n_retrievals_ok);
45872 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45873
45874 fscache_put_retrieval(op);
45875 _leave(" = %d", ret);
45876 @@ -429,7 +429,7 @@ nobufs_unlock:
45877 spin_unlock(&cookie->lock);
45878 kfree(op);
45879 nobufs:
45880 - fscache_stat(&fscache_n_retrievals_nobufs);
45881 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45882 _leave(" = -ENOBUFS");
45883 return -ENOBUFS;
45884 }
45885 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45886
45887 _enter("%p,,%d,,,", cookie, *nr_pages);
45888
45889 - fscache_stat(&fscache_n_retrievals);
45890 + fscache_stat_unchecked(&fscache_n_retrievals);
45891
45892 if (hlist_empty(&cookie->backing_objects))
45893 goto nobufs;
45894 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45895 goto nobufs_unlock;
45896 spin_unlock(&cookie->lock);
45897
45898 - fscache_stat(&fscache_n_retrieval_ops);
45899 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45900
45901 /* pin the netfs read context in case we need to do the actual netfs
45902 * read because we've encountered a cache read failure */
45903 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45904
45905 error:
45906 if (ret == -ENOMEM)
45907 - fscache_stat(&fscache_n_retrievals_nomem);
45908 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45909 else if (ret == -ERESTARTSYS)
45910 - fscache_stat(&fscache_n_retrievals_intr);
45911 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45912 else if (ret == -ENODATA)
45913 - fscache_stat(&fscache_n_retrievals_nodata);
45914 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45915 else if (ret < 0)
45916 - fscache_stat(&fscache_n_retrievals_nobufs);
45917 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45918 else
45919 - fscache_stat(&fscache_n_retrievals_ok);
45920 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45921
45922 fscache_put_retrieval(op);
45923 _leave(" = %d", ret);
45924 @@ -545,7 +545,7 @@ nobufs_unlock:
45925 spin_unlock(&cookie->lock);
45926 kfree(op);
45927 nobufs:
45928 - fscache_stat(&fscache_n_retrievals_nobufs);
45929 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45930 _leave(" = -ENOBUFS");
45931 return -ENOBUFS;
45932 }
45933 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45934
45935 _enter("%p,%p,,,", cookie, page);
45936
45937 - fscache_stat(&fscache_n_allocs);
45938 + fscache_stat_unchecked(&fscache_n_allocs);
45939
45940 if (hlist_empty(&cookie->backing_objects))
45941 goto nobufs;
45942 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45943 goto nobufs_unlock;
45944 spin_unlock(&cookie->lock);
45945
45946 - fscache_stat(&fscache_n_alloc_ops);
45947 + fscache_stat_unchecked(&fscache_n_alloc_ops);
45948
45949 ret = fscache_wait_for_retrieval_activation(
45950 object, op,
45951 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45952
45953 error:
45954 if (ret == -ERESTARTSYS)
45955 - fscache_stat(&fscache_n_allocs_intr);
45956 + fscache_stat_unchecked(&fscache_n_allocs_intr);
45957 else if (ret < 0)
45958 - fscache_stat(&fscache_n_allocs_nobufs);
45959 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45960 else
45961 - fscache_stat(&fscache_n_allocs_ok);
45962 + fscache_stat_unchecked(&fscache_n_allocs_ok);
45963
45964 fscache_put_retrieval(op);
45965 _leave(" = %d", ret);
45966 @@ -625,7 +625,7 @@ nobufs_unlock:
45967 spin_unlock(&cookie->lock);
45968 kfree(op);
45969 nobufs:
45970 - fscache_stat(&fscache_n_allocs_nobufs);
45971 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45972 _leave(" = -ENOBUFS");
45973 return -ENOBUFS;
45974 }
45975 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45976
45977 spin_lock(&cookie->stores_lock);
45978
45979 - fscache_stat(&fscache_n_store_calls);
45980 + fscache_stat_unchecked(&fscache_n_store_calls);
45981
45982 /* find a page to store */
45983 page = NULL;
45984 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45985 page = results[0];
45986 _debug("gang %d [%lx]", n, page->index);
45987 if (page->index > op->store_limit) {
45988 - fscache_stat(&fscache_n_store_pages_over_limit);
45989 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45990 goto superseded;
45991 }
45992
45993 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45994 spin_unlock(&cookie->stores_lock);
45995 spin_unlock(&object->lock);
45996
45997 - fscache_stat(&fscache_n_store_pages);
45998 + fscache_stat_unchecked(&fscache_n_store_pages);
45999 fscache_stat(&fscache_n_cop_write_page);
46000 ret = object->cache->ops->write_page(op, page);
46001 fscache_stat_d(&fscache_n_cop_write_page);
46002 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46003 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46004 ASSERT(PageFsCache(page));
46005
46006 - fscache_stat(&fscache_n_stores);
46007 + fscache_stat_unchecked(&fscache_n_stores);
46008
46009 op = kzalloc(sizeof(*op), GFP_NOIO);
46010 if (!op)
46011 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46012 spin_unlock(&cookie->stores_lock);
46013 spin_unlock(&object->lock);
46014
46015 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
46016 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
46017 op->store_limit = object->store_limit;
46018
46019 if (fscache_submit_op(object, &op->op) < 0)
46020 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46021
46022 spin_unlock(&cookie->lock);
46023 radix_tree_preload_end();
46024 - fscache_stat(&fscache_n_store_ops);
46025 - fscache_stat(&fscache_n_stores_ok);
46026 + fscache_stat_unchecked(&fscache_n_store_ops);
46027 + fscache_stat_unchecked(&fscache_n_stores_ok);
46028
46029 /* the work queue now carries its own ref on the object */
46030 fscache_put_operation(&op->op);
46031 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46032 return 0;
46033
46034 already_queued:
46035 - fscache_stat(&fscache_n_stores_again);
46036 + fscache_stat_unchecked(&fscache_n_stores_again);
46037 already_pending:
46038 spin_unlock(&cookie->stores_lock);
46039 spin_unlock(&object->lock);
46040 spin_unlock(&cookie->lock);
46041 radix_tree_preload_end();
46042 kfree(op);
46043 - fscache_stat(&fscache_n_stores_ok);
46044 + fscache_stat_unchecked(&fscache_n_stores_ok);
46045 _leave(" = 0");
46046 return 0;
46047
46048 @@ -851,14 +851,14 @@ nobufs:
46049 spin_unlock(&cookie->lock);
46050 radix_tree_preload_end();
46051 kfree(op);
46052 - fscache_stat(&fscache_n_stores_nobufs);
46053 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
46054 _leave(" = -ENOBUFS");
46055 return -ENOBUFS;
46056
46057 nomem_free:
46058 kfree(op);
46059 nomem:
46060 - fscache_stat(&fscache_n_stores_oom);
46061 + fscache_stat_unchecked(&fscache_n_stores_oom);
46062 _leave(" = -ENOMEM");
46063 return -ENOMEM;
46064 }
46065 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
46066 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46067 ASSERTCMP(page, !=, NULL);
46068
46069 - fscache_stat(&fscache_n_uncaches);
46070 + fscache_stat_unchecked(&fscache_n_uncaches);
46071
46072 /* cache withdrawal may beat us to it */
46073 if (!PageFsCache(page))
46074 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
46075 unsigned long loop;
46076
46077 #ifdef CONFIG_FSCACHE_STATS
46078 - atomic_add(pagevec->nr, &fscache_n_marks);
46079 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
46080 #endif
46081
46082 for (loop = 0; loop < pagevec->nr; loop++) {
46083 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
46084 index 4765190..2a067f2 100644
46085 --- a/fs/fscache/stats.c
46086 +++ b/fs/fscache/stats.c
46087 @@ -18,95 +18,95 @@
46088 /*
46089 * operation counters
46090 */
46091 -atomic_t fscache_n_op_pend;
46092 -atomic_t fscache_n_op_run;
46093 -atomic_t fscache_n_op_enqueue;
46094 -atomic_t fscache_n_op_requeue;
46095 -atomic_t fscache_n_op_deferred_release;
46096 -atomic_t fscache_n_op_release;
46097 -atomic_t fscache_n_op_gc;
46098 -atomic_t fscache_n_op_cancelled;
46099 -atomic_t fscache_n_op_rejected;
46100 +atomic_unchecked_t fscache_n_op_pend;
46101 +atomic_unchecked_t fscache_n_op_run;
46102 +atomic_unchecked_t fscache_n_op_enqueue;
46103 +atomic_unchecked_t fscache_n_op_requeue;
46104 +atomic_unchecked_t fscache_n_op_deferred_release;
46105 +atomic_unchecked_t fscache_n_op_release;
46106 +atomic_unchecked_t fscache_n_op_gc;
46107 +atomic_unchecked_t fscache_n_op_cancelled;
46108 +atomic_unchecked_t fscache_n_op_rejected;
46109
46110 -atomic_t fscache_n_attr_changed;
46111 -atomic_t fscache_n_attr_changed_ok;
46112 -atomic_t fscache_n_attr_changed_nobufs;
46113 -atomic_t fscache_n_attr_changed_nomem;
46114 -atomic_t fscache_n_attr_changed_calls;
46115 +atomic_unchecked_t fscache_n_attr_changed;
46116 +atomic_unchecked_t fscache_n_attr_changed_ok;
46117 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
46118 +atomic_unchecked_t fscache_n_attr_changed_nomem;
46119 +atomic_unchecked_t fscache_n_attr_changed_calls;
46120
46121 -atomic_t fscache_n_allocs;
46122 -atomic_t fscache_n_allocs_ok;
46123 -atomic_t fscache_n_allocs_wait;
46124 -atomic_t fscache_n_allocs_nobufs;
46125 -atomic_t fscache_n_allocs_intr;
46126 -atomic_t fscache_n_allocs_object_dead;
46127 -atomic_t fscache_n_alloc_ops;
46128 -atomic_t fscache_n_alloc_op_waits;
46129 +atomic_unchecked_t fscache_n_allocs;
46130 +atomic_unchecked_t fscache_n_allocs_ok;
46131 +atomic_unchecked_t fscache_n_allocs_wait;
46132 +atomic_unchecked_t fscache_n_allocs_nobufs;
46133 +atomic_unchecked_t fscache_n_allocs_intr;
46134 +atomic_unchecked_t fscache_n_allocs_object_dead;
46135 +atomic_unchecked_t fscache_n_alloc_ops;
46136 +atomic_unchecked_t fscache_n_alloc_op_waits;
46137
46138 -atomic_t fscache_n_retrievals;
46139 -atomic_t fscache_n_retrievals_ok;
46140 -atomic_t fscache_n_retrievals_wait;
46141 -atomic_t fscache_n_retrievals_nodata;
46142 -atomic_t fscache_n_retrievals_nobufs;
46143 -atomic_t fscache_n_retrievals_intr;
46144 -atomic_t fscache_n_retrievals_nomem;
46145 -atomic_t fscache_n_retrievals_object_dead;
46146 -atomic_t fscache_n_retrieval_ops;
46147 -atomic_t fscache_n_retrieval_op_waits;
46148 +atomic_unchecked_t fscache_n_retrievals;
46149 +atomic_unchecked_t fscache_n_retrievals_ok;
46150 +atomic_unchecked_t fscache_n_retrievals_wait;
46151 +atomic_unchecked_t fscache_n_retrievals_nodata;
46152 +atomic_unchecked_t fscache_n_retrievals_nobufs;
46153 +atomic_unchecked_t fscache_n_retrievals_intr;
46154 +atomic_unchecked_t fscache_n_retrievals_nomem;
46155 +atomic_unchecked_t fscache_n_retrievals_object_dead;
46156 +atomic_unchecked_t fscache_n_retrieval_ops;
46157 +atomic_unchecked_t fscache_n_retrieval_op_waits;
46158
46159 -atomic_t fscache_n_stores;
46160 -atomic_t fscache_n_stores_ok;
46161 -atomic_t fscache_n_stores_again;
46162 -atomic_t fscache_n_stores_nobufs;
46163 -atomic_t fscache_n_stores_oom;
46164 -atomic_t fscache_n_store_ops;
46165 -atomic_t fscache_n_store_calls;
46166 -atomic_t fscache_n_store_pages;
46167 -atomic_t fscache_n_store_radix_deletes;
46168 -atomic_t fscache_n_store_pages_over_limit;
46169 +atomic_unchecked_t fscache_n_stores;
46170 +atomic_unchecked_t fscache_n_stores_ok;
46171 +atomic_unchecked_t fscache_n_stores_again;
46172 +atomic_unchecked_t fscache_n_stores_nobufs;
46173 +atomic_unchecked_t fscache_n_stores_oom;
46174 +atomic_unchecked_t fscache_n_store_ops;
46175 +atomic_unchecked_t fscache_n_store_calls;
46176 +atomic_unchecked_t fscache_n_store_pages;
46177 +atomic_unchecked_t fscache_n_store_radix_deletes;
46178 +atomic_unchecked_t fscache_n_store_pages_over_limit;
46179
46180 -atomic_t fscache_n_store_vmscan_not_storing;
46181 -atomic_t fscache_n_store_vmscan_gone;
46182 -atomic_t fscache_n_store_vmscan_busy;
46183 -atomic_t fscache_n_store_vmscan_cancelled;
46184 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46185 +atomic_unchecked_t fscache_n_store_vmscan_gone;
46186 +atomic_unchecked_t fscache_n_store_vmscan_busy;
46187 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46188
46189 -atomic_t fscache_n_marks;
46190 -atomic_t fscache_n_uncaches;
46191 +atomic_unchecked_t fscache_n_marks;
46192 +atomic_unchecked_t fscache_n_uncaches;
46193
46194 -atomic_t fscache_n_acquires;
46195 -atomic_t fscache_n_acquires_null;
46196 -atomic_t fscache_n_acquires_no_cache;
46197 -atomic_t fscache_n_acquires_ok;
46198 -atomic_t fscache_n_acquires_nobufs;
46199 -atomic_t fscache_n_acquires_oom;
46200 +atomic_unchecked_t fscache_n_acquires;
46201 +atomic_unchecked_t fscache_n_acquires_null;
46202 +atomic_unchecked_t fscache_n_acquires_no_cache;
46203 +atomic_unchecked_t fscache_n_acquires_ok;
46204 +atomic_unchecked_t fscache_n_acquires_nobufs;
46205 +atomic_unchecked_t fscache_n_acquires_oom;
46206
46207 -atomic_t fscache_n_updates;
46208 -atomic_t fscache_n_updates_null;
46209 -atomic_t fscache_n_updates_run;
46210 +atomic_unchecked_t fscache_n_updates;
46211 +atomic_unchecked_t fscache_n_updates_null;
46212 +atomic_unchecked_t fscache_n_updates_run;
46213
46214 -atomic_t fscache_n_relinquishes;
46215 -atomic_t fscache_n_relinquishes_null;
46216 -atomic_t fscache_n_relinquishes_waitcrt;
46217 -atomic_t fscache_n_relinquishes_retire;
46218 +atomic_unchecked_t fscache_n_relinquishes;
46219 +atomic_unchecked_t fscache_n_relinquishes_null;
46220 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46221 +atomic_unchecked_t fscache_n_relinquishes_retire;
46222
46223 -atomic_t fscache_n_cookie_index;
46224 -atomic_t fscache_n_cookie_data;
46225 -atomic_t fscache_n_cookie_special;
46226 +atomic_unchecked_t fscache_n_cookie_index;
46227 +atomic_unchecked_t fscache_n_cookie_data;
46228 +atomic_unchecked_t fscache_n_cookie_special;
46229
46230 -atomic_t fscache_n_object_alloc;
46231 -atomic_t fscache_n_object_no_alloc;
46232 -atomic_t fscache_n_object_lookups;
46233 -atomic_t fscache_n_object_lookups_negative;
46234 -atomic_t fscache_n_object_lookups_positive;
46235 -atomic_t fscache_n_object_lookups_timed_out;
46236 -atomic_t fscache_n_object_created;
46237 -atomic_t fscache_n_object_avail;
46238 -atomic_t fscache_n_object_dead;
46239 +atomic_unchecked_t fscache_n_object_alloc;
46240 +atomic_unchecked_t fscache_n_object_no_alloc;
46241 +atomic_unchecked_t fscache_n_object_lookups;
46242 +atomic_unchecked_t fscache_n_object_lookups_negative;
46243 +atomic_unchecked_t fscache_n_object_lookups_positive;
46244 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
46245 +atomic_unchecked_t fscache_n_object_created;
46246 +atomic_unchecked_t fscache_n_object_avail;
46247 +atomic_unchecked_t fscache_n_object_dead;
46248
46249 -atomic_t fscache_n_checkaux_none;
46250 -atomic_t fscache_n_checkaux_okay;
46251 -atomic_t fscache_n_checkaux_update;
46252 -atomic_t fscache_n_checkaux_obsolete;
46253 +atomic_unchecked_t fscache_n_checkaux_none;
46254 +atomic_unchecked_t fscache_n_checkaux_okay;
46255 +atomic_unchecked_t fscache_n_checkaux_update;
46256 +atomic_unchecked_t fscache_n_checkaux_obsolete;
46257
46258 atomic_t fscache_n_cop_alloc_object;
46259 atomic_t fscache_n_cop_lookup_object;
46260 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
46261 seq_puts(m, "FS-Cache statistics\n");
46262
46263 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
46264 - atomic_read(&fscache_n_cookie_index),
46265 - atomic_read(&fscache_n_cookie_data),
46266 - atomic_read(&fscache_n_cookie_special));
46267 + atomic_read_unchecked(&fscache_n_cookie_index),
46268 + atomic_read_unchecked(&fscache_n_cookie_data),
46269 + atomic_read_unchecked(&fscache_n_cookie_special));
46270
46271 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
46272 - atomic_read(&fscache_n_object_alloc),
46273 - atomic_read(&fscache_n_object_no_alloc),
46274 - atomic_read(&fscache_n_object_avail),
46275 - atomic_read(&fscache_n_object_dead));
46276 + atomic_read_unchecked(&fscache_n_object_alloc),
46277 + atomic_read_unchecked(&fscache_n_object_no_alloc),
46278 + atomic_read_unchecked(&fscache_n_object_avail),
46279 + atomic_read_unchecked(&fscache_n_object_dead));
46280 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
46281 - atomic_read(&fscache_n_checkaux_none),
46282 - atomic_read(&fscache_n_checkaux_okay),
46283 - atomic_read(&fscache_n_checkaux_update),
46284 - atomic_read(&fscache_n_checkaux_obsolete));
46285 + atomic_read_unchecked(&fscache_n_checkaux_none),
46286 + atomic_read_unchecked(&fscache_n_checkaux_okay),
46287 + atomic_read_unchecked(&fscache_n_checkaux_update),
46288 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
46289
46290 seq_printf(m, "Pages : mrk=%u unc=%u\n",
46291 - atomic_read(&fscache_n_marks),
46292 - atomic_read(&fscache_n_uncaches));
46293 + atomic_read_unchecked(&fscache_n_marks),
46294 + atomic_read_unchecked(&fscache_n_uncaches));
46295
46296 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
46297 " oom=%u\n",
46298 - atomic_read(&fscache_n_acquires),
46299 - atomic_read(&fscache_n_acquires_null),
46300 - atomic_read(&fscache_n_acquires_no_cache),
46301 - atomic_read(&fscache_n_acquires_ok),
46302 - atomic_read(&fscache_n_acquires_nobufs),
46303 - atomic_read(&fscache_n_acquires_oom));
46304 + atomic_read_unchecked(&fscache_n_acquires),
46305 + atomic_read_unchecked(&fscache_n_acquires_null),
46306 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
46307 + atomic_read_unchecked(&fscache_n_acquires_ok),
46308 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
46309 + atomic_read_unchecked(&fscache_n_acquires_oom));
46310
46311 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
46312 - atomic_read(&fscache_n_object_lookups),
46313 - atomic_read(&fscache_n_object_lookups_negative),
46314 - atomic_read(&fscache_n_object_lookups_positive),
46315 - atomic_read(&fscache_n_object_created),
46316 - atomic_read(&fscache_n_object_lookups_timed_out));
46317 + atomic_read_unchecked(&fscache_n_object_lookups),
46318 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
46319 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
46320 + atomic_read_unchecked(&fscache_n_object_created),
46321 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
46322
46323 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
46324 - atomic_read(&fscache_n_updates),
46325 - atomic_read(&fscache_n_updates_null),
46326 - atomic_read(&fscache_n_updates_run));
46327 + atomic_read_unchecked(&fscache_n_updates),
46328 + atomic_read_unchecked(&fscache_n_updates_null),
46329 + atomic_read_unchecked(&fscache_n_updates_run));
46330
46331 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
46332 - atomic_read(&fscache_n_relinquishes),
46333 - atomic_read(&fscache_n_relinquishes_null),
46334 - atomic_read(&fscache_n_relinquishes_waitcrt),
46335 - atomic_read(&fscache_n_relinquishes_retire));
46336 + atomic_read_unchecked(&fscache_n_relinquishes),
46337 + atomic_read_unchecked(&fscache_n_relinquishes_null),
46338 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
46339 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
46340
46341 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
46342 - atomic_read(&fscache_n_attr_changed),
46343 - atomic_read(&fscache_n_attr_changed_ok),
46344 - atomic_read(&fscache_n_attr_changed_nobufs),
46345 - atomic_read(&fscache_n_attr_changed_nomem),
46346 - atomic_read(&fscache_n_attr_changed_calls));
46347 + atomic_read_unchecked(&fscache_n_attr_changed),
46348 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
46349 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
46350 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
46351 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
46352
46353 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
46354 - atomic_read(&fscache_n_allocs),
46355 - atomic_read(&fscache_n_allocs_ok),
46356 - atomic_read(&fscache_n_allocs_wait),
46357 - atomic_read(&fscache_n_allocs_nobufs),
46358 - atomic_read(&fscache_n_allocs_intr));
46359 + atomic_read_unchecked(&fscache_n_allocs),
46360 + atomic_read_unchecked(&fscache_n_allocs_ok),
46361 + atomic_read_unchecked(&fscache_n_allocs_wait),
46362 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
46363 + atomic_read_unchecked(&fscache_n_allocs_intr));
46364 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
46365 - atomic_read(&fscache_n_alloc_ops),
46366 - atomic_read(&fscache_n_alloc_op_waits),
46367 - atomic_read(&fscache_n_allocs_object_dead));
46368 + atomic_read_unchecked(&fscache_n_alloc_ops),
46369 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
46370 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
46371
46372 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
46373 " int=%u oom=%u\n",
46374 - atomic_read(&fscache_n_retrievals),
46375 - atomic_read(&fscache_n_retrievals_ok),
46376 - atomic_read(&fscache_n_retrievals_wait),
46377 - atomic_read(&fscache_n_retrievals_nodata),
46378 - atomic_read(&fscache_n_retrievals_nobufs),
46379 - atomic_read(&fscache_n_retrievals_intr),
46380 - atomic_read(&fscache_n_retrievals_nomem));
46381 + atomic_read_unchecked(&fscache_n_retrievals),
46382 + atomic_read_unchecked(&fscache_n_retrievals_ok),
46383 + atomic_read_unchecked(&fscache_n_retrievals_wait),
46384 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
46385 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
46386 + atomic_read_unchecked(&fscache_n_retrievals_intr),
46387 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
46388 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
46389 - atomic_read(&fscache_n_retrieval_ops),
46390 - atomic_read(&fscache_n_retrieval_op_waits),
46391 - atomic_read(&fscache_n_retrievals_object_dead));
46392 + atomic_read_unchecked(&fscache_n_retrieval_ops),
46393 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
46394 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
46395
46396 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
46397 - atomic_read(&fscache_n_stores),
46398 - atomic_read(&fscache_n_stores_ok),
46399 - atomic_read(&fscache_n_stores_again),
46400 - atomic_read(&fscache_n_stores_nobufs),
46401 - atomic_read(&fscache_n_stores_oom));
46402 + atomic_read_unchecked(&fscache_n_stores),
46403 + atomic_read_unchecked(&fscache_n_stores_ok),
46404 + atomic_read_unchecked(&fscache_n_stores_again),
46405 + atomic_read_unchecked(&fscache_n_stores_nobufs),
46406 + atomic_read_unchecked(&fscache_n_stores_oom));
46407 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
46408 - atomic_read(&fscache_n_store_ops),
46409 - atomic_read(&fscache_n_store_calls),
46410 - atomic_read(&fscache_n_store_pages),
46411 - atomic_read(&fscache_n_store_radix_deletes),
46412 - atomic_read(&fscache_n_store_pages_over_limit));
46413 + atomic_read_unchecked(&fscache_n_store_ops),
46414 + atomic_read_unchecked(&fscache_n_store_calls),
46415 + atomic_read_unchecked(&fscache_n_store_pages),
46416 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
46417 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
46418
46419 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
46420 - atomic_read(&fscache_n_store_vmscan_not_storing),
46421 - atomic_read(&fscache_n_store_vmscan_gone),
46422 - atomic_read(&fscache_n_store_vmscan_busy),
46423 - atomic_read(&fscache_n_store_vmscan_cancelled));
46424 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
46425 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
46426 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
46427 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
46428
46429 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
46430 - atomic_read(&fscache_n_op_pend),
46431 - atomic_read(&fscache_n_op_run),
46432 - atomic_read(&fscache_n_op_enqueue),
46433 - atomic_read(&fscache_n_op_cancelled),
46434 - atomic_read(&fscache_n_op_rejected));
46435 + atomic_read_unchecked(&fscache_n_op_pend),
46436 + atomic_read_unchecked(&fscache_n_op_run),
46437 + atomic_read_unchecked(&fscache_n_op_enqueue),
46438 + atomic_read_unchecked(&fscache_n_op_cancelled),
46439 + atomic_read_unchecked(&fscache_n_op_rejected));
46440 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
46441 - atomic_read(&fscache_n_op_deferred_release),
46442 - atomic_read(&fscache_n_op_release),
46443 - atomic_read(&fscache_n_op_gc));
46444 + atomic_read_unchecked(&fscache_n_op_deferred_release),
46445 + atomic_read_unchecked(&fscache_n_op_release),
46446 + atomic_read_unchecked(&fscache_n_op_gc));
46447
46448 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
46449 atomic_read(&fscache_n_cop_alloc_object),
46450 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
46451 index 3426521..3b75162 100644
46452 --- a/fs/fuse/cuse.c
46453 +++ b/fs/fuse/cuse.c
46454 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
46455 INIT_LIST_HEAD(&cuse_conntbl[i]);
46456
46457 /* inherit and extend fuse_dev_operations */
46458 - cuse_channel_fops = fuse_dev_operations;
46459 - cuse_channel_fops.owner = THIS_MODULE;
46460 - cuse_channel_fops.open = cuse_channel_open;
46461 - cuse_channel_fops.release = cuse_channel_release;
46462 + pax_open_kernel();
46463 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
46464 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
46465 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
46466 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
46467 + pax_close_kernel();
46468
46469 cuse_class = class_create(THIS_MODULE, "cuse");
46470 if (IS_ERR(cuse_class))
46471 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
46472 index 5f3368a..8306426 100644
46473 --- a/fs/fuse/dev.c
46474 +++ b/fs/fuse/dev.c
46475 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
46476 ret = 0;
46477 pipe_lock(pipe);
46478
46479 - if (!pipe->readers) {
46480 + if (!atomic_read(&pipe->readers)) {
46481 send_sig(SIGPIPE, current, 0);
46482 if (!ret)
46483 ret = -EPIPE;
46484 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
46485 index 2066328..f5add3b 100644
46486 --- a/fs/fuse/dir.c
46487 +++ b/fs/fuse/dir.c
46488 @@ -1175,7 +1175,7 @@ static char *read_link(struct dentry *dentry)
46489 return link;
46490 }
46491
46492 -static void free_link(char *link)
46493 +static void free_link(const char *link)
46494 {
46495 if (!IS_ERR(link))
46496 free_page((unsigned long) link);
46497 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
46498 index 5698746..6086012 100644
46499 --- a/fs/gfs2/inode.c
46500 +++ b/fs/gfs2/inode.c
46501 @@ -1487,7 +1487,7 @@ out:
46502
46503 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46504 {
46505 - char *s = nd_get_link(nd);
46506 + const char *s = nd_get_link(nd);
46507 if (!IS_ERR(s))
46508 kfree(s);
46509 }
46510 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
46511 index 3645cd3..786809c 100644
46512 --- a/fs/hugetlbfs/inode.c
46513 +++ b/fs/hugetlbfs/inode.c
46514 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs_fs_type = {
46515 .kill_sb = kill_litter_super,
46516 };
46517
46518 -static struct vfsmount *hugetlbfs_vfsmount;
46519 +struct vfsmount *hugetlbfs_vfsmount;
46520
46521 static int can_do_hugetlb_shm(void)
46522 {
46523 diff --git a/fs/inode.c b/fs/inode.c
46524 index 83ab215..8842101 100644
46525 --- a/fs/inode.c
46526 +++ b/fs/inode.c
46527 @@ -870,8 +870,8 @@ unsigned int get_next_ino(void)
46528
46529 #ifdef CONFIG_SMP
46530 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46531 - static atomic_t shared_last_ino;
46532 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46533 + static atomic_unchecked_t shared_last_ino;
46534 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
46535
46536 res = next - LAST_INO_BATCH;
46537 }
46538 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
46539 index eafb8d3..f423d37 100644
46540 --- a/fs/jffs2/erase.c
46541 +++ b/fs/jffs2/erase.c
46542 @@ -438,7 +438,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46543 struct jffs2_unknown_node marker = {
46544 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46545 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46546 - .totlen = cpu_to_je32(c->cleanmarker_size)
46547 + .totlen = cpu_to_je32(c->cleanmarker_size),
46548 + .hdr_crc = cpu_to_je32(0)
46549 };
46550
46551 jffs2_prealloc_raw_node_refs(c, jeb, 1);
46552 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
46553 index 30e8f47..21f600c 100644
46554 --- a/fs/jffs2/wbuf.c
46555 +++ b/fs/jffs2/wbuf.c
46556 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46557 {
46558 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46559 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46560 - .totlen = constant_cpu_to_je32(8)
46561 + .totlen = constant_cpu_to_je32(8),
46562 + .hdr_crc = constant_cpu_to_je32(0)
46563 };
46564
46565 /*
46566 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46567 index 682bca6..86b8e6e 100644
46568 --- a/fs/jfs/super.c
46569 +++ b/fs/jfs/super.c
46570 @@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
46571
46572 jfs_inode_cachep =
46573 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46574 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46575 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46576 init_once);
46577 if (jfs_inode_cachep == NULL)
46578 return -ENOMEM;
46579 diff --git a/fs/libfs.c b/fs/libfs.c
46580 index 5b2dbb3..7442d54 100644
46581 --- a/fs/libfs.c
46582 +++ b/fs/libfs.c
46583 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46584
46585 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46586 struct dentry *next;
46587 + char d_name[sizeof(next->d_iname)];
46588 + const unsigned char *name;
46589 +
46590 next = list_entry(p, struct dentry, d_u.d_child);
46591 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46592 if (!simple_positive(next)) {
46593 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46594
46595 spin_unlock(&next->d_lock);
46596 spin_unlock(&dentry->d_lock);
46597 - if (filldir(dirent, next->d_name.name,
46598 + name = next->d_name.name;
46599 + if (name == next->d_iname) {
46600 + memcpy(d_name, name, next->d_name.len);
46601 + name = d_name;
46602 + }
46603 + if (filldir(dirent, name,
46604 next->d_name.len, filp->f_pos,
46605 next->d_inode->i_ino,
46606 dt_type(next->d_inode)) < 0)
46607 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46608 index 8392cb8..80d6193 100644
46609 --- a/fs/lockd/clntproc.c
46610 +++ b/fs/lockd/clntproc.c
46611 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46612 /*
46613 * Cookie counter for NLM requests
46614 */
46615 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46616 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46617
46618 void nlmclnt_next_cookie(struct nlm_cookie *c)
46619 {
46620 - u32 cookie = atomic_inc_return(&nlm_cookie);
46621 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46622
46623 memcpy(c->data, &cookie, 4);
46624 c->len=4;
46625 diff --git a/fs/locks.c b/fs/locks.c
46626 index 0d68f1f..f216b79 100644
46627 --- a/fs/locks.c
46628 +++ b/fs/locks.c
46629 @@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
46630 return;
46631
46632 if (filp->f_op && filp->f_op->flock) {
46633 - struct file_lock fl = {
46634 + struct file_lock flock = {
46635 .fl_pid = current->tgid,
46636 .fl_file = filp,
46637 .fl_flags = FL_FLOCK,
46638 .fl_type = F_UNLCK,
46639 .fl_end = OFFSET_MAX,
46640 };
46641 - filp->f_op->flock(filp, F_SETLKW, &fl);
46642 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
46643 - fl.fl_ops->fl_release_private(&fl);
46644 + filp->f_op->flock(filp, F_SETLKW, &flock);
46645 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
46646 + flock.fl_ops->fl_release_private(&flock);
46647 }
46648
46649 lock_flocks();
46650 diff --git a/fs/namei.c b/fs/namei.c
46651 index 46ea9cc..c7cf3a3 100644
46652 --- a/fs/namei.c
46653 +++ b/fs/namei.c
46654 @@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
46655 if (ret != -EACCES)
46656 return ret;
46657
46658 +#ifdef CONFIG_GRKERNSEC
46659 + /* we'll block if we have to log due to a denied capability use */
46660 + if (mask & MAY_NOT_BLOCK)
46661 + return -ECHILD;
46662 +#endif
46663 +
46664 if (S_ISDIR(inode->i_mode)) {
46665 /* DACs are overridable for directories */
46666 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46667 - return 0;
46668 if (!(mask & MAY_WRITE))
46669 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46670 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46671 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46672 return 0;
46673 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46674 + return 0;
46675 return -EACCES;
46676 }
46677 /*
46678 + * Searching includes executable on directories, else just read.
46679 + */
46680 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46681 + if (mask == MAY_READ)
46682 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46683 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46684 + return 0;
46685 +
46686 + /*
46687 * Read/write DACs are always overridable.
46688 * Executable DACs are overridable when there is
46689 * at least one exec bit set.
46690 @@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
46691 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46692 return 0;
46693
46694 - /*
46695 - * Searching includes executable on directories, else just read.
46696 - */
46697 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46698 - if (mask == MAY_READ)
46699 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46700 - return 0;
46701 -
46702 return -EACCES;
46703 }
46704
46705 @@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46706 return error;
46707 }
46708
46709 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
46710 + dentry->d_inode, dentry, nd->path.mnt)) {
46711 + error = -EACCES;
46712 + *p = ERR_PTR(error); /* no ->put_link(), please */
46713 + path_put(&nd->path);
46714 + return error;
46715 + }
46716 +
46717 nd->last_type = LAST_BIND;
46718 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46719 error = PTR_ERR(*p);
46720 if (!IS_ERR(*p)) {
46721 - char *s = nd_get_link(nd);
46722 + const char *s = nd_get_link(nd);
46723 error = 0;
46724 if (s)
46725 error = __vfs_follow_link(nd, s);
46726 @@ -1650,6 +1666,21 @@ static int path_lookupat(int dfd, const char *name,
46727 if (!err)
46728 err = complete_walk(nd);
46729
46730 + if (!(nd->flags & LOOKUP_PARENT)) {
46731 +#ifdef CONFIG_GRKERNSEC
46732 + if (flags & LOOKUP_RCU) {
46733 + if (!err)
46734 + path_put(&nd->path);
46735 + err = -ECHILD;
46736 + } else
46737 +#endif
46738 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46739 + if (!err)
46740 + path_put(&nd->path);
46741 + err = -ENOENT;
46742 + }
46743 + }
46744 +
46745 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46746 if (!nd->inode->i_op->lookup) {
46747 path_put(&nd->path);
46748 @@ -1677,6 +1708,15 @@ static int do_path_lookup(int dfd, const char *name,
46749 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46750
46751 if (likely(!retval)) {
46752 + if (*name != '/' && nd->path.dentry && nd->inode) {
46753 +#ifdef CONFIG_GRKERNSEC
46754 + if (flags & LOOKUP_RCU)
46755 + return -ECHILD;
46756 +#endif
46757 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46758 + return -ENOENT;
46759 + }
46760 +
46761 if (unlikely(!audit_dummy_context())) {
46762 if (nd->path.dentry && nd->inode)
46763 audit_inode(name, nd->path.dentry);
46764 @@ -2071,6 +2111,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
46765 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
46766 return -EPERM;
46767
46768 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
46769 + return -EPERM;
46770 + if (gr_handle_rawio(inode))
46771 + return -EPERM;
46772 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
46773 + return -EACCES;
46774 +
46775 return 0;
46776 }
46777
46778 @@ -2132,6 +2179,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46779 error = complete_walk(nd);
46780 if (error)
46781 return ERR_PTR(error);
46782 +#ifdef CONFIG_GRKERNSEC
46783 + if (nd->flags & LOOKUP_RCU) {
46784 + error = -ECHILD;
46785 + goto exit;
46786 + }
46787 +#endif
46788 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46789 + error = -ENOENT;
46790 + goto exit;
46791 + }
46792 audit_inode(pathname, nd->path.dentry);
46793 if (open_flag & O_CREAT) {
46794 error = -EISDIR;
46795 @@ -2142,6 +2199,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46796 error = complete_walk(nd);
46797 if (error)
46798 return ERR_PTR(error);
46799 +#ifdef CONFIG_GRKERNSEC
46800 + if (nd->flags & LOOKUP_RCU) {
46801 + error = -ECHILD;
46802 + goto exit;
46803 + }
46804 +#endif
46805 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46806 + error = -ENOENT;
46807 + goto exit;
46808 + }
46809 audit_inode(pathname, dir);
46810 goto ok;
46811 }
46812 @@ -2163,6 +2230,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46813 error = complete_walk(nd);
46814 if (error)
46815 return ERR_PTR(error);
46816 +#ifdef CONFIG_GRKERNSEC
46817 + if (nd->flags & LOOKUP_RCU) {
46818 + error = -ECHILD;
46819 + goto exit;
46820 + }
46821 +#endif
46822 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46823 + error = -ENOENT;
46824 + goto exit;
46825 + }
46826
46827 error = -ENOTDIR;
46828 if (nd->flags & LOOKUP_DIRECTORY) {
46829 @@ -2203,6 +2280,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46830 /* Negative dentry, just create the file */
46831 if (!dentry->d_inode) {
46832 umode_t mode = op->mode;
46833 +
46834 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46835 + error = -EACCES;
46836 + goto exit_mutex_unlock;
46837 + }
46838 +
46839 if (!IS_POSIXACL(dir->d_inode))
46840 mode &= ~current_umask();
46841 /*
46842 @@ -2226,6 +2309,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46843 error = vfs_create(dir->d_inode, dentry, mode, nd);
46844 if (error)
46845 goto exit_mutex_unlock;
46846 + else
46847 + gr_handle_create(path->dentry, path->mnt);
46848 mutex_unlock(&dir->d_inode->i_mutex);
46849 dput(nd->path.dentry);
46850 nd->path.dentry = dentry;
46851 @@ -2235,6 +2320,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46852 /*
46853 * It already exists.
46854 */
46855 +
46856 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46857 + error = -ENOENT;
46858 + goto exit_mutex_unlock;
46859 + }
46860 +
46861 + /* only check if O_CREAT is specified, all other checks need to go
46862 + into may_open */
46863 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46864 + error = -EACCES;
46865 + goto exit_mutex_unlock;
46866 + }
46867 +
46868 mutex_unlock(&dir->d_inode->i_mutex);
46869 audit_inode(pathname, path->dentry);
46870
46871 @@ -2447,6 +2545,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46872 *path = nd.path;
46873 return dentry;
46874 eexist:
46875 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46876 + dput(dentry);
46877 + dentry = ERR_PTR(-ENOENT);
46878 + goto fail;
46879 + }
46880 dput(dentry);
46881 dentry = ERR_PTR(-EEXIST);
46882 fail:
46883 @@ -2469,6 +2572,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46884 }
46885 EXPORT_SYMBOL(user_path_create);
46886
46887 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46888 +{
46889 + char *tmp = getname(pathname);
46890 + struct dentry *res;
46891 + if (IS_ERR(tmp))
46892 + return ERR_CAST(tmp);
46893 + res = kern_path_create(dfd, tmp, path, is_dir);
46894 + if (IS_ERR(res))
46895 + putname(tmp);
46896 + else
46897 + *to = tmp;
46898 + return res;
46899 +}
46900 +
46901 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
46902 {
46903 int error = may_create(dir, dentry);
46904 @@ -2536,6 +2653,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46905 error = mnt_want_write(path.mnt);
46906 if (error)
46907 goto out_dput;
46908 +
46909 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46910 + error = -EPERM;
46911 + goto out_drop_write;
46912 + }
46913 +
46914 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46915 + error = -EACCES;
46916 + goto out_drop_write;
46917 + }
46918 +
46919 error = security_path_mknod(&path, dentry, mode, dev);
46920 if (error)
46921 goto out_drop_write;
46922 @@ -2553,6 +2681,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46923 }
46924 out_drop_write:
46925 mnt_drop_write(path.mnt);
46926 +
46927 + if (!error)
46928 + gr_handle_create(dentry, path.mnt);
46929 out_dput:
46930 dput(dentry);
46931 mutex_unlock(&path.dentry->d_inode->i_mutex);
46932 @@ -2602,12 +2733,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
46933 error = mnt_want_write(path.mnt);
46934 if (error)
46935 goto out_dput;
46936 +
46937 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46938 + error = -EACCES;
46939 + goto out_drop_write;
46940 + }
46941 +
46942 error = security_path_mkdir(&path, dentry, mode);
46943 if (error)
46944 goto out_drop_write;
46945 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46946 out_drop_write:
46947 mnt_drop_write(path.mnt);
46948 +
46949 + if (!error)
46950 + gr_handle_create(dentry, path.mnt);
46951 out_dput:
46952 dput(dentry);
46953 mutex_unlock(&path.dentry->d_inode->i_mutex);
46954 @@ -2687,6 +2827,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46955 char * name;
46956 struct dentry *dentry;
46957 struct nameidata nd;
46958 + ino_t saved_ino = 0;
46959 + dev_t saved_dev = 0;
46960
46961 error = user_path_parent(dfd, pathname, &nd, &name);
46962 if (error)
46963 @@ -2715,6 +2857,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46964 error = -ENOENT;
46965 goto exit3;
46966 }
46967 +
46968 + saved_ino = dentry->d_inode->i_ino;
46969 + saved_dev = gr_get_dev_from_dentry(dentry);
46970 +
46971 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46972 + error = -EACCES;
46973 + goto exit3;
46974 + }
46975 +
46976 error = mnt_want_write(nd.path.mnt);
46977 if (error)
46978 goto exit3;
46979 @@ -2722,6 +2873,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46980 if (error)
46981 goto exit4;
46982 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46983 + if (!error && (saved_dev || saved_ino))
46984 + gr_handle_delete(saved_ino, saved_dev);
46985 exit4:
46986 mnt_drop_write(nd.path.mnt);
46987 exit3:
46988 @@ -2784,6 +2937,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46989 struct dentry *dentry;
46990 struct nameidata nd;
46991 struct inode *inode = NULL;
46992 + ino_t saved_ino = 0;
46993 + dev_t saved_dev = 0;
46994
46995 error = user_path_parent(dfd, pathname, &nd, &name);
46996 if (error)
46997 @@ -2806,6 +2961,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46998 if (!inode)
46999 goto slashes;
47000 ihold(inode);
47001 +
47002 + if (inode->i_nlink <= 1) {
47003 + saved_ino = inode->i_ino;
47004 + saved_dev = gr_get_dev_from_dentry(dentry);
47005 + }
47006 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
47007 + error = -EACCES;
47008 + goto exit2;
47009 + }
47010 +
47011 error = mnt_want_write(nd.path.mnt);
47012 if (error)
47013 goto exit2;
47014 @@ -2813,6 +2978,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47015 if (error)
47016 goto exit3;
47017 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
47018 + if (!error && (saved_ino || saved_dev))
47019 + gr_handle_delete(saved_ino, saved_dev);
47020 exit3:
47021 mnt_drop_write(nd.path.mnt);
47022 exit2:
47023 @@ -2888,10 +3055,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
47024 error = mnt_want_write(path.mnt);
47025 if (error)
47026 goto out_dput;
47027 +
47028 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
47029 + error = -EACCES;
47030 + goto out_drop_write;
47031 + }
47032 +
47033 error = security_path_symlink(&path, dentry, from);
47034 if (error)
47035 goto out_drop_write;
47036 error = vfs_symlink(path.dentry->d_inode, dentry, from);
47037 + if (!error)
47038 + gr_handle_create(dentry, path.mnt);
47039 out_drop_write:
47040 mnt_drop_write(path.mnt);
47041 out_dput:
47042 @@ -2963,6 +3138,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47043 {
47044 struct dentry *new_dentry;
47045 struct path old_path, new_path;
47046 + char *to = NULL;
47047 int how = 0;
47048 int error;
47049
47050 @@ -2986,7 +3162,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47051 if (error)
47052 return error;
47053
47054 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
47055 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
47056 error = PTR_ERR(new_dentry);
47057 if (IS_ERR(new_dentry))
47058 goto out;
47059 @@ -2997,13 +3173,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47060 error = mnt_want_write(new_path.mnt);
47061 if (error)
47062 goto out_dput;
47063 +
47064 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
47065 + old_path.dentry->d_inode,
47066 + old_path.dentry->d_inode->i_mode, to)) {
47067 + error = -EACCES;
47068 + goto out_drop_write;
47069 + }
47070 +
47071 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
47072 + old_path.dentry, old_path.mnt, to)) {
47073 + error = -EACCES;
47074 + goto out_drop_write;
47075 + }
47076 +
47077 error = security_path_link(old_path.dentry, &new_path, new_dentry);
47078 if (error)
47079 goto out_drop_write;
47080 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
47081 + if (!error)
47082 + gr_handle_create(new_dentry, new_path.mnt);
47083 out_drop_write:
47084 mnt_drop_write(new_path.mnt);
47085 out_dput:
47086 + putname(to);
47087 dput(new_dentry);
47088 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
47089 path_put(&new_path);
47090 @@ -3231,6 +3424,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47091 if (new_dentry == trap)
47092 goto exit5;
47093
47094 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
47095 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
47096 + to);
47097 + if (error)
47098 + goto exit5;
47099 +
47100 error = mnt_want_write(oldnd.path.mnt);
47101 if (error)
47102 goto exit5;
47103 @@ -3240,6 +3439,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47104 goto exit6;
47105 error = vfs_rename(old_dir->d_inode, old_dentry,
47106 new_dir->d_inode, new_dentry);
47107 + if (!error)
47108 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
47109 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
47110 exit6:
47111 mnt_drop_write(oldnd.path.mnt);
47112 exit5:
47113 @@ -3265,6 +3467,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
47114
47115 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
47116 {
47117 + char tmpbuf[64];
47118 + const char *newlink;
47119 int len;
47120
47121 len = PTR_ERR(link);
47122 @@ -3274,7 +3478,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
47123 len = strlen(link);
47124 if (len > (unsigned) buflen)
47125 len = buflen;
47126 - if (copy_to_user(buffer, link, len))
47127 +
47128 + if (len < sizeof(tmpbuf)) {
47129 + memcpy(tmpbuf, link, len);
47130 + newlink = tmpbuf;
47131 + } else
47132 + newlink = link;
47133 +
47134 + if (copy_to_user(buffer, newlink, len))
47135 len = -EFAULT;
47136 out:
47137 return len;
47138 diff --git a/fs/namespace.c b/fs/namespace.c
47139 index e608199..9609cb9 100644
47140 --- a/fs/namespace.c
47141 +++ b/fs/namespace.c
47142 @@ -1155,6 +1155,9 @@ static int do_umount(struct mount *mnt, int flags)
47143 if (!(sb->s_flags & MS_RDONLY))
47144 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47145 up_write(&sb->s_umount);
47146 +
47147 + gr_log_remount(mnt->mnt_devname, retval);
47148 +
47149 return retval;
47150 }
47151
47152 @@ -1174,6 +1177,9 @@ static int do_umount(struct mount *mnt, int flags)
47153 br_write_unlock(vfsmount_lock);
47154 up_write(&namespace_sem);
47155 release_mounts(&umount_list);
47156 +
47157 + gr_log_unmount(mnt->mnt_devname, retval);
47158 +
47159 return retval;
47160 }
47161
47162 @@ -2175,6 +2181,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47163 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47164 MS_STRICTATIME);
47165
47166 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47167 + retval = -EPERM;
47168 + goto dput_out;
47169 + }
47170 +
47171 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47172 + retval = -EPERM;
47173 + goto dput_out;
47174 + }
47175 +
47176 if (flags & MS_REMOUNT)
47177 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47178 data_page);
47179 @@ -2189,6 +2205,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47180 dev_name, data_page);
47181 dput_out:
47182 path_put(&path);
47183 +
47184 + gr_log_mount(dev_name, dir_name, retval);
47185 +
47186 return retval;
47187 }
47188
47189 @@ -2470,6 +2489,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
47190 if (error)
47191 goto out2;
47192
47193 + if (gr_handle_chroot_pivot()) {
47194 + error = -EPERM;
47195 + goto out2;
47196 + }
47197 +
47198 get_fs_root(current->fs, &root);
47199 error = lock_mount(&old);
47200 if (error)
47201 diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
47202 index 32c0658..b1c2045e 100644
47203 --- a/fs/ncpfs/ncplib_kernel.h
47204 +++ b/fs/ncpfs/ncplib_kernel.h
47205 @@ -130,7 +130,7 @@ static inline int ncp_is_nfs_extras(struct ncp_server* server, unsigned int voln
47206 int ncp__io2vol(struct ncp_server *, unsigned char *, unsigned int *,
47207 const unsigned char *, unsigned int, int);
47208 int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
47209 - const unsigned char *, unsigned int, int);
47210 + const unsigned char *, unsigned int, int) __size_overflow(5);
47211
47212 #define NCP_ESC ':'
47213 #define NCP_IO_TABLE(sb) (NCP_SBP(sb)->nls_io)
47214 @@ -146,7 +146,7 @@ int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
47215 int ncp__io2vol(unsigned char *, unsigned int *,
47216 const unsigned char *, unsigned int, int);
47217 int ncp__vol2io(unsigned char *, unsigned int *,
47218 - const unsigned char *, unsigned int, int);
47219 + const unsigned char *, unsigned int, int) __size_overflow(5);
47220
47221 #define NCP_IO_TABLE(sb) NULL
47222 #define ncp_tolower(t, c) tolower(c)
47223 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
47224 index f649fba..236bf92 100644
47225 --- a/fs/nfs/inode.c
47226 +++ b/fs/nfs/inode.c
47227 @@ -151,7 +151,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
47228 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47229 nfsi->attrtimeo_timestamp = jiffies;
47230
47231 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47232 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47233 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47234 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47235 else
47236 @@ -1003,16 +1003,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
47237 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47238 }
47239
47240 -static atomic_long_t nfs_attr_generation_counter;
47241 +static atomic_long_unchecked_t nfs_attr_generation_counter;
47242
47243 static unsigned long nfs_read_attr_generation_counter(void)
47244 {
47245 - return atomic_long_read(&nfs_attr_generation_counter);
47246 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47247 }
47248
47249 unsigned long nfs_inc_attr_generation_counter(void)
47250 {
47251 - return atomic_long_inc_return(&nfs_attr_generation_counter);
47252 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47253 }
47254
47255 void nfs_fattr_init(struct nfs_fattr *fattr)
47256 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
47257 index edf6d3e..bdd1da7 100644
47258 --- a/fs/nfsd/vfs.c
47259 +++ b/fs/nfsd/vfs.c
47260 @@ -925,7 +925,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47261 } else {
47262 oldfs = get_fs();
47263 set_fs(KERNEL_DS);
47264 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
47265 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
47266 set_fs(oldfs);
47267 }
47268
47269 @@ -1029,7 +1029,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47270
47271 /* Write the data. */
47272 oldfs = get_fs(); set_fs(KERNEL_DS);
47273 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
47274 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
47275 set_fs(oldfs);
47276 if (host_err < 0)
47277 goto out_nfserr;
47278 @@ -1564,7 +1564,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
47279 */
47280
47281 oldfs = get_fs(); set_fs(KERNEL_DS);
47282 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
47283 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
47284 set_fs(oldfs);
47285
47286 if (host_err < 0)
47287 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
47288 index 3568c8a..e0240d8 100644
47289 --- a/fs/notify/fanotify/fanotify_user.c
47290 +++ b/fs/notify/fanotify/fanotify_user.c
47291 @@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
47292 goto out_close_fd;
47293
47294 ret = -EFAULT;
47295 - if (copy_to_user(buf, &fanotify_event_metadata,
47296 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
47297 + copy_to_user(buf, &fanotify_event_metadata,
47298 fanotify_event_metadata.event_len))
47299 goto out_kill_access_response;
47300
47301 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
47302 index ee18815..7aa5d01 100644
47303 --- a/fs/notify/notification.c
47304 +++ b/fs/notify/notification.c
47305 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
47306 * get set to 0 so it will never get 'freed'
47307 */
47308 static struct fsnotify_event *q_overflow_event;
47309 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47310 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47311
47312 /**
47313 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
47314 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47315 */
47316 u32 fsnotify_get_cookie(void)
47317 {
47318 - return atomic_inc_return(&fsnotify_sync_cookie);
47319 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47320 }
47321 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47322
47323 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
47324 index 99e3610..02c1068 100644
47325 --- a/fs/ntfs/dir.c
47326 +++ b/fs/ntfs/dir.c
47327 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
47328 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
47329 ~(s64)(ndir->itype.index.block_size - 1)));
47330 /* Bounds checks. */
47331 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47332 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47333 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
47334 "inode 0x%lx or driver bug.", vdir->i_ino);
47335 goto err_out;
47336 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
47337 index c587e2d..3641eaa 100644
47338 --- a/fs/ntfs/file.c
47339 +++ b/fs/ntfs/file.c
47340 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
47341 #endif /* NTFS_RW */
47342 };
47343
47344 -const struct file_operations ntfs_empty_file_ops = {};
47345 +const struct file_operations ntfs_empty_file_ops __read_only;
47346
47347 -const struct inode_operations ntfs_empty_inode_ops = {};
47348 +const struct inode_operations ntfs_empty_inode_ops __read_only;
47349 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
47350 index 210c352..a174f83 100644
47351 --- a/fs/ocfs2/localalloc.c
47352 +++ b/fs/ocfs2/localalloc.c
47353 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
47354 goto bail;
47355 }
47356
47357 - atomic_inc(&osb->alloc_stats.moves);
47358 + atomic_inc_unchecked(&osb->alloc_stats.moves);
47359
47360 bail:
47361 if (handle)
47362 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
47363 index d355e6e..578d905 100644
47364 --- a/fs/ocfs2/ocfs2.h
47365 +++ b/fs/ocfs2/ocfs2.h
47366 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
47367
47368 struct ocfs2_alloc_stats
47369 {
47370 - atomic_t moves;
47371 - atomic_t local_data;
47372 - atomic_t bitmap_data;
47373 - atomic_t bg_allocs;
47374 - atomic_t bg_extends;
47375 + atomic_unchecked_t moves;
47376 + atomic_unchecked_t local_data;
47377 + atomic_unchecked_t bitmap_data;
47378 + atomic_unchecked_t bg_allocs;
47379 + atomic_unchecked_t bg_extends;
47380 };
47381
47382 enum ocfs2_local_alloc_state
47383 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
47384 index ba5d97e..c77db25 100644
47385 --- a/fs/ocfs2/suballoc.c
47386 +++ b/fs/ocfs2/suballoc.c
47387 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
47388 mlog_errno(status);
47389 goto bail;
47390 }
47391 - atomic_inc(&osb->alloc_stats.bg_extends);
47392 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
47393
47394 /* You should never ask for this much metadata */
47395 BUG_ON(bits_wanted >
47396 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
47397 mlog_errno(status);
47398 goto bail;
47399 }
47400 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47401 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47402
47403 *suballoc_loc = res.sr_bg_blkno;
47404 *suballoc_bit_start = res.sr_bit_offset;
47405 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
47406 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
47407 res->sr_bits);
47408
47409 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47410 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47411
47412 BUG_ON(res->sr_bits != 1);
47413
47414 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
47415 mlog_errno(status);
47416 goto bail;
47417 }
47418 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47419 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47420
47421 BUG_ON(res.sr_bits != 1);
47422
47423 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47424 cluster_start,
47425 num_clusters);
47426 if (!status)
47427 - atomic_inc(&osb->alloc_stats.local_data);
47428 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
47429 } else {
47430 if (min_clusters > (osb->bitmap_cpg - 1)) {
47431 /* The only paths asking for contiguousness
47432 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47433 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
47434 res.sr_bg_blkno,
47435 res.sr_bit_offset);
47436 - atomic_inc(&osb->alloc_stats.bitmap_data);
47437 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
47438 *num_clusters = res.sr_bits;
47439 }
47440 }
47441 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
47442 index 604e12c..8426483 100644
47443 --- a/fs/ocfs2/super.c
47444 +++ b/fs/ocfs2/super.c
47445 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
47446 "%10s => GlobalAllocs: %d LocalAllocs: %d "
47447 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
47448 "Stats",
47449 - atomic_read(&osb->alloc_stats.bitmap_data),
47450 - atomic_read(&osb->alloc_stats.local_data),
47451 - atomic_read(&osb->alloc_stats.bg_allocs),
47452 - atomic_read(&osb->alloc_stats.moves),
47453 - atomic_read(&osb->alloc_stats.bg_extends));
47454 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
47455 + atomic_read_unchecked(&osb->alloc_stats.local_data),
47456 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
47457 + atomic_read_unchecked(&osb->alloc_stats.moves),
47458 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
47459
47460 out += snprintf(buf + out, len - out,
47461 "%10s => State: %u Descriptor: %llu Size: %u bits "
47462 @@ -2117,11 +2117,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
47463 spin_lock_init(&osb->osb_xattr_lock);
47464 ocfs2_init_steal_slots(osb);
47465
47466 - atomic_set(&osb->alloc_stats.moves, 0);
47467 - atomic_set(&osb->alloc_stats.local_data, 0);
47468 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
47469 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
47470 - atomic_set(&osb->alloc_stats.bg_extends, 0);
47471 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
47472 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
47473 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
47474 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
47475 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
47476
47477 /* Copy the blockcheck stats from the superblock probe */
47478 osb->osb_ecc_stats = *stats;
47479 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
47480 index 5d22872..523db20 100644
47481 --- a/fs/ocfs2/symlink.c
47482 +++ b/fs/ocfs2/symlink.c
47483 @@ -142,7 +142,7 @@ bail:
47484
47485 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47486 {
47487 - char *link = nd_get_link(nd);
47488 + const char *link = nd_get_link(nd);
47489 if (!IS_ERR(link))
47490 kfree(link);
47491 }
47492 diff --git a/fs/open.c b/fs/open.c
47493 index 77becc0..aad7bd9 100644
47494 --- a/fs/open.c
47495 +++ b/fs/open.c
47496 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
47497 error = locks_verify_truncate(inode, NULL, length);
47498 if (!error)
47499 error = security_path_truncate(&path);
47500 +
47501 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
47502 + error = -EACCES;
47503 +
47504 if (!error)
47505 error = do_truncate(path.dentry, length, 0, NULL);
47506
47507 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
47508 if (__mnt_is_readonly(path.mnt))
47509 res = -EROFS;
47510
47511 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
47512 + res = -EACCES;
47513 +
47514 out_path_release:
47515 path_put(&path);
47516 out:
47517 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
47518 if (error)
47519 goto dput_and_out;
47520
47521 + gr_log_chdir(path.dentry, path.mnt);
47522 +
47523 set_fs_pwd(current->fs, &path);
47524
47525 dput_and_out:
47526 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
47527 goto out_putf;
47528
47529 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
47530 +
47531 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
47532 + error = -EPERM;
47533 +
47534 + if (!error)
47535 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
47536 +
47537 if (!error)
47538 set_fs_pwd(current->fs, &file->f_path);
47539 out_putf:
47540 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
47541 if (error)
47542 goto dput_and_out;
47543
47544 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
47545 + goto dput_and_out;
47546 +
47547 set_fs_root(current->fs, &path);
47548 +
47549 + gr_handle_chroot_chdir(&path);
47550 +
47551 error = 0;
47552 dput_and_out:
47553 path_put(&path);
47554 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
47555 if (error)
47556 return error;
47557 mutex_lock(&inode->i_mutex);
47558 +
47559 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
47560 + error = -EACCES;
47561 + goto out_unlock;
47562 + }
47563 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
47564 + error = -EACCES;
47565 + goto out_unlock;
47566 + }
47567 +
47568 error = security_path_chmod(path, mode);
47569 if (error)
47570 goto out_unlock;
47571 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
47572 int error;
47573 struct iattr newattrs;
47574
47575 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
47576 + return -EACCES;
47577 +
47578 newattrs.ia_valid = ATTR_CTIME;
47579 if (user != (uid_t) -1) {
47580 newattrs.ia_valid |= ATTR_UID;
47581 diff --git a/fs/pipe.c b/fs/pipe.c
47582 index a932ced..6495412 100644
47583 --- a/fs/pipe.c
47584 +++ b/fs/pipe.c
47585 @@ -420,9 +420,9 @@ redo:
47586 }
47587 if (bufs) /* More to do? */
47588 continue;
47589 - if (!pipe->writers)
47590 + if (!atomic_read(&pipe->writers))
47591 break;
47592 - if (!pipe->waiting_writers) {
47593 + if (!atomic_read(&pipe->waiting_writers)) {
47594 /* syscall merging: Usually we must not sleep
47595 * if O_NONBLOCK is set, or if we got some data.
47596 * But if a writer sleeps in kernel space, then
47597 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
47598 mutex_lock(&inode->i_mutex);
47599 pipe = inode->i_pipe;
47600
47601 - if (!pipe->readers) {
47602 + if (!atomic_read(&pipe->readers)) {
47603 send_sig(SIGPIPE, current, 0);
47604 ret = -EPIPE;
47605 goto out;
47606 @@ -530,7 +530,7 @@ redo1:
47607 for (;;) {
47608 int bufs;
47609
47610 - if (!pipe->readers) {
47611 + if (!atomic_read(&pipe->readers)) {
47612 send_sig(SIGPIPE, current, 0);
47613 if (!ret)
47614 ret = -EPIPE;
47615 @@ -616,9 +616,9 @@ redo2:
47616 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47617 do_wakeup = 0;
47618 }
47619 - pipe->waiting_writers++;
47620 + atomic_inc(&pipe->waiting_writers);
47621 pipe_wait(pipe);
47622 - pipe->waiting_writers--;
47623 + atomic_dec(&pipe->waiting_writers);
47624 }
47625 out:
47626 mutex_unlock(&inode->i_mutex);
47627 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47628 mask = 0;
47629 if (filp->f_mode & FMODE_READ) {
47630 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47631 - if (!pipe->writers && filp->f_version != pipe->w_counter)
47632 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47633 mask |= POLLHUP;
47634 }
47635
47636 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47637 * Most Unices do not set POLLERR for FIFOs but on Linux they
47638 * behave exactly like pipes for poll().
47639 */
47640 - if (!pipe->readers)
47641 + if (!atomic_read(&pipe->readers))
47642 mask |= POLLERR;
47643 }
47644
47645 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47646
47647 mutex_lock(&inode->i_mutex);
47648 pipe = inode->i_pipe;
47649 - pipe->readers -= decr;
47650 - pipe->writers -= decw;
47651 + atomic_sub(decr, &pipe->readers);
47652 + atomic_sub(decw, &pipe->writers);
47653
47654 - if (!pipe->readers && !pipe->writers) {
47655 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47656 free_pipe_info(inode);
47657 } else {
47658 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
47659 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47660
47661 if (inode->i_pipe) {
47662 ret = 0;
47663 - inode->i_pipe->readers++;
47664 + atomic_inc(&inode->i_pipe->readers);
47665 }
47666
47667 mutex_unlock(&inode->i_mutex);
47668 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47669
47670 if (inode->i_pipe) {
47671 ret = 0;
47672 - inode->i_pipe->writers++;
47673 + atomic_inc(&inode->i_pipe->writers);
47674 }
47675
47676 mutex_unlock(&inode->i_mutex);
47677 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47678 if (inode->i_pipe) {
47679 ret = 0;
47680 if (filp->f_mode & FMODE_READ)
47681 - inode->i_pipe->readers++;
47682 + atomic_inc(&inode->i_pipe->readers);
47683 if (filp->f_mode & FMODE_WRITE)
47684 - inode->i_pipe->writers++;
47685 + atomic_inc(&inode->i_pipe->writers);
47686 }
47687
47688 mutex_unlock(&inode->i_mutex);
47689 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
47690 inode->i_pipe = NULL;
47691 }
47692
47693 -static struct vfsmount *pipe_mnt __read_mostly;
47694 +struct vfsmount *pipe_mnt __read_mostly;
47695
47696 /*
47697 * pipefs_dname() is called from d_path().
47698 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
47699 goto fail_iput;
47700 inode->i_pipe = pipe;
47701
47702 - pipe->readers = pipe->writers = 1;
47703 + atomic_set(&pipe->readers, 1);
47704 + atomic_set(&pipe->writers, 1);
47705 inode->i_fop = &rdwr_pipefifo_fops;
47706
47707 /*
47708 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47709 index 15af622..0e9f4467 100644
47710 --- a/fs/proc/Kconfig
47711 +++ b/fs/proc/Kconfig
47712 @@ -30,12 +30,12 @@ config PROC_FS
47713
47714 config PROC_KCORE
47715 bool "/proc/kcore support" if !ARM
47716 - depends on PROC_FS && MMU
47717 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47718
47719 config PROC_VMCORE
47720 bool "/proc/vmcore support"
47721 - depends on PROC_FS && CRASH_DUMP
47722 - default y
47723 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47724 + default n
47725 help
47726 Exports the dump image of crashed kernel in ELF format.
47727
47728 @@ -59,8 +59,8 @@ config PROC_SYSCTL
47729 limited in memory.
47730
47731 config PROC_PAGE_MONITOR
47732 - default y
47733 - depends on PROC_FS && MMU
47734 + default n
47735 + depends on PROC_FS && MMU && !GRKERNSEC
47736 bool "Enable /proc page monitoring" if EXPERT
47737 help
47738 Various /proc files exist to monitor process memory utilization:
47739 diff --git a/fs/proc/array.c b/fs/proc/array.c
47740 index c602b8d..a7de642 100644
47741 --- a/fs/proc/array.c
47742 +++ b/fs/proc/array.c
47743 @@ -60,6 +60,7 @@
47744 #include <linux/tty.h>
47745 #include <linux/string.h>
47746 #include <linux/mman.h>
47747 +#include <linux/grsecurity.h>
47748 #include <linux/proc_fs.h>
47749 #include <linux/ioport.h>
47750 #include <linux/uaccess.h>
47751 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
47752 seq_putc(m, '\n');
47753 }
47754
47755 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47756 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
47757 +{
47758 + if (p->mm)
47759 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47760 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47761 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47762 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47763 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47764 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47765 + else
47766 + seq_printf(m, "PaX:\t-----\n");
47767 +}
47768 +#endif
47769 +
47770 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47771 struct pid *pid, struct task_struct *task)
47772 {
47773 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47774 task_cpus_allowed(m, task);
47775 cpuset_task_status_allowed(m, task);
47776 task_context_switch_counts(m, task);
47777 +
47778 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47779 + task_pax(m, task);
47780 +#endif
47781 +
47782 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47783 + task_grsec_rbac(m, task);
47784 +#endif
47785 +
47786 return 0;
47787 }
47788
47789 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47790 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47791 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47792 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47793 +#endif
47794 +
47795 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47796 struct pid *pid, struct task_struct *task, int whole)
47797 {
47798 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47799 char tcomm[sizeof(task->comm)];
47800 unsigned long flags;
47801
47802 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47803 + if (current->exec_id != m->exec_id) {
47804 + gr_log_badprocpid("stat");
47805 + return 0;
47806 + }
47807 +#endif
47808 +
47809 state = *get_task_state(task);
47810 vsize = eip = esp = 0;
47811 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47812 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47813 gtime = task->gtime;
47814 }
47815
47816 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47817 + if (PAX_RAND_FLAGS(mm)) {
47818 + eip = 0;
47819 + esp = 0;
47820 + wchan = 0;
47821 + }
47822 +#endif
47823 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47824 + wchan = 0;
47825 + eip =0;
47826 + esp =0;
47827 +#endif
47828 +
47829 /* scale priority and nice values from timeslices to -20..20 */
47830 /* to make it look like a "normal" Unix priority/nice value */
47831 priority = task_prio(task);
47832 @@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47833 vsize,
47834 mm ? get_mm_rss(mm) : 0,
47835 rsslim,
47836 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47837 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
47838 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
47839 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
47840 +#else
47841 mm ? (permitted ? mm->start_code : 1) : 0,
47842 mm ? (permitted ? mm->end_code : 1) : 0,
47843 (permitted && mm) ? mm->start_stack : 0,
47844 +#endif
47845 esp,
47846 eip,
47847 /* The signal information here is obsolete.
47848 @@ -536,8 +593,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47849 struct pid *pid, struct task_struct *task)
47850 {
47851 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
47852 - struct mm_struct *mm = get_task_mm(task);
47853 + struct mm_struct *mm;
47854
47855 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47856 + if (current->exec_id != m->exec_id) {
47857 + gr_log_badprocpid("statm");
47858 + return 0;
47859 + }
47860 +#endif
47861 + mm = get_task_mm(task);
47862 if (mm) {
47863 size = task_statm(mm, &shared, &text, &data, &resident);
47864 mmput(mm);
47865 @@ -547,3 +611,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47866
47867 return 0;
47868 }
47869 +
47870 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47871 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47872 +{
47873 + u32 curr_ip = 0;
47874 + unsigned long flags;
47875 +
47876 + if (lock_task_sighand(task, &flags)) {
47877 + curr_ip = task->signal->curr_ip;
47878 + unlock_task_sighand(task, &flags);
47879 + }
47880 +
47881 + return sprintf(buffer, "%pI4\n", &curr_ip);
47882 +}
47883 +#endif
47884 diff --git a/fs/proc/base.c b/fs/proc/base.c
47885 index d4548dd..d101f84 100644
47886 --- a/fs/proc/base.c
47887 +++ b/fs/proc/base.c
47888 @@ -109,6 +109,14 @@ struct pid_entry {
47889 union proc_op op;
47890 };
47891
47892 +struct getdents_callback {
47893 + struct linux_dirent __user * current_dir;
47894 + struct linux_dirent __user * previous;
47895 + struct file * file;
47896 + int count;
47897 + int error;
47898 +};
47899 +
47900 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47901 .name = (NAME), \
47902 .len = sizeof(NAME) - 1, \
47903 @@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47904 if (!mm->arg_end)
47905 goto out_mm; /* Shh! No looking before we're done */
47906
47907 + if (gr_acl_handle_procpidmem(task))
47908 + goto out_mm;
47909 +
47910 len = mm->arg_end - mm->arg_start;
47911
47912 if (len > PAGE_SIZE)
47913 @@ -240,12 +251,28 @@ out:
47914 return res;
47915 }
47916
47917 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47918 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47919 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47920 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47921 +#endif
47922 +
47923 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47924 {
47925 struct mm_struct *mm = mm_for_maps(task);
47926 int res = PTR_ERR(mm);
47927 if (mm && !IS_ERR(mm)) {
47928 unsigned int nwords = 0;
47929 +
47930 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47931 + /* allow if we're currently ptracing this task */
47932 + if (PAX_RAND_FLAGS(mm) &&
47933 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47934 + mmput(mm);
47935 + return 0;
47936 + }
47937 +#endif
47938 +
47939 do {
47940 nwords += 2;
47941 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47942 @@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47943 }
47944
47945
47946 -#ifdef CONFIG_KALLSYMS
47947 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47948 /*
47949 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47950 * Returns the resolved symbol. If that fails, simply return the address.
47951 @@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
47952 mutex_unlock(&task->signal->cred_guard_mutex);
47953 }
47954
47955 -#ifdef CONFIG_STACKTRACE
47956 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47957
47958 #define MAX_STACK_TRACE_DEPTH 64
47959
47960 @@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47961 return count;
47962 }
47963
47964 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47965 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47966 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47967 {
47968 long nr;
47969 @@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47970 /************************************************************************/
47971
47972 /* permission checks */
47973 -static int proc_fd_access_allowed(struct inode *inode)
47974 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47975 {
47976 struct task_struct *task;
47977 int allowed = 0;
47978 @@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47979 */
47980 task = get_proc_task(inode);
47981 if (task) {
47982 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47983 + if (log)
47984 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47985 + else
47986 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47987 put_task_struct(task);
47988 }
47989 return allowed;
47990 @@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
47991 struct task_struct *task,
47992 int hide_pid_min)
47993 {
47994 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47995 + return false;
47996 +
47997 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47998 + rcu_read_lock();
47999 + {
48000 + const struct cred *tmpcred = current_cred();
48001 + const struct cred *cred = __task_cred(task);
48002 +
48003 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
48004 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48005 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48006 +#endif
48007 + ) {
48008 + rcu_read_unlock();
48009 + return true;
48010 + }
48011 + }
48012 + rcu_read_unlock();
48013 +
48014 + if (!pid->hide_pid)
48015 + return false;
48016 +#endif
48017 +
48018 if (pid->hide_pid < hide_pid_min)
48019 return true;
48020 if (in_group_p(pid->pid_gid))
48021 return true;
48022 +
48023 return ptrace_may_access(task, PTRACE_MODE_READ);
48024 }
48025
48026 @@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
48027 put_task_struct(task);
48028
48029 if (!has_perms) {
48030 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48031 + {
48032 +#else
48033 if (pid->hide_pid == 2) {
48034 +#endif
48035 /*
48036 * Let's make getdents(), stat(), and open()
48037 * consistent with each other. If a process
48038 @@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
48039 file->f_mode |= FMODE_UNSIGNED_OFFSET;
48040 file->private_data = mm;
48041
48042 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48043 + file->f_version = current->exec_id;
48044 +#endif
48045 +
48046 return 0;
48047 }
48048
48049 @@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
48050 ssize_t copied;
48051 char *page;
48052
48053 +#ifdef CONFIG_GRKERNSEC
48054 + if (write)
48055 + return -EPERM;
48056 +#endif
48057 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48058 + if (file->f_version != current->exec_id) {
48059 + gr_log_badprocpid("mem");
48060 + return 0;
48061 + }
48062 +#endif
48063 +
48064 if (!mm)
48065 return 0;
48066
48067 @@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
48068 if (!task)
48069 goto out_no_task;
48070
48071 + if (gr_acl_handle_procpidmem(task))
48072 + goto out;
48073 +
48074 ret = -ENOMEM;
48075 page = (char *)__get_free_page(GFP_TEMPORARY);
48076 if (!page)
48077 @@ -1434,7 +1511,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
48078 path_put(&nd->path);
48079
48080 /* Are we allowed to snoop on the tasks file descriptors? */
48081 - if (!proc_fd_access_allowed(inode))
48082 + if (!proc_fd_access_allowed(inode, 0))
48083 goto out;
48084
48085 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
48086 @@ -1473,8 +1550,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
48087 struct path path;
48088
48089 /* Are we allowed to snoop on the tasks file descriptors? */
48090 - if (!proc_fd_access_allowed(inode))
48091 - goto out;
48092 + /* logging this is needed for learning on chromium to work properly,
48093 + but we don't want to flood the logs from 'ps' which does a readlink
48094 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
48095 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
48096 + */
48097 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48098 + if (!proc_fd_access_allowed(inode,0))
48099 + goto out;
48100 + } else {
48101 + if (!proc_fd_access_allowed(inode,1))
48102 + goto out;
48103 + }
48104
48105 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
48106 if (error)
48107 @@ -1539,7 +1626,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
48108 rcu_read_lock();
48109 cred = __task_cred(task);
48110 inode->i_uid = cred->euid;
48111 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48112 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48113 +#else
48114 inode->i_gid = cred->egid;
48115 +#endif
48116 rcu_read_unlock();
48117 }
48118 security_task_to_inode(task, inode);
48119 @@ -1575,10 +1666,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48120 return -ENOENT;
48121 }
48122 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48123 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48124 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48125 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48126 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48127 +#endif
48128 task_dumpable(task)) {
48129 cred = __task_cred(task);
48130 stat->uid = cred->euid;
48131 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48132 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48133 +#else
48134 stat->gid = cred->egid;
48135 +#endif
48136 }
48137 }
48138 rcu_read_unlock();
48139 @@ -1616,11 +1716,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
48140
48141 if (task) {
48142 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48143 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48144 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48145 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48146 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48147 +#endif
48148 task_dumpable(task)) {
48149 rcu_read_lock();
48150 cred = __task_cred(task);
48151 inode->i_uid = cred->euid;
48152 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48153 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48154 +#else
48155 inode->i_gid = cred->egid;
48156 +#endif
48157 rcu_read_unlock();
48158 } else {
48159 inode->i_uid = 0;
48160 @@ -1738,7 +1847,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
48161 int fd = proc_fd(inode);
48162
48163 if (task) {
48164 - files = get_files_struct(task);
48165 + if (!gr_acl_handle_procpidmem(task))
48166 + files = get_files_struct(task);
48167 put_task_struct(task);
48168 }
48169 if (files) {
48170 @@ -2355,11 +2465,21 @@ static const struct file_operations proc_map_files_operations = {
48171 */
48172 static int proc_fd_permission(struct inode *inode, int mask)
48173 {
48174 + struct task_struct *task;
48175 int rv = generic_permission(inode, mask);
48176 - if (rv == 0)
48177 - return 0;
48178 +
48179 if (task_pid(current) == proc_pid(inode))
48180 rv = 0;
48181 +
48182 + task = get_proc_task(inode);
48183 + if (task == NULL)
48184 + return rv;
48185 +
48186 + if (gr_acl_handle_procpidmem(task))
48187 + rv = -EACCES;
48188 +
48189 + put_task_struct(task);
48190 +
48191 return rv;
48192 }
48193
48194 @@ -2469,6 +2589,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
48195 if (!task)
48196 goto out_no_task;
48197
48198 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48199 + goto out;
48200 +
48201 /*
48202 * Yes, it does not scale. And it should not. Don't add
48203 * new entries into /proc/<tgid>/ without very good reasons.
48204 @@ -2513,6 +2636,9 @@ static int proc_pident_readdir(struct file *filp,
48205 if (!task)
48206 goto out_no_task;
48207
48208 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48209 + goto out;
48210 +
48211 ret = 0;
48212 i = filp->f_pos;
48213 switch (i) {
48214 @@ -2783,7 +2909,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
48215 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48216 void *cookie)
48217 {
48218 - char *s = nd_get_link(nd);
48219 + const char *s = nd_get_link(nd);
48220 if (!IS_ERR(s))
48221 __putname(s);
48222 }
48223 @@ -2984,7 +3110,7 @@ static const struct pid_entry tgid_base_stuff[] = {
48224 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
48225 #endif
48226 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48227 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48228 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48229 INF("syscall", S_IRUGO, proc_pid_syscall),
48230 #endif
48231 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48232 @@ -3009,10 +3135,10 @@ static const struct pid_entry tgid_base_stuff[] = {
48233 #ifdef CONFIG_SECURITY
48234 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48235 #endif
48236 -#ifdef CONFIG_KALLSYMS
48237 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48238 INF("wchan", S_IRUGO, proc_pid_wchan),
48239 #endif
48240 -#ifdef CONFIG_STACKTRACE
48241 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48242 ONE("stack", S_IRUGO, proc_pid_stack),
48243 #endif
48244 #ifdef CONFIG_SCHEDSTATS
48245 @@ -3046,6 +3172,9 @@ static const struct pid_entry tgid_base_stuff[] = {
48246 #ifdef CONFIG_HARDWALL
48247 INF("hardwall", S_IRUGO, proc_pid_hardwall),
48248 #endif
48249 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48250 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
48251 +#endif
48252 };
48253
48254 static int proc_tgid_base_readdir(struct file * filp,
48255 @@ -3172,7 +3301,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
48256 if (!inode)
48257 goto out;
48258
48259 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48260 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
48261 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48262 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48263 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
48264 +#else
48265 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
48266 +#endif
48267 inode->i_op = &proc_tgid_base_inode_operations;
48268 inode->i_fop = &proc_tgid_base_operations;
48269 inode->i_flags|=S_IMMUTABLE;
48270 @@ -3214,7 +3350,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
48271 if (!task)
48272 goto out;
48273
48274 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48275 + goto out_put_task;
48276 +
48277 result = proc_pid_instantiate(dir, dentry, task, NULL);
48278 +out_put_task:
48279 put_task_struct(task);
48280 out:
48281 return result;
48282 @@ -3277,6 +3417,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
48283 static int fake_filldir(void *buf, const char *name, int namelen,
48284 loff_t offset, u64 ino, unsigned d_type)
48285 {
48286 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
48287 + __buf->error = -EINVAL;
48288 return 0;
48289 }
48290
48291 @@ -3343,7 +3485,7 @@ static const struct pid_entry tid_base_stuff[] = {
48292 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
48293 #endif
48294 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48295 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48296 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48297 INF("syscall", S_IRUGO, proc_pid_syscall),
48298 #endif
48299 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48300 @@ -3367,10 +3509,10 @@ static const struct pid_entry tid_base_stuff[] = {
48301 #ifdef CONFIG_SECURITY
48302 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48303 #endif
48304 -#ifdef CONFIG_KALLSYMS
48305 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48306 INF("wchan", S_IRUGO, proc_pid_wchan),
48307 #endif
48308 -#ifdef CONFIG_STACKTRACE
48309 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48310 ONE("stack", S_IRUGO, proc_pid_stack),
48311 #endif
48312 #ifdef CONFIG_SCHEDSTATS
48313 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
48314 index 82676e3..5f8518a 100644
48315 --- a/fs/proc/cmdline.c
48316 +++ b/fs/proc/cmdline.c
48317 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
48318
48319 static int __init proc_cmdline_init(void)
48320 {
48321 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48322 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
48323 +#else
48324 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
48325 +#endif
48326 return 0;
48327 }
48328 module_init(proc_cmdline_init);
48329 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
48330 index b143471..bb105e5 100644
48331 --- a/fs/proc/devices.c
48332 +++ b/fs/proc/devices.c
48333 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
48334
48335 static int __init proc_devices_init(void)
48336 {
48337 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48338 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
48339 +#else
48340 proc_create("devices", 0, NULL, &proc_devinfo_operations);
48341 +#endif
48342 return 0;
48343 }
48344 module_init(proc_devices_init);
48345 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
48346 index 84fd323..f698a32 100644
48347 --- a/fs/proc/inode.c
48348 +++ b/fs/proc/inode.c
48349 @@ -21,12 +21,18 @@
48350 #include <linux/seq_file.h>
48351 #include <linux/slab.h>
48352 #include <linux/mount.h>
48353 +#include <linux/grsecurity.h>
48354
48355 #include <asm/system.h>
48356 #include <asm/uaccess.h>
48357
48358 #include "internal.h"
48359
48360 +#ifdef CONFIG_PROC_SYSCTL
48361 +extern const struct inode_operations proc_sys_inode_operations;
48362 +extern const struct inode_operations proc_sys_dir_operations;
48363 +#endif
48364 +
48365 static void proc_evict_inode(struct inode *inode)
48366 {
48367 struct proc_dir_entry *de;
48368 @@ -52,6 +58,13 @@ static void proc_evict_inode(struct inode *inode)
48369 ns_ops = PROC_I(inode)->ns_ops;
48370 if (ns_ops && ns_ops->put)
48371 ns_ops->put(PROC_I(inode)->ns);
48372 +
48373 +#ifdef CONFIG_PROC_SYSCTL
48374 + if (inode->i_op == &proc_sys_inode_operations ||
48375 + inode->i_op == &proc_sys_dir_operations)
48376 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
48377 +#endif
48378 +
48379 }
48380
48381 static struct kmem_cache * proc_inode_cachep;
48382 @@ -457,7 +470,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
48383 if (de->mode) {
48384 inode->i_mode = de->mode;
48385 inode->i_uid = de->uid;
48386 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48387 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48388 +#else
48389 inode->i_gid = de->gid;
48390 +#endif
48391 }
48392 if (de->size)
48393 inode->i_size = de->size;
48394 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
48395 index 2925775..4f08fae 100644
48396 --- a/fs/proc/internal.h
48397 +++ b/fs/proc/internal.h
48398 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48399 struct pid *pid, struct task_struct *task);
48400 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48401 struct pid *pid, struct task_struct *task);
48402 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48403 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
48404 +#endif
48405 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
48406
48407 extern const struct file_operations proc_maps_operations;
48408 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
48409 index d245cb2..f4e8498 100644
48410 --- a/fs/proc/kcore.c
48411 +++ b/fs/proc/kcore.c
48412 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48413 * the addresses in the elf_phdr on our list.
48414 */
48415 start = kc_offset_to_vaddr(*fpos - elf_buflen);
48416 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
48417 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
48418 + if (tsz > buflen)
48419 tsz = buflen;
48420 -
48421 +
48422 while (buflen) {
48423 struct kcore_list *m;
48424
48425 @@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48426 kfree(elf_buf);
48427 } else {
48428 if (kern_addr_valid(start)) {
48429 - unsigned long n;
48430 + char *elf_buf;
48431 + mm_segment_t oldfs;
48432
48433 - n = copy_to_user(buffer, (char *)start, tsz);
48434 - /*
48435 - * We cannot distingush between fault on source
48436 - * and fault on destination. When this happens
48437 - * we clear too and hope it will trigger the
48438 - * EFAULT again.
48439 - */
48440 - if (n) {
48441 - if (clear_user(buffer + tsz - n,
48442 - n))
48443 + elf_buf = kmalloc(tsz, GFP_KERNEL);
48444 + if (!elf_buf)
48445 + return -ENOMEM;
48446 + oldfs = get_fs();
48447 + set_fs(KERNEL_DS);
48448 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
48449 + set_fs(oldfs);
48450 + if (copy_to_user(buffer, elf_buf, tsz)) {
48451 + kfree(elf_buf);
48452 return -EFAULT;
48453 + }
48454 }
48455 + set_fs(oldfs);
48456 + kfree(elf_buf);
48457 } else {
48458 if (clear_user(buffer, tsz))
48459 return -EFAULT;
48460 @@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48461
48462 static int open_kcore(struct inode *inode, struct file *filp)
48463 {
48464 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
48465 + return -EPERM;
48466 +#endif
48467 if (!capable(CAP_SYS_RAWIO))
48468 return -EPERM;
48469 if (kcore_need_update)
48470 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
48471 index 80e4645..53e5fcf 100644
48472 --- a/fs/proc/meminfo.c
48473 +++ b/fs/proc/meminfo.c
48474 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
48475 vmi.used >> 10,
48476 vmi.largest_chunk >> 10
48477 #ifdef CONFIG_MEMORY_FAILURE
48478 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
48479 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
48480 #endif
48481 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
48482 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
48483 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
48484 index b1822dd..df622cb 100644
48485 --- a/fs/proc/nommu.c
48486 +++ b/fs/proc/nommu.c
48487 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
48488 if (len < 1)
48489 len = 1;
48490 seq_printf(m, "%*c", len, ' ');
48491 - seq_path(m, &file->f_path, "");
48492 + seq_path(m, &file->f_path, "\n\\");
48493 }
48494
48495 seq_putc(m, '\n');
48496 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
48497 index 06e1cc1..177cd98 100644
48498 --- a/fs/proc/proc_net.c
48499 +++ b/fs/proc/proc_net.c
48500 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
48501 struct task_struct *task;
48502 struct nsproxy *ns;
48503 struct net *net = NULL;
48504 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48505 + const struct cred *cred = current_cred();
48506 +#endif
48507 +
48508 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48509 + if (cred->fsuid)
48510 + return net;
48511 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48512 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
48513 + return net;
48514 +#endif
48515
48516 rcu_read_lock();
48517 task = pid_task(proc_pid(dir), PIDTYPE_PID);
48518 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
48519 index 53c3bce..10ad159 100644
48520 --- a/fs/proc/proc_sysctl.c
48521 +++ b/fs/proc/proc_sysctl.c
48522 @@ -9,11 +9,13 @@
48523 #include <linux/namei.h>
48524 #include "internal.h"
48525
48526 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
48527 +
48528 static const struct dentry_operations proc_sys_dentry_operations;
48529 static const struct file_operations proc_sys_file_operations;
48530 -static const struct inode_operations proc_sys_inode_operations;
48531 +const struct inode_operations proc_sys_inode_operations;
48532 static const struct file_operations proc_sys_dir_file_operations;
48533 -static const struct inode_operations proc_sys_dir_operations;
48534 +const struct inode_operations proc_sys_dir_operations;
48535
48536 void proc_sys_poll_notify(struct ctl_table_poll *poll)
48537 {
48538 @@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
48539
48540 err = NULL;
48541 d_set_d_op(dentry, &proc_sys_dentry_operations);
48542 +
48543 + gr_handle_proc_create(dentry, inode);
48544 +
48545 d_add(dentry, inode);
48546
48547 + if (gr_handle_sysctl(p, MAY_EXEC))
48548 + err = ERR_PTR(-ENOENT);
48549 +
48550 out:
48551 sysctl_head_finish(head);
48552 return err;
48553 @@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48554 if (!table->proc_handler)
48555 goto out;
48556
48557 +#ifdef CONFIG_GRKERNSEC
48558 + error = -EPERM;
48559 + if (write && !capable(CAP_SYS_ADMIN))
48560 + goto out;
48561 +#endif
48562 +
48563 /* careful: calling conventions are nasty here */
48564 res = count;
48565 error = table->proc_handler(table, write, buf, &res, ppos);
48566 @@ -260,6 +274,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48567 return -ENOMEM;
48568 } else {
48569 d_set_d_op(child, &proc_sys_dentry_operations);
48570 +
48571 + gr_handle_proc_create(child, inode);
48572 +
48573 d_add(child, inode);
48574 }
48575 } else {
48576 @@ -288,6 +305,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48577 if (*pos < file->f_pos)
48578 continue;
48579
48580 + if (gr_handle_sysctl(table, 0))
48581 + continue;
48582 +
48583 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
48584 if (res)
48585 return res;
48586 @@ -413,6 +433,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
48587 if (IS_ERR(head))
48588 return PTR_ERR(head);
48589
48590 + if (table && gr_handle_sysctl(table, MAY_EXEC))
48591 + return -ENOENT;
48592 +
48593 generic_fillattr(inode, stat);
48594 if (table)
48595 stat->mode = (stat->mode & S_IFMT) | table->mode;
48596 @@ -435,13 +458,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
48597 .llseek = generic_file_llseek,
48598 };
48599
48600 -static const struct inode_operations proc_sys_inode_operations = {
48601 +const struct inode_operations proc_sys_inode_operations = {
48602 .permission = proc_sys_permission,
48603 .setattr = proc_sys_setattr,
48604 .getattr = proc_sys_getattr,
48605 };
48606
48607 -static const struct inode_operations proc_sys_dir_operations = {
48608 +const struct inode_operations proc_sys_dir_operations = {
48609 .lookup = proc_sys_lookup,
48610 .permission = proc_sys_permission,
48611 .setattr = proc_sys_setattr,
48612 diff --git a/fs/proc/root.c b/fs/proc/root.c
48613 index 46a15d8..335631a 100644
48614 --- a/fs/proc/root.c
48615 +++ b/fs/proc/root.c
48616 @@ -187,7 +187,15 @@ void __init proc_root_init(void)
48617 #ifdef CONFIG_PROC_DEVICETREE
48618 proc_device_tree_init();
48619 #endif
48620 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48621 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48622 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48623 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48624 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48625 +#endif
48626 +#else
48627 proc_mkdir("bus", NULL);
48628 +#endif
48629 proc_sys_init();
48630 }
48631
48632 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48633 index 3efa725..23c925b 100644
48634 --- a/fs/proc/task_mmu.c
48635 +++ b/fs/proc/task_mmu.c
48636 @@ -11,6 +11,7 @@
48637 #include <linux/rmap.h>
48638 #include <linux/swap.h>
48639 #include <linux/swapops.h>
48640 +#include <linux/grsecurity.h>
48641
48642 #include <asm/elf.h>
48643 #include <asm/uaccess.h>
48644 @@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48645 "VmExe:\t%8lu kB\n"
48646 "VmLib:\t%8lu kB\n"
48647 "VmPTE:\t%8lu kB\n"
48648 - "VmSwap:\t%8lu kB\n",
48649 - hiwater_vm << (PAGE_SHIFT-10),
48650 + "VmSwap:\t%8lu kB\n"
48651 +
48652 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48653 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48654 +#endif
48655 +
48656 + ,hiwater_vm << (PAGE_SHIFT-10),
48657 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48658 mm->locked_vm << (PAGE_SHIFT-10),
48659 mm->pinned_vm << (PAGE_SHIFT-10),
48660 @@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48661 data << (PAGE_SHIFT-10),
48662 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
48663 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48664 - swap << (PAGE_SHIFT-10));
48665 + swap << (PAGE_SHIFT-10)
48666 +
48667 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48668 + , mm->context.user_cs_base, mm->context.user_cs_limit
48669 +#endif
48670 +
48671 + );
48672 }
48673
48674 unsigned long task_vsize(struct mm_struct *mm)
48675 @@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
48676 return ret;
48677 }
48678
48679 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48680 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48681 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48682 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48683 +#endif
48684 +
48685 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48686 {
48687 struct mm_struct *mm = vma->vm_mm;
48688 @@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48689 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
48690 }
48691
48692 - /* We don't show the stack guard page in /proc/maps */
48693 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48694 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48695 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
48696 +#else
48697 start = vma->vm_start;
48698 - if (stack_guard_page_start(vma, start))
48699 - start += PAGE_SIZE;
48700 end = vma->vm_end;
48701 - if (stack_guard_page_end(vma, end))
48702 - end -= PAGE_SIZE;
48703 +#endif
48704
48705 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48706 start,
48707 @@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48708 flags & VM_WRITE ? 'w' : '-',
48709 flags & VM_EXEC ? 'x' : '-',
48710 flags & VM_MAYSHARE ? 's' : 'p',
48711 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48712 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48713 +#else
48714 pgoff,
48715 +#endif
48716 MAJOR(dev), MINOR(dev), ino, &len);
48717
48718 /*
48719 @@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48720 */
48721 if (file) {
48722 pad_len_spaces(m, len);
48723 - seq_path(m, &file->f_path, "\n");
48724 + seq_path(m, &file->f_path, "\n\\");
48725 } else {
48726 const char *name = arch_vma_name(vma);
48727 if (!name) {
48728 @@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48729 if (vma->vm_start <= mm->brk &&
48730 vma->vm_end >= mm->start_brk) {
48731 name = "[heap]";
48732 - } else if (vma->vm_start <= mm->start_stack &&
48733 - vma->vm_end >= mm->start_stack) {
48734 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48735 + (vma->vm_start <= mm->start_stack &&
48736 + vma->vm_end >= mm->start_stack)) {
48737 name = "[stack]";
48738 }
48739 } else {
48740 @@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
48741 struct proc_maps_private *priv = m->private;
48742 struct task_struct *task = priv->task;
48743
48744 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48745 + if (current->exec_id != m->exec_id) {
48746 + gr_log_badprocpid("maps");
48747 + return 0;
48748 + }
48749 +#endif
48750 +
48751 show_map_vma(m, vma);
48752
48753 if (m->count < m->size) /* vma is copied successfully */
48754 @@ -437,12 +467,23 @@ static int show_smap(struct seq_file *m, void *v)
48755 .private = &mss,
48756 };
48757
48758 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48759 + if (current->exec_id != m->exec_id) {
48760 + gr_log_badprocpid("smaps");
48761 + return 0;
48762 + }
48763 +#endif
48764 memset(&mss, 0, sizeof mss);
48765 - mss.vma = vma;
48766 - /* mmap_sem is held in m_start */
48767 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48768 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48769 -
48770 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48771 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48772 +#endif
48773 + mss.vma = vma;
48774 + /* mmap_sem is held in m_start */
48775 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48776 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48777 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48778 + }
48779 +#endif
48780 show_map_vma(m, vma);
48781
48782 seq_printf(m,
48783 @@ -460,7 +501,11 @@ static int show_smap(struct seq_file *m, void *v)
48784 "KernelPageSize: %8lu kB\n"
48785 "MMUPageSize: %8lu kB\n"
48786 "Locked: %8lu kB\n",
48787 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48788 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48789 +#else
48790 (vma->vm_end - vma->vm_start) >> 10,
48791 +#endif
48792 mss.resident >> 10,
48793 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48794 mss.shared_clean >> 10,
48795 @@ -1024,6 +1069,13 @@ static int show_numa_map(struct seq_file *m, void *v)
48796 int n;
48797 char buffer[50];
48798
48799 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48800 + if (current->exec_id != m->exec_id) {
48801 + gr_log_badprocpid("numa_maps");
48802 + return 0;
48803 + }
48804 +#endif
48805 +
48806 if (!mm)
48807 return 0;
48808
48809 @@ -1041,11 +1093,15 @@ static int show_numa_map(struct seq_file *m, void *v)
48810 mpol_to_str(buffer, sizeof(buffer), pol, 0);
48811 mpol_cond_put(pol);
48812
48813 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48814 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
48815 +#else
48816 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
48817 +#endif
48818
48819 if (file) {
48820 seq_printf(m, " file=");
48821 - seq_path(m, &file->f_path, "\n\t= ");
48822 + seq_path(m, &file->f_path, "\n\t\\= ");
48823 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48824 seq_printf(m, " heap");
48825 } else if (vma->vm_start <= mm->start_stack &&
48826 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48827 index 980de54..2a4db5f 100644
48828 --- a/fs/proc/task_nommu.c
48829 +++ b/fs/proc/task_nommu.c
48830 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48831 else
48832 bytes += kobjsize(mm);
48833
48834 - if (current->fs && current->fs->users > 1)
48835 + if (current->fs && atomic_read(&current->fs->users) > 1)
48836 sbytes += kobjsize(current->fs);
48837 else
48838 bytes += kobjsize(current->fs);
48839 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
48840
48841 if (file) {
48842 pad_len_spaces(m, len);
48843 - seq_path(m, &file->f_path, "");
48844 + seq_path(m, &file->f_path, "\n\\");
48845 } else if (mm) {
48846 if (vma->vm_start <= mm->start_stack &&
48847 vma->vm_end >= mm->start_stack) {
48848 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48849 index d67908b..d13f6a6 100644
48850 --- a/fs/quota/netlink.c
48851 +++ b/fs/quota/netlink.c
48852 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48853 void quota_send_warning(short type, unsigned int id, dev_t dev,
48854 const char warntype)
48855 {
48856 - static atomic_t seq;
48857 + static atomic_unchecked_t seq;
48858 struct sk_buff *skb;
48859 void *msg_head;
48860 int ret;
48861 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48862 "VFS: Not enough memory to send quota warning.\n");
48863 return;
48864 }
48865 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48866 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48867 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48868 if (!msg_head) {
48869 printk(KERN_ERR
48870 diff --git a/fs/readdir.c b/fs/readdir.c
48871 index 356f715..c918d38 100644
48872 --- a/fs/readdir.c
48873 +++ b/fs/readdir.c
48874 @@ -17,6 +17,7 @@
48875 #include <linux/security.h>
48876 #include <linux/syscalls.h>
48877 #include <linux/unistd.h>
48878 +#include <linux/namei.h>
48879
48880 #include <asm/uaccess.h>
48881
48882 @@ -67,6 +68,7 @@ struct old_linux_dirent {
48883
48884 struct readdir_callback {
48885 struct old_linux_dirent __user * dirent;
48886 + struct file * file;
48887 int result;
48888 };
48889
48890 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
48891 buf->result = -EOVERFLOW;
48892 return -EOVERFLOW;
48893 }
48894 +
48895 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48896 + return 0;
48897 +
48898 buf->result++;
48899 dirent = buf->dirent;
48900 if (!access_ok(VERIFY_WRITE, dirent,
48901 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
48902
48903 buf.result = 0;
48904 buf.dirent = dirent;
48905 + buf.file = file;
48906
48907 error = vfs_readdir(file, fillonedir, &buf);
48908 if (buf.result)
48909 @@ -142,6 +149,7 @@ struct linux_dirent {
48910 struct getdents_callback {
48911 struct linux_dirent __user * current_dir;
48912 struct linux_dirent __user * previous;
48913 + struct file * file;
48914 int count;
48915 int error;
48916 };
48917 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
48918 buf->error = -EOVERFLOW;
48919 return -EOVERFLOW;
48920 }
48921 +
48922 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48923 + return 0;
48924 +
48925 dirent = buf->previous;
48926 if (dirent) {
48927 if (__put_user(offset, &dirent->d_off))
48928 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
48929 buf.previous = NULL;
48930 buf.count = count;
48931 buf.error = 0;
48932 + buf.file = file;
48933
48934 error = vfs_readdir(file, filldir, &buf);
48935 if (error >= 0)
48936 @@ -229,6 +242,7 @@ out:
48937 struct getdents_callback64 {
48938 struct linux_dirent64 __user * current_dir;
48939 struct linux_dirent64 __user * previous;
48940 + struct file *file;
48941 int count;
48942 int error;
48943 };
48944 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
48945 buf->error = -EINVAL; /* only used if we fail.. */
48946 if (reclen > buf->count)
48947 return -EINVAL;
48948 +
48949 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48950 + return 0;
48951 +
48952 dirent = buf->previous;
48953 if (dirent) {
48954 if (__put_user(offset, &dirent->d_off))
48955 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48956
48957 buf.current_dir = dirent;
48958 buf.previous = NULL;
48959 + buf.file = file;
48960 buf.count = count;
48961 buf.error = 0;
48962
48963 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48964 error = buf.error;
48965 lastdirent = buf.previous;
48966 if (lastdirent) {
48967 - typeof(lastdirent->d_off) d_off = file->f_pos;
48968 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48969 if (__put_user(d_off, &lastdirent->d_off))
48970 error = -EFAULT;
48971 else
48972 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
48973 index 60c0804..d814f98 100644
48974 --- a/fs/reiserfs/do_balan.c
48975 +++ b/fs/reiserfs/do_balan.c
48976 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
48977 return;
48978 }
48979
48980 - atomic_inc(&(fs_generation(tb->tb_sb)));
48981 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
48982 do_balance_starts(tb);
48983
48984 /* balance leaf returns 0 except if combining L R and S into
48985 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
48986 index 7a99811..a7c96c4 100644
48987 --- a/fs/reiserfs/procfs.c
48988 +++ b/fs/reiserfs/procfs.c
48989 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
48990 "SMALL_TAILS " : "NO_TAILS ",
48991 replay_only(sb) ? "REPLAY_ONLY " : "",
48992 convert_reiserfs(sb) ? "CONV " : "",
48993 - atomic_read(&r->s_generation_counter),
48994 + atomic_read_unchecked(&r->s_generation_counter),
48995 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
48996 SF(s_do_balance), SF(s_unneeded_left_neighbor),
48997 SF(s_good_search_by_key_reada), SF(s_bmaps),
48998 diff --git a/fs/select.c b/fs/select.c
48999 index e782258..3b4b44c 100644
49000 --- a/fs/select.c
49001 +++ b/fs/select.c
49002 @@ -20,6 +20,7 @@
49003 #include <linux/module.h>
49004 #include <linux/slab.h>
49005 #include <linux/poll.h>
49006 +#include <linux/security.h>
49007 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
49008 #include <linux/file.h>
49009 #include <linux/fdtable.h>
49010 @@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
49011 struct poll_list *walk = head;
49012 unsigned long todo = nfds;
49013
49014 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
49015 if (nfds > rlimit(RLIMIT_NOFILE))
49016 return -EINVAL;
49017
49018 diff --git a/fs/seq_file.c b/fs/seq_file.c
49019 index 4023d6b..ab46c6a 100644
49020 --- a/fs/seq_file.c
49021 +++ b/fs/seq_file.c
49022 @@ -9,6 +9,7 @@
49023 #include <linux/module.h>
49024 #include <linux/seq_file.h>
49025 #include <linux/slab.h>
49026 +#include <linux/sched.h>
49027
49028 #include <asm/uaccess.h>
49029 #include <asm/page.h>
49030 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
49031 memset(p, 0, sizeof(*p));
49032 mutex_init(&p->lock);
49033 p->op = op;
49034 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49035 + p->exec_id = current->exec_id;
49036 +#endif
49037
49038 /*
49039 * Wrappers around seq_open(e.g. swaps_open) need to be
49040 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
49041 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49042 void *data)
49043 {
49044 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49045 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49046 int res = -ENOMEM;
49047
49048 if (op) {
49049 diff --git a/fs/splice.c b/fs/splice.c
49050 index 1ec0493..d6ab5c2 100644
49051 --- a/fs/splice.c
49052 +++ b/fs/splice.c
49053 @@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49054 pipe_lock(pipe);
49055
49056 for (;;) {
49057 - if (!pipe->readers) {
49058 + if (!atomic_read(&pipe->readers)) {
49059 send_sig(SIGPIPE, current, 0);
49060 if (!ret)
49061 ret = -EPIPE;
49062 @@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49063 do_wakeup = 0;
49064 }
49065
49066 - pipe->waiting_writers++;
49067 + atomic_inc(&pipe->waiting_writers);
49068 pipe_wait(pipe);
49069 - pipe->waiting_writers--;
49070 + atomic_dec(&pipe->waiting_writers);
49071 }
49072
49073 pipe_unlock(pipe);
49074 @@ -559,7 +559,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
49075 old_fs = get_fs();
49076 set_fs(get_ds());
49077 /* The cast to a user pointer is valid due to the set_fs() */
49078 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
49079 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
49080 set_fs(old_fs);
49081
49082 return res;
49083 @@ -574,7 +574,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
49084 old_fs = get_fs();
49085 set_fs(get_ds());
49086 /* The cast to a user pointer is valid due to the set_fs() */
49087 - res = vfs_write(file, (const char __user *)buf, count, &pos);
49088 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
49089 set_fs(old_fs);
49090
49091 return res;
49092 @@ -625,7 +625,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49093 goto err;
49094
49095 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49096 - vec[i].iov_base = (void __user *) page_address(page);
49097 + vec[i].iov_base = (void __force_user *) page_address(page);
49098 vec[i].iov_len = this_len;
49099 spd.pages[i] = page;
49100 spd.nr_pages++;
49101 @@ -845,10 +845,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
49102 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49103 {
49104 while (!pipe->nrbufs) {
49105 - if (!pipe->writers)
49106 + if (!atomic_read(&pipe->writers))
49107 return 0;
49108
49109 - if (!pipe->waiting_writers && sd->num_spliced)
49110 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49111 return 0;
49112
49113 if (sd->flags & SPLICE_F_NONBLOCK)
49114 @@ -1181,7 +1181,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
49115 * out of the pipe right after the splice_to_pipe(). So set
49116 * PIPE_READERS appropriately.
49117 */
49118 - pipe->readers = 1;
49119 + atomic_set(&pipe->readers, 1);
49120
49121 current->splice_pipe = pipe;
49122 }
49123 @@ -1733,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49124 ret = -ERESTARTSYS;
49125 break;
49126 }
49127 - if (!pipe->writers)
49128 + if (!atomic_read(&pipe->writers))
49129 break;
49130 - if (!pipe->waiting_writers) {
49131 + if (!atomic_read(&pipe->waiting_writers)) {
49132 if (flags & SPLICE_F_NONBLOCK) {
49133 ret = -EAGAIN;
49134 break;
49135 @@ -1767,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49136 pipe_lock(pipe);
49137
49138 while (pipe->nrbufs >= pipe->buffers) {
49139 - if (!pipe->readers) {
49140 + if (!atomic_read(&pipe->readers)) {
49141 send_sig(SIGPIPE, current, 0);
49142 ret = -EPIPE;
49143 break;
49144 @@ -1780,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49145 ret = -ERESTARTSYS;
49146 break;
49147 }
49148 - pipe->waiting_writers++;
49149 + atomic_inc(&pipe->waiting_writers);
49150 pipe_wait(pipe);
49151 - pipe->waiting_writers--;
49152 + atomic_dec(&pipe->waiting_writers);
49153 }
49154
49155 pipe_unlock(pipe);
49156 @@ -1818,14 +1818,14 @@ retry:
49157 pipe_double_lock(ipipe, opipe);
49158
49159 do {
49160 - if (!opipe->readers) {
49161 + if (!atomic_read(&opipe->readers)) {
49162 send_sig(SIGPIPE, current, 0);
49163 if (!ret)
49164 ret = -EPIPE;
49165 break;
49166 }
49167
49168 - if (!ipipe->nrbufs && !ipipe->writers)
49169 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
49170 break;
49171
49172 /*
49173 @@ -1922,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49174 pipe_double_lock(ipipe, opipe);
49175
49176 do {
49177 - if (!opipe->readers) {
49178 + if (!atomic_read(&opipe->readers)) {
49179 send_sig(SIGPIPE, current, 0);
49180 if (!ret)
49181 ret = -EPIPE;
49182 @@ -1967,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49183 * return EAGAIN if we have the potential of some data in the
49184 * future, otherwise just return 0
49185 */
49186 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
49187 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
49188 ret = -EAGAIN;
49189
49190 pipe_unlock(ipipe);
49191 diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
49192 index a475983..9c6a1f0 100644
49193 --- a/fs/sysfs/bin.c
49194 +++ b/fs/sysfs/bin.c
49195 @@ -67,6 +67,8 @@ fill_read(struct file *file, char *buffer, loff_t off, size_t count)
49196 }
49197
49198 static ssize_t
49199 +read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off) __size_overflow(3);
49200 +static ssize_t
49201 read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
49202 {
49203 struct bin_buffer *bb = file->private_data;
49204 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
49205 index 7fdf6a7..e6cd8ad 100644
49206 --- a/fs/sysfs/dir.c
49207 +++ b/fs/sysfs/dir.c
49208 @@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
49209 struct sysfs_dirent *sd;
49210 int rc;
49211
49212 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49213 + const char *parent_name = parent_sd->s_name;
49214 +
49215 + mode = S_IFDIR | S_IRWXU;
49216 +
49217 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
49218 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
49219 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
49220 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
49221 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
49222 +#endif
49223 +
49224 /* allocate */
49225 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
49226 if (!sd)
49227 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
49228 index 00012e3..8392349 100644
49229 --- a/fs/sysfs/file.c
49230 +++ b/fs/sysfs/file.c
49231 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
49232
49233 struct sysfs_open_dirent {
49234 atomic_t refcnt;
49235 - atomic_t event;
49236 + atomic_unchecked_t event;
49237 wait_queue_head_t poll;
49238 struct list_head buffers; /* goes through sysfs_buffer.list */
49239 };
49240 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
49241 if (!sysfs_get_active(attr_sd))
49242 return -ENODEV;
49243
49244 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
49245 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
49246 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
49247
49248 sysfs_put_active(attr_sd);
49249 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
49250 return -ENOMEM;
49251
49252 atomic_set(&new_od->refcnt, 0);
49253 - atomic_set(&new_od->event, 1);
49254 + atomic_set_unchecked(&new_od->event, 1);
49255 init_waitqueue_head(&new_od->poll);
49256 INIT_LIST_HEAD(&new_od->buffers);
49257 goto retry;
49258 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
49259
49260 sysfs_put_active(attr_sd);
49261
49262 - if (buffer->event != atomic_read(&od->event))
49263 + if (buffer->event != atomic_read_unchecked(&od->event))
49264 goto trigger;
49265
49266 return DEFAULT_POLLMASK;
49267 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
49268
49269 od = sd->s_attr.open;
49270 if (od) {
49271 - atomic_inc(&od->event);
49272 + atomic_inc_unchecked(&od->event);
49273 wake_up_interruptible(&od->poll);
49274 }
49275
49276 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
49277 index a7ac78f..02158e1 100644
49278 --- a/fs/sysfs/symlink.c
49279 +++ b/fs/sysfs/symlink.c
49280 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
49281
49282 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
49283 {
49284 - char *page = nd_get_link(nd);
49285 + const char *page = nd_get_link(nd);
49286 if (!IS_ERR(page))
49287 free_page((unsigned long)page);
49288 }
49289 diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
49290 index f922cba..062fb02 100644
49291 --- a/fs/ubifs/debug.c
49292 +++ b/fs/ubifs/debug.c
49293 @@ -2819,6 +2819,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *u, size_t count,
49294 * debugfs file. Returns %0 or %1 in case of success and a negative error code
49295 * in case of failure.
49296 */
49297 +static int interpret_user_input(const char __user *u, size_t count) __size_overflow(2);
49298 static int interpret_user_input(const char __user *u, size_t count)
49299 {
49300 size_t buf_size;
49301 @@ -2837,6 +2838,8 @@ static int interpret_user_input(const char __user *u, size_t count)
49302 }
49303
49304 static ssize_t dfs_file_write(struct file *file, const char __user *u,
49305 + size_t count, loff_t *ppos) __size_overflow(3);
49306 +static ssize_t dfs_file_write(struct file *file, const char __user *u,
49307 size_t count, loff_t *ppos)
49308 {
49309 struct ubifs_info *c = file->private_data;
49310 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
49311 index c175b4d..8f36a16 100644
49312 --- a/fs/udf/misc.c
49313 +++ b/fs/udf/misc.c
49314 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
49315
49316 u8 udf_tag_checksum(const struct tag *t)
49317 {
49318 - u8 *data = (u8 *)t;
49319 + const u8 *data = (const u8 *)t;
49320 u8 checksum = 0;
49321 int i;
49322 for (i = 0; i < sizeof(struct tag); ++i)
49323 diff --git a/fs/utimes.c b/fs/utimes.c
49324 index ba653f3..06ea4b1 100644
49325 --- a/fs/utimes.c
49326 +++ b/fs/utimes.c
49327 @@ -1,6 +1,7 @@
49328 #include <linux/compiler.h>
49329 #include <linux/file.h>
49330 #include <linux/fs.h>
49331 +#include <linux/security.h>
49332 #include <linux/linkage.h>
49333 #include <linux/mount.h>
49334 #include <linux/namei.h>
49335 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
49336 goto mnt_drop_write_and_out;
49337 }
49338 }
49339 +
49340 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
49341 + error = -EACCES;
49342 + goto mnt_drop_write_and_out;
49343 + }
49344 +
49345 mutex_lock(&inode->i_mutex);
49346 error = notify_change(path->dentry, &newattrs);
49347 mutex_unlock(&inode->i_mutex);
49348 diff --git a/fs/xattr.c b/fs/xattr.c
49349 index 82f4337..236473c 100644
49350 --- a/fs/xattr.c
49351 +++ b/fs/xattr.c
49352 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
49353 * Extended attribute SET operations
49354 */
49355 static long
49356 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
49357 +setxattr(struct path *path, const char __user *name, const void __user *value,
49358 size_t size, int flags)
49359 {
49360 int error;
49361 @@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
49362 return PTR_ERR(kvalue);
49363 }
49364
49365 - error = vfs_setxattr(d, kname, kvalue, size, flags);
49366 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
49367 + error = -EACCES;
49368 + goto out;
49369 + }
49370 +
49371 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
49372 +out:
49373 kfree(kvalue);
49374 return error;
49375 }
49376 @@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
49377 return error;
49378 error = mnt_want_write(path.mnt);
49379 if (!error) {
49380 - error = setxattr(path.dentry, name, value, size, flags);
49381 + error = setxattr(&path, name, value, size, flags);
49382 mnt_drop_write(path.mnt);
49383 }
49384 path_put(&path);
49385 @@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
49386 return error;
49387 error = mnt_want_write(path.mnt);
49388 if (!error) {
49389 - error = setxattr(path.dentry, name, value, size, flags);
49390 + error = setxattr(&path, name, value, size, flags);
49391 mnt_drop_write(path.mnt);
49392 }
49393 path_put(&path);
49394 @@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
49395 const void __user *,value, size_t, size, int, flags)
49396 {
49397 struct file *f;
49398 - struct dentry *dentry;
49399 int error = -EBADF;
49400
49401 f = fget(fd);
49402 if (!f)
49403 return error;
49404 - dentry = f->f_path.dentry;
49405 - audit_inode(NULL, dentry);
49406 + audit_inode(NULL, f->f_path.dentry);
49407 error = mnt_want_write_file(f);
49408 if (!error) {
49409 - error = setxattr(dentry, name, value, size, flags);
49410 + error = setxattr(&f->f_path, name, value, size, flags);
49411 mnt_drop_write_file(f);
49412 }
49413 fput(f);
49414 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
49415 index 8d5a506..7f62712 100644
49416 --- a/fs/xattr_acl.c
49417 +++ b/fs/xattr_acl.c
49418 @@ -17,8 +17,8 @@
49419 struct posix_acl *
49420 posix_acl_from_xattr(const void *value, size_t size)
49421 {
49422 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
49423 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
49424 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
49425 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
49426 int count;
49427 struct posix_acl *acl;
49428 struct posix_acl_entry *acl_e;
49429 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
49430 index 188ef2f..adcf864 100644
49431 --- a/fs/xfs/xfs_bmap.c
49432 +++ b/fs/xfs/xfs_bmap.c
49433 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
49434 int nmap,
49435 int ret_nmap);
49436 #else
49437 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
49438 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
49439 #endif /* DEBUG */
49440
49441 STATIC int
49442 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
49443 index 79d05e8..e3e5861 100644
49444 --- a/fs/xfs/xfs_dir2_sf.c
49445 +++ b/fs/xfs/xfs_dir2_sf.c
49446 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
49447 }
49448
49449 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
49450 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49451 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
49452 + char name[sfep->namelen];
49453 + memcpy(name, sfep->name, sfep->namelen);
49454 + if (filldir(dirent, name, sfep->namelen,
49455 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
49456 + *offset = off & 0x7fffffff;
49457 + return 0;
49458 + }
49459 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49460 off & 0x7fffffff, ino, DT_UNKNOWN)) {
49461 *offset = off & 0x7fffffff;
49462 return 0;
49463 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
49464 index 76f3ca5..f57f712 100644
49465 --- a/fs/xfs/xfs_ioctl.c
49466 +++ b/fs/xfs/xfs_ioctl.c
49467 @@ -128,7 +128,7 @@ xfs_find_handle(
49468 }
49469
49470 error = -EFAULT;
49471 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
49472 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
49473 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
49474 goto out_put;
49475
49476 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
49477 index ab30253..4d86958 100644
49478 --- a/fs/xfs/xfs_iops.c
49479 +++ b/fs/xfs/xfs_iops.c
49480 @@ -447,7 +447,7 @@ xfs_vn_put_link(
49481 struct nameidata *nd,
49482 void *p)
49483 {
49484 - char *s = nd_get_link(nd);
49485 + const char *s = nd_get_link(nd);
49486
49487 if (!IS_ERR(s))
49488 kfree(s);
49489 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
49490 new file mode 100644
49491 index 0000000..4089e05
49492 --- /dev/null
49493 +++ b/grsecurity/Kconfig
49494 @@ -0,0 +1,1078 @@
49495 +#
49496 +# grecurity configuration
49497 +#
49498 +
49499 +menu "Grsecurity"
49500 +
49501 +config GRKERNSEC
49502 + bool "Grsecurity"
49503 + select CRYPTO
49504 + select CRYPTO_SHA256
49505 + help
49506 + If you say Y here, you will be able to configure many features
49507 + that will enhance the security of your system. It is highly
49508 + recommended that you say Y here and read through the help
49509 + for each option so that you fully understand the features and
49510 + can evaluate their usefulness for your machine.
49511 +
49512 +choice
49513 + prompt "Security Level"
49514 + depends on GRKERNSEC
49515 + default GRKERNSEC_CUSTOM
49516 +
49517 +config GRKERNSEC_LOW
49518 + bool "Low"
49519 + select GRKERNSEC_LINK
49520 + select GRKERNSEC_FIFO
49521 + select GRKERNSEC_RANDNET
49522 + select GRKERNSEC_DMESG
49523 + select GRKERNSEC_CHROOT
49524 + select GRKERNSEC_CHROOT_CHDIR
49525 +
49526 + help
49527 + If you choose this option, several of the grsecurity options will
49528 + be enabled that will give you greater protection against a number
49529 + of attacks, while assuring that none of your software will have any
49530 + conflicts with the additional security measures. If you run a lot
49531 + of unusual software, or you are having problems with the higher
49532 + security levels, you should say Y here. With this option, the
49533 + following features are enabled:
49534 +
49535 + - Linking restrictions
49536 + - FIFO restrictions
49537 + - Restricted dmesg
49538 + - Enforced chdir("/") on chroot
49539 + - Runtime module disabling
49540 +
49541 +config GRKERNSEC_MEDIUM
49542 + bool "Medium"
49543 + select PAX
49544 + select PAX_EI_PAX
49545 + select PAX_PT_PAX_FLAGS
49546 + select PAX_HAVE_ACL_FLAGS
49547 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49548 + select GRKERNSEC_CHROOT
49549 + select GRKERNSEC_CHROOT_SYSCTL
49550 + select GRKERNSEC_LINK
49551 + select GRKERNSEC_FIFO
49552 + select GRKERNSEC_DMESG
49553 + select GRKERNSEC_RANDNET
49554 + select GRKERNSEC_FORKFAIL
49555 + select GRKERNSEC_TIME
49556 + select GRKERNSEC_SIGNAL
49557 + select GRKERNSEC_CHROOT
49558 + select GRKERNSEC_CHROOT_UNIX
49559 + select GRKERNSEC_CHROOT_MOUNT
49560 + select GRKERNSEC_CHROOT_PIVOT
49561 + select GRKERNSEC_CHROOT_DOUBLE
49562 + select GRKERNSEC_CHROOT_CHDIR
49563 + select GRKERNSEC_CHROOT_MKNOD
49564 + select GRKERNSEC_PROC
49565 + select GRKERNSEC_PROC_USERGROUP
49566 + select PAX_RANDUSTACK
49567 + select PAX_ASLR
49568 + select PAX_RANDMMAP
49569 + select PAX_REFCOUNT if (X86 || SPARC64)
49570 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49571 +
49572 + help
49573 + If you say Y here, several features in addition to those included
49574 + in the low additional security level will be enabled. These
49575 + features provide even more security to your system, though in rare
49576 + cases they may be incompatible with very old or poorly written
49577 + software. If you enable this option, make sure that your auth
49578 + service (identd) is running as gid 1001. With this option,
49579 + the following features (in addition to those provided in the
49580 + low additional security level) will be enabled:
49581 +
49582 + - Failed fork logging
49583 + - Time change logging
49584 + - Signal logging
49585 + - Deny mounts in chroot
49586 + - Deny double chrooting
49587 + - Deny sysctl writes in chroot
49588 + - Deny mknod in chroot
49589 + - Deny access to abstract AF_UNIX sockets out of chroot
49590 + - Deny pivot_root in chroot
49591 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
49592 + - /proc restrictions with special GID set to 10 (usually wheel)
49593 + - Address Space Layout Randomization (ASLR)
49594 + - Prevent exploitation of most refcount overflows
49595 + - Bounds checking of copying between the kernel and userland
49596 +
49597 +config GRKERNSEC_HIGH
49598 + bool "High"
49599 + select GRKERNSEC_LINK
49600 + select GRKERNSEC_FIFO
49601 + select GRKERNSEC_DMESG
49602 + select GRKERNSEC_FORKFAIL
49603 + select GRKERNSEC_TIME
49604 + select GRKERNSEC_SIGNAL
49605 + select GRKERNSEC_CHROOT
49606 + select GRKERNSEC_CHROOT_SHMAT
49607 + select GRKERNSEC_CHROOT_UNIX
49608 + select GRKERNSEC_CHROOT_MOUNT
49609 + select GRKERNSEC_CHROOT_FCHDIR
49610 + select GRKERNSEC_CHROOT_PIVOT
49611 + select GRKERNSEC_CHROOT_DOUBLE
49612 + select GRKERNSEC_CHROOT_CHDIR
49613 + select GRKERNSEC_CHROOT_MKNOD
49614 + select GRKERNSEC_CHROOT_CAPS
49615 + select GRKERNSEC_CHROOT_SYSCTL
49616 + select GRKERNSEC_CHROOT_FINDTASK
49617 + select GRKERNSEC_SYSFS_RESTRICT
49618 + select GRKERNSEC_PROC
49619 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49620 + select GRKERNSEC_HIDESYM
49621 + select GRKERNSEC_BRUTE
49622 + select GRKERNSEC_PROC_USERGROUP
49623 + select GRKERNSEC_KMEM
49624 + select GRKERNSEC_RESLOG
49625 + select GRKERNSEC_RANDNET
49626 + select GRKERNSEC_PROC_ADD
49627 + select GRKERNSEC_CHROOT_CHMOD
49628 + select GRKERNSEC_CHROOT_NICE
49629 + select GRKERNSEC_SETXID
49630 + select GRKERNSEC_AUDIT_MOUNT
49631 + select GRKERNSEC_MODHARDEN if (MODULES)
49632 + select GRKERNSEC_HARDEN_PTRACE
49633 + select GRKERNSEC_PTRACE_READEXEC
49634 + select GRKERNSEC_VM86 if (X86_32)
49635 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49636 + select PAX
49637 + select PAX_RANDUSTACK
49638 + select PAX_ASLR
49639 + select PAX_RANDMMAP
49640 + select PAX_NOEXEC
49641 + select PAX_MPROTECT
49642 + select PAX_EI_PAX
49643 + select PAX_PT_PAX_FLAGS
49644 + select PAX_HAVE_ACL_FLAGS
49645 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
49646 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
49647 + select PAX_RANDKSTACK if (X86_TSC && X86)
49648 + select PAX_SEGMEXEC if (X86_32)
49649 + select PAX_PAGEEXEC
49650 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
49651 + select PAX_EMUTRAMP if (PARISC)
49652 + select PAX_EMUSIGRT if (PARISC)
49653 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
49654 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
49655 + select PAX_REFCOUNT if (X86 || SPARC64)
49656 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
49657 + help
49658 + If you say Y here, many of the features of grsecurity will be
49659 + enabled, which will protect you against many kinds of attacks
49660 + against your system. The heightened security comes at a cost
49661 + of an increased chance of incompatibilities with rare software
49662 + on your machine. Since this security level enables PaX, you should
49663 + view <http://pax.grsecurity.net> and read about the PaX
49664 + project. While you are there, download chpax and run it on
49665 + binaries that cause problems with PaX. Also remember that
49666 + since the /proc restrictions are enabled, you must run your
49667 + identd as gid 1001. This security level enables the following
49668 + features in addition to those listed in the low and medium
49669 + security levels:
49670 +
49671 + - Additional /proc restrictions
49672 + - Chmod restrictions in chroot
49673 + - No signals, ptrace, or viewing of processes outside of chroot
49674 + - Capability restrictions in chroot
49675 + - Deny fchdir out of chroot
49676 + - Priority restrictions in chroot
49677 + - Segmentation-based implementation of PaX
49678 + - Mprotect restrictions
49679 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
49680 + - Kernel stack randomization
49681 + - Mount/unmount/remount logging
49682 + - Kernel symbol hiding
49683 + - Hardening of module auto-loading
49684 + - Ptrace restrictions
49685 + - Restricted vm86 mode
49686 + - Restricted sysfs/debugfs
49687 + - Active kernel exploit response
49688 +
49689 +config GRKERNSEC_CUSTOM
49690 + bool "Custom"
49691 + help
49692 + If you say Y here, you will be able to configure every grsecurity
49693 + option, which allows you to enable many more features that aren't
49694 + covered in the basic security levels. These additional features
49695 + include TPE, socket restrictions, and the sysctl system for
49696 + grsecurity. It is advised that you read through the help for
49697 + each option to determine its usefulness in your situation.
49698 +
49699 +endchoice
49700 +
49701 +menu "Memory Protections"
49702 +depends on GRKERNSEC
49703 +
49704 +config GRKERNSEC_KMEM
49705 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49706 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49707 + help
49708 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49709 + be written to or read from to modify or leak the contents of the running
49710 + kernel. /dev/port will also not be allowed to be opened. If you have module
49711 + support disabled, enabling this will close up four ways that are
49712 + currently used to insert malicious code into the running kernel.
49713 + Even with all these features enabled, we still highly recommend that
49714 + you use the RBAC system, as it is still possible for an attacker to
49715 + modify the running kernel through privileged I/O granted by ioperm/iopl.
49716 + If you are not using XFree86, you may be able to stop this additional
49717 + case by enabling the 'Disable privileged I/O' option. Though nothing
49718 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49719 + but only to video memory, which is the only writing we allow in this
49720 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
49721 + not be allowed to mprotect it with PROT_WRITE later.
49722 + It is highly recommended that you say Y here if you meet all the
49723 + conditions above.
49724 +
49725 +config GRKERNSEC_VM86
49726 + bool "Restrict VM86 mode"
49727 + depends on X86_32
49728 +
49729 + help
49730 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49731 + make use of a special execution mode on 32bit x86 processors called
49732 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49733 + video cards and will still work with this option enabled. The purpose
49734 + of the option is to prevent exploitation of emulation errors in
49735 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
49736 + Nearly all users should be able to enable this option.
49737 +
49738 +config GRKERNSEC_IO
49739 + bool "Disable privileged I/O"
49740 + depends on X86
49741 + select RTC_CLASS
49742 + select RTC_INTF_DEV
49743 + select RTC_DRV_CMOS
49744 +
49745 + help
49746 + If you say Y here, all ioperm and iopl calls will return an error.
49747 + Ioperm and iopl can be used to modify the running kernel.
49748 + Unfortunately, some programs need this access to operate properly,
49749 + the most notable of which are XFree86 and hwclock. hwclock can be
49750 + remedied by having RTC support in the kernel, so real-time
49751 + clock support is enabled if this option is enabled, to ensure
49752 + that hwclock operates correctly. XFree86 still will not
49753 + operate correctly with this option enabled, so DO NOT CHOOSE Y
49754 + IF YOU USE XFree86. If you use XFree86 and you still want to
49755 + protect your kernel against modification, use the RBAC system.
49756 +
49757 +config GRKERNSEC_PROC_MEMMAP
49758 + bool "Harden ASLR against information leaks and entropy reduction"
49759 + default y if (PAX_NOEXEC || PAX_ASLR)
49760 + depends on PAX_NOEXEC || PAX_ASLR
49761 + help
49762 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49763 + give no information about the addresses of its mappings if
49764 + PaX features that rely on random addresses are enabled on the task.
49765 + In addition to sanitizing this information and disabling other
49766 + dangerous sources of information, this option causes reads of sensitive
49767 + /proc/<pid> entries where the file descriptor was opened in a different
49768 + task than the one performing the read. Such attempts are logged.
49769 + This option also limits argv/env strings for suid/sgid binaries
49770 + to 512KB to prevent a complete exhaustion of the stack entropy provided
49771 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
49772 + binaries to prevent alternative mmap layouts from being abused.
49773 +
49774 + If you use PaX it is essential that you say Y here as it closes up
49775 + several holes that make full ASLR useless locally.
49776 +
49777 +config GRKERNSEC_BRUTE
49778 + bool "Deter exploit bruteforcing"
49779 + help
49780 + If you say Y here, attempts to bruteforce exploits against forking
49781 + daemons such as apache or sshd, as well as against suid/sgid binaries
49782 + will be deterred. When a child of a forking daemon is killed by PaX
49783 + or crashes due to an illegal instruction or other suspicious signal,
49784 + the parent process will be delayed 30 seconds upon every subsequent
49785 + fork until the administrator is able to assess the situation and
49786 + restart the daemon.
49787 + In the suid/sgid case, the attempt is logged, the user has all their
49788 + processes terminated, and they are prevented from executing any further
49789 + processes for 15 minutes.
49790 + It is recommended that you also enable signal logging in the auditing
49791 + section so that logs are generated when a process triggers a suspicious
49792 + signal.
49793 + If the sysctl option is enabled, a sysctl option with name
49794 + "deter_bruteforce" is created.
49795 +
49796 +
49797 +config GRKERNSEC_MODHARDEN
49798 + bool "Harden module auto-loading"
49799 + depends on MODULES
49800 + help
49801 + If you say Y here, module auto-loading in response to use of some
49802 + feature implemented by an unloaded module will be restricted to
49803 + root users. Enabling this option helps defend against attacks
49804 + by unprivileged users who abuse the auto-loading behavior to
49805 + cause a vulnerable module to load that is then exploited.
49806 +
49807 + If this option prevents a legitimate use of auto-loading for a
49808 + non-root user, the administrator can execute modprobe manually
49809 + with the exact name of the module mentioned in the alert log.
49810 + Alternatively, the administrator can add the module to the list
49811 + of modules loaded at boot by modifying init scripts.
49812 +
49813 + Modification of init scripts will most likely be needed on
49814 + Ubuntu servers with encrypted home directory support enabled,
49815 + as the first non-root user logging in will cause the ecb(aes),
49816 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49817 +
49818 +config GRKERNSEC_HIDESYM
49819 + bool "Hide kernel symbols"
49820 + help
49821 + If you say Y here, getting information on loaded modules, and
49822 + displaying all kernel symbols through a syscall will be restricted
49823 + to users with CAP_SYS_MODULE. For software compatibility reasons,
49824 + /proc/kallsyms will be restricted to the root user. The RBAC
49825 + system can hide that entry even from root.
49826 +
49827 + This option also prevents leaking of kernel addresses through
49828 + several /proc entries.
49829 +
49830 + Note that this option is only effective provided the following
49831 + conditions are met:
49832 + 1) The kernel using grsecurity is not precompiled by some distribution
49833 + 2) You have also enabled GRKERNSEC_DMESG
49834 + 3) You are using the RBAC system and hiding other files such as your
49835 + kernel image and System.map. Alternatively, enabling this option
49836 + causes the permissions on /boot, /lib/modules, and the kernel
49837 + source directory to change at compile time to prevent
49838 + reading by non-root users.
49839 + If the above conditions are met, this option will aid in providing a
49840 + useful protection against local kernel exploitation of overflows
49841 + and arbitrary read/write vulnerabilities.
49842 +
49843 +config GRKERNSEC_KERN_LOCKOUT
49844 + bool "Active kernel exploit response"
49845 + depends on X86 || ARM || PPC || SPARC
49846 + help
49847 + If you say Y here, when a PaX alert is triggered due to suspicious
49848 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49849 + or an OOPs occurs due to bad memory accesses, instead of just
49850 + terminating the offending process (and potentially allowing
49851 + a subsequent exploit from the same user), we will take one of two
49852 + actions:
49853 + If the user was root, we will panic the system
49854 + If the user was non-root, we will log the attempt, terminate
49855 + all processes owned by the user, then prevent them from creating
49856 + any new processes until the system is restarted
49857 + This deters repeated kernel exploitation/bruteforcing attempts
49858 + and is useful for later forensics.
49859 +
49860 +endmenu
49861 +menu "Role Based Access Control Options"
49862 +depends on GRKERNSEC
49863 +
49864 +config GRKERNSEC_RBAC_DEBUG
49865 + bool
49866 +
49867 +config GRKERNSEC_NO_RBAC
49868 + bool "Disable RBAC system"
49869 + help
49870 + If you say Y here, the /dev/grsec device will be removed from the kernel,
49871 + preventing the RBAC system from being enabled. You should only say Y
49872 + here if you have no intention of using the RBAC system, so as to prevent
49873 + an attacker with root access from misusing the RBAC system to hide files
49874 + and processes when loadable module support and /dev/[k]mem have been
49875 + locked down.
49876 +
49877 +config GRKERNSEC_ACL_HIDEKERN
49878 + bool "Hide kernel processes"
49879 + help
49880 + If you say Y here, all kernel threads will be hidden to all
49881 + processes but those whose subject has the "view hidden processes"
49882 + flag.
49883 +
49884 +config GRKERNSEC_ACL_MAXTRIES
49885 + int "Maximum tries before password lockout"
49886 + default 3
49887 + help
49888 + This option enforces the maximum number of times a user can attempt
49889 + to authorize themselves with the grsecurity RBAC system before being
49890 + denied the ability to attempt authorization again for a specified time.
49891 + The lower the number, the harder it will be to brute-force a password.
49892 +
49893 +config GRKERNSEC_ACL_TIMEOUT
49894 + int "Time to wait after max password tries, in seconds"
49895 + default 30
49896 + help
49897 + This option specifies the time the user must wait after attempting to
49898 + authorize to the RBAC system with the maximum number of invalid
49899 + passwords. The higher the number, the harder it will be to brute-force
49900 + a password.
49901 +
49902 +endmenu
49903 +menu "Filesystem Protections"
49904 +depends on GRKERNSEC
49905 +
49906 +config GRKERNSEC_PROC
49907 + bool "Proc restrictions"
49908 + help
49909 + If you say Y here, the permissions of the /proc filesystem
49910 + will be altered to enhance system security and privacy. You MUST
49911 + choose either a user only restriction or a user and group restriction.
49912 + Depending upon the option you choose, you can either restrict users to
49913 + see only the processes they themselves run, or choose a group that can
49914 + view all processes and files normally restricted to root if you choose
49915 + the "restrict to user only" option. NOTE: If you're running identd or
49916 + ntpd as a non-root user, you will have to run it as the group you
49917 + specify here.
49918 +
49919 +config GRKERNSEC_PROC_USER
49920 + bool "Restrict /proc to user only"
49921 + depends on GRKERNSEC_PROC
49922 + help
49923 + If you say Y here, non-root users will only be able to view their own
49924 + processes, and restricts them from viewing network-related information,
49925 + and viewing kernel symbol and module information.
49926 +
49927 +config GRKERNSEC_PROC_USERGROUP
49928 + bool "Allow special group"
49929 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49930 + help
49931 + If you say Y here, you will be able to select a group that will be
49932 + able to view all processes and network-related information. If you've
49933 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49934 + remain hidden. This option is useful if you want to run identd as
49935 + a non-root user.
49936 +
49937 +config GRKERNSEC_PROC_GID
49938 + int "GID for special group"
49939 + depends on GRKERNSEC_PROC_USERGROUP
49940 + default 1001
49941 +
49942 +config GRKERNSEC_PROC_ADD
49943 + bool "Additional restrictions"
49944 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49945 + help
49946 + If you say Y here, additional restrictions will be placed on
49947 + /proc that keep normal users from viewing device information and
49948 + slabinfo information that could be useful for exploits.
49949 +
49950 +config GRKERNSEC_LINK
49951 + bool "Linking restrictions"
49952 + help
49953 + If you say Y here, /tmp race exploits will be prevented, since users
49954 + will no longer be able to follow symlinks owned by other users in
49955 + world-writable +t directories (e.g. /tmp), unless the owner of the
49956 + symlink is the owner of the directory. Users will also not be
49957 + able to hardlink to files they do not own. If the sysctl option is
49958 + enabled, a sysctl option with name "linking_restrictions" is created.
49959 +
49960 +config GRKERNSEC_FIFO
49961 + bool "FIFO restrictions"
49962 + help
49963 + If you say Y here, users will not be able to write to FIFOs they don't
49964 + own in world-writable +t directories (e.g. /tmp), unless the owner of
49965 + the FIFO is the same owner of the directory it's held in. If the sysctl
49966 + option is enabled, a sysctl option with name "fifo_restrictions" is
49967 + created.
49968 +
49969 +config GRKERNSEC_SYSFS_RESTRICT
49970 + bool "Sysfs/debugfs restriction"
49971 + depends on SYSFS
49972 + help
49973 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
49974 + any filesystem normally mounted under it (e.g. debugfs) will be
49975 + mostly accessible only by root. These filesystems generally provide access
49976 + to hardware and debug information that isn't appropriate for unprivileged
49977 + users of the system. Sysfs and debugfs have also become a large source
49978 + of new vulnerabilities, ranging from infoleaks to local compromise.
49979 + There has been very little oversight with an eye toward security involved
49980 + in adding new exporters of information to these filesystems, so their
49981 + use is discouraged.
49982 + For reasons of compatibility, a few directories have been whitelisted
49983 + for access by non-root users:
49984 + /sys/fs/selinux
49985 + /sys/fs/fuse
49986 + /sys/devices/system/cpu
49987 +
49988 +config GRKERNSEC_ROFS
49989 + bool "Runtime read-only mount protection"
49990 + help
49991 + If you say Y here, a sysctl option with name "romount_protect" will
49992 + be created. By setting this option to 1 at runtime, filesystems
49993 + will be protected in the following ways:
49994 + * No new writable mounts will be allowed
49995 + * Existing read-only mounts won't be able to be remounted read/write
49996 + * Write operations will be denied on all block devices
49997 + This option acts independently of grsec_lock: once it is set to 1,
49998 + it cannot be turned off. Therefore, please be mindful of the resulting
49999 + behavior if this option is enabled in an init script on a read-only
50000 + filesystem. This feature is mainly intended for secure embedded systems.
50001 +
50002 +config GRKERNSEC_CHROOT
50003 + bool "Chroot jail restrictions"
50004 + help
50005 + If you say Y here, you will be able to choose several options that will
50006 + make breaking out of a chrooted jail much more difficult. If you
50007 + encounter no software incompatibilities with the following options, it
50008 + is recommended that you enable each one.
50009 +
50010 +config GRKERNSEC_CHROOT_MOUNT
50011 + bool "Deny mounts"
50012 + depends on GRKERNSEC_CHROOT
50013 + help
50014 + If you say Y here, processes inside a chroot will not be able to
50015 + mount or remount filesystems. If the sysctl option is enabled, a
50016 + sysctl option with name "chroot_deny_mount" is created.
50017 +
50018 +config GRKERNSEC_CHROOT_DOUBLE
50019 + bool "Deny double-chroots"
50020 + depends on GRKERNSEC_CHROOT
50021 + help
50022 + If you say Y here, processes inside a chroot will not be able to chroot
50023 + again outside the chroot. This is a widely used method of breaking
50024 + out of a chroot jail and should not be allowed. If the sysctl
50025 + option is enabled, a sysctl option with name
50026 + "chroot_deny_chroot" is created.
50027 +
50028 +config GRKERNSEC_CHROOT_PIVOT
50029 + bool "Deny pivot_root in chroot"
50030 + depends on GRKERNSEC_CHROOT
50031 + help
50032 + If you say Y here, processes inside a chroot will not be able to use
50033 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50034 + works similar to chroot in that it changes the root filesystem. This
50035 + function could be misused in a chrooted process to attempt to break out
50036 + of the chroot, and therefore should not be allowed. If the sysctl
50037 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50038 + created.
50039 +
50040 +config GRKERNSEC_CHROOT_CHDIR
50041 + bool "Enforce chdir(\"/\") on all chroots"
50042 + depends on GRKERNSEC_CHROOT
50043 + help
50044 + If you say Y here, the current working directory of all newly-chrooted
50045 + applications will be set to the root directory of the chroot.
50046 + The man page on chroot(2) states:
50047 + Note that this call does not change the current working
50048 + directory, so that `.' can be outside the tree rooted at
50049 + `/'. In particular, the super-user can escape from a
50050 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50051 +
50052 + It is recommended that you say Y here, since it's not known to break
50053 + any software. If the sysctl option is enabled, a sysctl option with
50054 + name "chroot_enforce_chdir" is created.
50055 +
50056 +config GRKERNSEC_CHROOT_CHMOD
50057 + bool "Deny (f)chmod +s"
50058 + depends on GRKERNSEC_CHROOT
50059 + help
50060 + If you say Y here, processes inside a chroot will not be able to chmod
50061 + or fchmod files to make them have suid or sgid bits. This protects
50062 + against another published method of breaking a chroot. If the sysctl
50063 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50064 + created.
50065 +
50066 +config GRKERNSEC_CHROOT_FCHDIR
50067 + bool "Deny fchdir out of chroot"
50068 + depends on GRKERNSEC_CHROOT
50069 + help
50070 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50071 + to a file descriptor of the chrooting process that points to a directory
50072 + outside the filesystem will be stopped. If the sysctl option
50073 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50074 +
50075 +config GRKERNSEC_CHROOT_MKNOD
50076 + bool "Deny mknod"
50077 + depends on GRKERNSEC_CHROOT
50078 + help
50079 + If you say Y here, processes inside a chroot will not be allowed to
50080 + mknod. The problem with using mknod inside a chroot is that it
50081 + would allow an attacker to create a device entry that is the same
50082 + as one on the physical root of your system, which could range from
50083 + anything from the console device to a device for your harddrive (which
50084 + they could then use to wipe the drive or steal data). It is recommended
50085 + that you say Y here, unless you run into software incompatibilities.
50086 + If the sysctl option is enabled, a sysctl option with name
50087 + "chroot_deny_mknod" is created.
50088 +
50089 +config GRKERNSEC_CHROOT_SHMAT
50090 + bool "Deny shmat() out of chroot"
50091 + depends on GRKERNSEC_CHROOT
50092 + help
50093 + If you say Y here, processes inside a chroot will not be able to attach
50094 + to shared memory segments that were created outside of the chroot jail.
50095 + It is recommended that you say Y here. If the sysctl option is enabled,
50096 + a sysctl option with name "chroot_deny_shmat" is created.
50097 +
50098 +config GRKERNSEC_CHROOT_UNIX
50099 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50100 + depends on GRKERNSEC_CHROOT
50101 + help
50102 + If you say Y here, processes inside a chroot will not be able to
50103 + connect to abstract (meaning not belonging to a filesystem) Unix
50104 + domain sockets that were bound outside of a chroot. It is recommended
50105 + that you say Y here. If the sysctl option is enabled, a sysctl option
50106 + with name "chroot_deny_unix" is created.
50107 +
50108 +config GRKERNSEC_CHROOT_FINDTASK
50109 + bool "Protect outside processes"
50110 + depends on GRKERNSEC_CHROOT
50111 + help
50112 + If you say Y here, processes inside a chroot will not be able to
50113 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50114 + getsid, or view any process outside of the chroot. If the sysctl
50115 + option is enabled, a sysctl option with name "chroot_findtask" is
50116 + created.
50117 +
50118 +config GRKERNSEC_CHROOT_NICE
50119 + bool "Restrict priority changes"
50120 + depends on GRKERNSEC_CHROOT
50121 + help
50122 + If you say Y here, processes inside a chroot will not be able to raise
50123 + the priority of processes in the chroot, or alter the priority of
50124 + processes outside the chroot. This provides more security than simply
50125 + removing CAP_SYS_NICE from the process' capability set. If the
50126 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50127 + is created.
50128 +
50129 +config GRKERNSEC_CHROOT_SYSCTL
50130 + bool "Deny sysctl writes"
50131 + depends on GRKERNSEC_CHROOT
50132 + help
50133 + If you say Y here, an attacker in a chroot will not be able to
50134 + write to sysctl entries, either by sysctl(2) or through a /proc
50135 + interface. It is strongly recommended that you say Y here. If the
50136 + sysctl option is enabled, a sysctl option with name
50137 + "chroot_deny_sysctl" is created.
50138 +
50139 +config GRKERNSEC_CHROOT_CAPS
50140 + bool "Capability restrictions"
50141 + depends on GRKERNSEC_CHROOT
50142 + help
50143 + If you say Y here, the capabilities on all processes within a
50144 + chroot jail will be lowered to stop module insertion, raw i/o,
50145 + system and net admin tasks, rebooting the system, modifying immutable
50146 + files, modifying IPC owned by another, and changing the system time.
50147 + This is left an option because it can break some apps. Disable this
50148 + if your chrooted apps are having problems performing those kinds of
50149 + tasks. If the sysctl option is enabled, a sysctl option with
50150 + name "chroot_caps" is created.
50151 +
50152 +endmenu
50153 +menu "Kernel Auditing"
50154 +depends on GRKERNSEC
50155 +
50156 +config GRKERNSEC_AUDIT_GROUP
50157 + bool "Single group for auditing"
50158 + help
50159 + If you say Y here, the exec, chdir, and (un)mount logging features
50160 + will only operate on a group you specify. This option is recommended
50161 + if you only want to watch certain users instead of having a large
50162 + amount of logs from the entire system. If the sysctl option is enabled,
50163 + a sysctl option with name "audit_group" is created.
50164 +
50165 +config GRKERNSEC_AUDIT_GID
50166 + int "GID for auditing"
50167 + depends on GRKERNSEC_AUDIT_GROUP
50168 + default 1007
50169 +
50170 +config GRKERNSEC_EXECLOG
50171 + bool "Exec logging"
50172 + help
50173 + If you say Y here, all execve() calls will be logged (since the
50174 + other exec*() calls are frontends to execve(), all execution
50175 + will be logged). Useful for shell-servers that like to keep track
50176 + of their users. If the sysctl option is enabled, a sysctl option with
50177 + name "exec_logging" is created.
50178 + WARNING: This option when enabled will produce a LOT of logs, especially
50179 + on an active system.
50180 +
50181 +config GRKERNSEC_RESLOG
50182 + bool "Resource logging"
50183 + help
50184 + If you say Y here, all attempts to overstep resource limits will
50185 + be logged with the resource name, the requested size, and the current
50186 + limit. It is highly recommended that you say Y here. If the sysctl
50187 + option is enabled, a sysctl option with name "resource_logging" is
50188 + created. If the RBAC system is enabled, the sysctl value is ignored.
50189 +
50190 +config GRKERNSEC_CHROOT_EXECLOG
50191 + bool "Log execs within chroot"
50192 + help
50193 + If you say Y here, all executions inside a chroot jail will be logged
50194 + to syslog. This can cause a large amount of logs if certain
50195 + applications (eg. djb's daemontools) are installed on the system, and
50196 + is therefore left as an option. If the sysctl option is enabled, a
50197 + sysctl option with name "chroot_execlog" is created.
50198 +
50199 +config GRKERNSEC_AUDIT_PTRACE
50200 + bool "Ptrace logging"
50201 + help
50202 + If you say Y here, all attempts to attach to a process via ptrace
50203 + will be logged. If the sysctl option is enabled, a sysctl option
50204 + with name "audit_ptrace" is created.
50205 +
50206 +config GRKERNSEC_AUDIT_CHDIR
50207 + bool "Chdir logging"
50208 + help
50209 + If you say Y here, all chdir() calls will be logged. If the sysctl
50210 + option is enabled, a sysctl option with name "audit_chdir" is created.
50211 +
50212 +config GRKERNSEC_AUDIT_MOUNT
50213 + bool "(Un)Mount logging"
50214 + help
50215 + If you say Y here, all mounts and unmounts will be logged. If the
50216 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50217 + created.
50218 +
50219 +config GRKERNSEC_SIGNAL
50220 + bool "Signal logging"
50221 + help
50222 + If you say Y here, certain important signals will be logged, such as
50223 + SIGSEGV, which will as a result inform you of when an error in a program
50224 + occurred, which in some cases could mean a possible exploit attempt.
50225 + If the sysctl option is enabled, a sysctl option with name
50226 + "signal_logging" is created.
50227 +
50228 +config GRKERNSEC_FORKFAIL
50229 + bool "Fork failure logging"
50230 + help
50231 + If you say Y here, all failed fork() attempts will be logged.
50232 + This could suggest a fork bomb, or someone attempting to overstep
50233 + their process limit. If the sysctl option is enabled, a sysctl option
50234 + with name "forkfail_logging" is created.
50235 +
50236 +config GRKERNSEC_TIME
50237 + bool "Time change logging"
50238 + help
50239 + If you say Y here, any changes of the system clock will be logged.
50240 + If the sysctl option is enabled, a sysctl option with name
50241 + "timechange_logging" is created.
50242 +
50243 +config GRKERNSEC_PROC_IPADDR
50244 + bool "/proc/<pid>/ipaddr support"
50245 + help
50246 + If you say Y here, a new entry will be added to each /proc/<pid>
50247 + directory that contains the IP address of the person using the task.
50248 + The IP is carried across local TCP and AF_UNIX stream sockets.
50249 + This information can be useful for IDS/IPSes to perform remote response
50250 + to a local attack. The entry is readable by only the owner of the
50251 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50252 + the RBAC system), and thus does not create privacy concerns.
50253 +
50254 +config GRKERNSEC_RWXMAP_LOG
50255 + bool 'Denied RWX mmap/mprotect logging'
50256 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50257 + help
50258 + If you say Y here, calls to mmap() and mprotect() with explicit
50259 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50260 + denied by the PAX_MPROTECT feature. If the sysctl option is
50261 + enabled, a sysctl option with name "rwxmap_logging" is created.
50262 +
50263 +config GRKERNSEC_AUDIT_TEXTREL
50264 + bool 'ELF text relocations logging (READ HELP)'
50265 + depends on PAX_MPROTECT
50266 + help
50267 + If you say Y here, text relocations will be logged with the filename
50268 + of the offending library or binary. The purpose of the feature is
50269 + to help Linux distribution developers get rid of libraries and
50270 + binaries that need text relocations which hinder the future progress
50271 + of PaX. Only Linux distribution developers should say Y here, and
50272 + never on a production machine, as this option creates an information
50273 + leak that could aid an attacker in defeating the randomization of
50274 + a single memory region. If the sysctl option is enabled, a sysctl
50275 + option with name "audit_textrel" is created.
50276 +
50277 +endmenu
50278 +
50279 +menu "Executable Protections"
50280 +depends on GRKERNSEC
50281 +
50282 +config GRKERNSEC_DMESG
50283 + bool "Dmesg(8) restriction"
50284 + help
50285 + If you say Y here, non-root users will not be able to use dmesg(8)
50286 + to view up to the last 4kb of messages in the kernel's log buffer.
50287 + The kernel's log buffer often contains kernel addresses and other
50288 + identifying information useful to an attacker in fingerprinting a
50289 + system for a targeted exploit.
50290 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50291 + created.
50292 +
50293 +config GRKERNSEC_HARDEN_PTRACE
50294 + bool "Deter ptrace-based process snooping"
50295 + help
50296 + If you say Y here, TTY sniffers and other malicious monitoring
50297 + programs implemented through ptrace will be defeated. If you
50298 + have been using the RBAC system, this option has already been
50299 + enabled for several years for all users, with the ability to make
50300 + fine-grained exceptions.
50301 +
50302 + This option only affects the ability of non-root users to ptrace
50303 + processes that are not a descendent of the ptracing process.
50304 + This means that strace ./binary and gdb ./binary will still work,
50305 + but attaching to arbitrary processes will not. If the sysctl
50306 + option is enabled, a sysctl option with name "harden_ptrace" is
50307 + created.
50308 +
50309 +config GRKERNSEC_PTRACE_READEXEC
50310 + bool "Require read access to ptrace sensitive binaries"
50311 + help
50312 + If you say Y here, unprivileged users will not be able to ptrace unreadable
50313 + binaries. This option is useful in environments that
50314 + remove the read bits (e.g. file mode 4711) from suid binaries to
50315 + prevent infoleaking of their contents. This option adds
50316 + consistency to the use of that file mode, as the binary could normally
50317 + be read out when run without privileges while ptracing.
50318 +
50319 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
50320 + is created.
50321 +
50322 +config GRKERNSEC_SETXID
50323 + bool "Enforce consistent multithreaded privileges"
50324 + help
50325 + If you say Y here, a change from a root uid to a non-root uid
50326 + in a multithreaded application will cause the resulting uids,
50327 + gids, supplementary groups, and capabilities in that thread
50328 + to be propagated to the other threads of the process. In most
50329 + cases this is unnecessary, as glibc will emulate this behavior
50330 + on behalf of the application. Other libcs do not act in the
50331 + same way, allowing the other threads of the process to continue
50332 + running with root privileges. If the sysctl option is enabled,
50333 + a sysctl option with name "consistent_setxid" is created.
50334 +
50335 +config GRKERNSEC_TPE
50336 + bool "Trusted Path Execution (TPE)"
50337 + help
50338 + If you say Y here, you will be able to choose a gid to add to the
50339 + supplementary groups of users you want to mark as "untrusted."
50340 + These users will not be able to execute any files that are not in
50341 + root-owned directories writable only by root. If the sysctl option
50342 + is enabled, a sysctl option with name "tpe" is created.
50343 +
50344 +config GRKERNSEC_TPE_ALL
50345 + bool "Partially restrict all non-root users"
50346 + depends on GRKERNSEC_TPE
50347 + help
50348 + If you say Y here, all non-root users will be covered under
50349 + a weaker TPE restriction. This is separate from, and in addition to,
50350 + the main TPE options that you have selected elsewhere. Thus, if a
50351 + "trusted" GID is chosen, this restriction applies to even that GID.
50352 + Under this restriction, all non-root users will only be allowed to
50353 + execute files in directories they own that are not group or
50354 + world-writable, or in directories owned by root and writable only by
50355 + root. If the sysctl option is enabled, a sysctl option with name
50356 + "tpe_restrict_all" is created.
50357 +
50358 +config GRKERNSEC_TPE_INVERT
50359 + bool "Invert GID option"
50360 + depends on GRKERNSEC_TPE
50361 + help
50362 + If you say Y here, the group you specify in the TPE configuration will
50363 + decide what group TPE restrictions will be *disabled* for. This
50364 + option is useful if you want TPE restrictions to be applied to most
50365 + users on the system. If the sysctl option is enabled, a sysctl option
50366 + with name "tpe_invert" is created. Unlike other sysctl options, this
50367 + entry will default to on for backward-compatibility.
50368 +
50369 +config GRKERNSEC_TPE_GID
50370 + int "GID for untrusted users"
50371 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50372 + default 1005
50373 + help
50374 + Setting this GID determines what group TPE restrictions will be
50375 + *enabled* for. If the sysctl option is enabled, a sysctl option
50376 + with name "tpe_gid" is created.
50377 +
50378 +config GRKERNSEC_TPE_GID
50379 + int "GID for trusted users"
50380 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50381 + default 1005
50382 + help
50383 + Setting this GID determines what group TPE restrictions will be
50384 + *disabled* for. If the sysctl option is enabled, a sysctl option
50385 + with name "tpe_gid" is created.
50386 +
50387 +endmenu
50388 +menu "Network Protections"
50389 +depends on GRKERNSEC
50390 +
50391 +config GRKERNSEC_RANDNET
50392 + bool "Larger entropy pools"
50393 + help
50394 + If you say Y here, the entropy pools used for many features of Linux
50395 + and grsecurity will be doubled in size. Since several grsecurity
50396 + features use additional randomness, it is recommended that you say Y
50397 + here. Saying Y here has a similar effect as modifying
50398 + /proc/sys/kernel/random/poolsize.
50399 +
50400 +config GRKERNSEC_BLACKHOLE
50401 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50402 + depends on NET
50403 + help
50404 + If you say Y here, neither TCP resets nor ICMP
50405 + destination-unreachable packets will be sent in response to packets
50406 + sent to ports for which no associated listening process exists.
50407 + This feature supports both IPV4 and IPV6 and exempts the
50408 + loopback interface from blackholing. Enabling this feature
50409 + makes a host more resilient to DoS attacks and reduces network
50410 + visibility against scanners.
50411 +
50412 + The blackhole feature as-implemented is equivalent to the FreeBSD
50413 + blackhole feature, as it prevents RST responses to all packets, not
50414 + just SYNs. Under most application behavior this causes no
50415 + problems, but applications (like haproxy) may not close certain
50416 + connections in a way that cleanly terminates them on the remote
50417 + end, leaving the remote host in LAST_ACK state. Because of this
50418 + side-effect and to prevent intentional LAST_ACK DoSes, this
50419 + feature also adds automatic mitigation against such attacks.
50420 + The mitigation drastically reduces the amount of time a socket
50421 + can spend in LAST_ACK state. If you're using haproxy and not
50422 + all servers it connects to have this option enabled, consider
50423 + disabling this feature on the haproxy host.
50424 +
50425 + If the sysctl option is enabled, two sysctl options with names
50426 + "ip_blackhole" and "lastack_retries" will be created.
50427 + While "ip_blackhole" takes the standard zero/non-zero on/off
50428 + toggle, "lastack_retries" uses the same kinds of values as
50429 + "tcp_retries1" and "tcp_retries2". The default value of 4
50430 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50431 + state.
50432 +
50433 +config GRKERNSEC_SOCKET
50434 + bool "Socket restrictions"
50435 + depends on NET
50436 + help
50437 + If you say Y here, you will be able to choose from several options.
50438 + If you assign a GID on your system and add it to the supplementary
50439 + groups of users you want to restrict socket access to, this patch
50440 + will perform up to three things, based on the option(s) you choose.
50441 +
50442 +config GRKERNSEC_SOCKET_ALL
50443 + bool "Deny any sockets to group"
50444 + depends on GRKERNSEC_SOCKET
50445 + help
50446 + If you say Y here, you will be able to choose a GID of whose users will
50447 + be unable to connect to other hosts from your machine or run server
50448 + applications from your machine. If the sysctl option is enabled, a
50449 + sysctl option with name "socket_all" is created.
50450 +
50451 +config GRKERNSEC_SOCKET_ALL_GID
50452 + int "GID to deny all sockets for"
50453 + depends on GRKERNSEC_SOCKET_ALL
50454 + default 1004
50455 + help
50456 + Here you can choose the GID to disable socket access for. Remember to
50457 + add the users you want socket access disabled for to the GID
50458 + specified here. If the sysctl option is enabled, a sysctl option
50459 + with name "socket_all_gid" is created.
50460 +
50461 +config GRKERNSEC_SOCKET_CLIENT
50462 + bool "Deny client sockets to group"
50463 + depends on GRKERNSEC_SOCKET
50464 + help
50465 + If you say Y here, you will be able to choose a GID of whose users will
50466 + be unable to connect to other hosts from your machine, but will be
50467 + able to run servers. If this option is enabled, all users in the group
50468 + you specify will have to use passive mode when initiating ftp transfers
50469 + from the shell on your machine. If the sysctl option is enabled, a
50470 + sysctl option with name "socket_client" is created.
50471 +
50472 +config GRKERNSEC_SOCKET_CLIENT_GID
50473 + int "GID to deny client sockets for"
50474 + depends on GRKERNSEC_SOCKET_CLIENT
50475 + default 1003
50476 + help
50477 + Here you can choose the GID to disable client socket access for.
50478 + Remember to add the users you want client socket access disabled for to
50479 + the GID specified here. If the sysctl option is enabled, a sysctl
50480 + option with name "socket_client_gid" is created.
50481 +
50482 +config GRKERNSEC_SOCKET_SERVER
50483 + bool "Deny server sockets to group"
50484 + depends on GRKERNSEC_SOCKET
50485 + help
50486 + If you say Y here, you will be able to choose a GID of whose users will
50487 + be unable to run server applications from your machine. If the sysctl
50488 + option is enabled, a sysctl option with name "socket_server" is created.
50489 +
50490 +config GRKERNSEC_SOCKET_SERVER_GID
50491 + int "GID to deny server sockets for"
50492 + depends on GRKERNSEC_SOCKET_SERVER
50493 + default 1002
50494 + help
50495 + Here you can choose the GID to disable server socket access for.
50496 + Remember to add the users you want server socket access disabled for to
50497 + the GID specified here. If the sysctl option is enabled, a sysctl
50498 + option with name "socket_server_gid" is created.
50499 +
50500 +endmenu
50501 +menu "Sysctl support"
50502 +depends on GRKERNSEC && SYSCTL
50503 +
50504 +config GRKERNSEC_SYSCTL
50505 + bool "Sysctl support"
50506 + help
50507 + If you say Y here, you will be able to change the options that
50508 + grsecurity runs with at bootup, without having to recompile your
50509 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50510 + to enable (1) or disable (0) various features. All the sysctl entries
50511 + are mutable until the "grsec_lock" entry is set to a non-zero value.
50512 + All features enabled in the kernel configuration are disabled at boot
50513 + if you do not say Y to the "Turn on features by default" option.
50514 + All options should be set at startup, and the grsec_lock entry should
50515 + be set to a non-zero value after all the options are set.
50516 + *THIS IS EXTREMELY IMPORTANT*
50517 +
50518 +config GRKERNSEC_SYSCTL_DISTRO
50519 + bool "Extra sysctl support for distro makers (READ HELP)"
50520 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50521 + help
50522 + If you say Y here, additional sysctl options will be created
50523 + for features that affect processes running as root. Therefore,
50524 + it is critical when using this option that the grsec_lock entry be
50525 + enabled after boot. Only distros with prebuilt kernel packages
50526 + with this option enabled that can ensure grsec_lock is enabled
50527 + after boot should use this option.
50528 + *Failure to set grsec_lock after boot makes all grsec features
50529 + this option covers useless*
50530 +
50531 + Currently this option creates the following sysctl entries:
50532 + "Disable Privileged I/O": "disable_priv_io"
50533 +
50534 +config GRKERNSEC_SYSCTL_ON
50535 + bool "Turn on features by default"
50536 + depends on GRKERNSEC_SYSCTL
50537 + help
50538 + If you say Y here, instead of having all features enabled in the
50539 + kernel configuration disabled at boot time, the features will be
50540 + enabled at boot time. It is recommended you say Y here unless
50541 + there is some reason you would want all sysctl-tunable features to
50542 + be disabled by default. As mentioned elsewhere, it is important
50543 + to enable the grsec_lock entry once you have finished modifying
50544 + the sysctl entries.
50545 +
50546 +endmenu
50547 +menu "Logging Options"
50548 +depends on GRKERNSEC
50549 +
50550 +config GRKERNSEC_FLOODTIME
50551 + int "Seconds in between log messages (minimum)"
50552 + default 10
50553 + help
50554 + This option allows you to enforce the number of seconds between
50555 + grsecurity log messages. The default should be suitable for most
50556 + people, however, if you choose to change it, choose a value small enough
50557 + to allow informative logs to be produced, but large enough to
50558 + prevent flooding.
50559 +
50560 +config GRKERNSEC_FLOODBURST
50561 + int "Number of messages in a burst (maximum)"
50562 + default 6
50563 + help
50564 + This option allows you to choose the maximum number of messages allowed
50565 + within the flood time interval you chose in a separate option. The
50566 + default should be suitable for most people, however if you find that
50567 + many of your logs are being interpreted as flooding, you may want to
50568 + raise this value.
50569 +
50570 +endmenu
50571 +
50572 +endmenu
50573 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50574 new file mode 100644
50575 index 0000000..1b9afa9
50576 --- /dev/null
50577 +++ b/grsecurity/Makefile
50578 @@ -0,0 +1,38 @@
50579 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50580 +# during 2001-2009 it has been completely redesigned by Brad Spengler
50581 +# into an RBAC system
50582 +#
50583 +# All code in this directory and various hooks inserted throughout the kernel
50584 +# are copyright Brad Spengler - Open Source Security, Inc., and released
50585 +# under the GPL v2 or higher
50586 +
50587 +KBUILD_CFLAGS += -Werror
50588 +
50589 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50590 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
50591 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50592 +
50593 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50594 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50595 + gracl_learn.o grsec_log.o
50596 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50597 +
50598 +ifdef CONFIG_NET
50599 +obj-y += grsec_sock.o
50600 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50601 +endif
50602 +
50603 +ifndef CONFIG_GRKERNSEC
50604 +obj-y += grsec_disabled.o
50605 +endif
50606 +
50607 +ifdef CONFIG_GRKERNSEC_HIDESYM
50608 +extra-y := grsec_hidesym.o
50609 +$(obj)/grsec_hidesym.o:
50610 + @-chmod -f 500 /boot
50611 + @-chmod -f 500 /lib/modules
50612 + @-chmod -f 500 /lib64/modules
50613 + @-chmod -f 500 /lib32/modules
50614 + @-chmod -f 700 .
50615 + @echo ' grsec: protected kernel image paths'
50616 +endif
50617 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50618 new file mode 100644
50619 index 0000000..42813ac
50620 --- /dev/null
50621 +++ b/grsecurity/gracl.c
50622 @@ -0,0 +1,4192 @@
50623 +#include <linux/kernel.h>
50624 +#include <linux/module.h>
50625 +#include <linux/sched.h>
50626 +#include <linux/mm.h>
50627 +#include <linux/file.h>
50628 +#include <linux/fs.h>
50629 +#include <linux/namei.h>
50630 +#include <linux/mount.h>
50631 +#include <linux/tty.h>
50632 +#include <linux/proc_fs.h>
50633 +#include <linux/lglock.h>
50634 +#include <linux/slab.h>
50635 +#include <linux/vmalloc.h>
50636 +#include <linux/types.h>
50637 +#include <linux/sysctl.h>
50638 +#include <linux/netdevice.h>
50639 +#include <linux/ptrace.h>
50640 +#include <linux/gracl.h>
50641 +#include <linux/gralloc.h>
50642 +#include <linux/security.h>
50643 +#include <linux/grinternal.h>
50644 +#include <linux/pid_namespace.h>
50645 +#include <linux/fdtable.h>
50646 +#include <linux/percpu.h>
50647 +#include "../fs/mount.h"
50648 +
50649 +#include <asm/uaccess.h>
50650 +#include <asm/errno.h>
50651 +#include <asm/mman.h>
50652 +
50653 +static struct acl_role_db acl_role_set;
50654 +static struct name_db name_set;
50655 +static struct inodev_db inodev_set;
50656 +
50657 +/* for keeping track of userspace pointers used for subjects, so we
50658 + can share references in the kernel as well
50659 +*/
50660 +
50661 +static struct path real_root;
50662 +
50663 +static struct acl_subj_map_db subj_map_set;
50664 +
50665 +static struct acl_role_label *default_role;
50666 +
50667 +static struct acl_role_label *role_list;
50668 +
50669 +static u16 acl_sp_role_value;
50670 +
50671 +extern char *gr_shared_page[4];
50672 +static DEFINE_MUTEX(gr_dev_mutex);
50673 +DEFINE_RWLOCK(gr_inode_lock);
50674 +
50675 +struct gr_arg *gr_usermode;
50676 +
50677 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
50678 +
50679 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50680 +extern void gr_clear_learn_entries(void);
50681 +
50682 +#ifdef CONFIG_GRKERNSEC_RESLOG
50683 +extern void gr_log_resource(const struct task_struct *task,
50684 + const int res, const unsigned long wanted, const int gt);
50685 +#endif
50686 +
50687 +unsigned char *gr_system_salt;
50688 +unsigned char *gr_system_sum;
50689 +
50690 +static struct sprole_pw **acl_special_roles = NULL;
50691 +static __u16 num_sprole_pws = 0;
50692 +
50693 +static struct acl_role_label *kernel_role = NULL;
50694 +
50695 +static unsigned int gr_auth_attempts = 0;
50696 +static unsigned long gr_auth_expires = 0UL;
50697 +
50698 +#ifdef CONFIG_NET
50699 +extern struct vfsmount *sock_mnt;
50700 +#endif
50701 +
50702 +extern struct vfsmount *pipe_mnt;
50703 +extern struct vfsmount *shm_mnt;
50704 +#ifdef CONFIG_HUGETLBFS
50705 +extern struct vfsmount *hugetlbfs_vfsmount;
50706 +#endif
50707 +
50708 +static struct acl_object_label *fakefs_obj_rw;
50709 +static struct acl_object_label *fakefs_obj_rwx;
50710 +
50711 +extern int gr_init_uidset(void);
50712 +extern void gr_free_uidset(void);
50713 +extern void gr_remove_uid(uid_t uid);
50714 +extern int gr_find_uid(uid_t uid);
50715 +
50716 +DECLARE_BRLOCK(vfsmount_lock);
50717 +
50718 +__inline__ int
50719 +gr_acl_is_enabled(void)
50720 +{
50721 + return (gr_status & GR_READY);
50722 +}
50723 +
50724 +#ifdef CONFIG_BTRFS_FS
50725 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50726 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50727 +#endif
50728 +
50729 +static inline dev_t __get_dev(const struct dentry *dentry)
50730 +{
50731 +#ifdef CONFIG_BTRFS_FS
50732 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50733 + return get_btrfs_dev_from_inode(dentry->d_inode);
50734 + else
50735 +#endif
50736 + return dentry->d_inode->i_sb->s_dev;
50737 +}
50738 +
50739 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50740 +{
50741 + return __get_dev(dentry);
50742 +}
50743 +
50744 +static char gr_task_roletype_to_char(struct task_struct *task)
50745 +{
50746 + switch (task->role->roletype &
50747 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50748 + GR_ROLE_SPECIAL)) {
50749 + case GR_ROLE_DEFAULT:
50750 + return 'D';
50751 + case GR_ROLE_USER:
50752 + return 'U';
50753 + case GR_ROLE_GROUP:
50754 + return 'G';
50755 + case GR_ROLE_SPECIAL:
50756 + return 'S';
50757 + }
50758 +
50759 + return 'X';
50760 +}
50761 +
50762 +char gr_roletype_to_char(void)
50763 +{
50764 + return gr_task_roletype_to_char(current);
50765 +}
50766 +
50767 +__inline__ int
50768 +gr_acl_tpe_check(void)
50769 +{
50770 + if (unlikely(!(gr_status & GR_READY)))
50771 + return 0;
50772 + if (current->role->roletype & GR_ROLE_TPE)
50773 + return 1;
50774 + else
50775 + return 0;
50776 +}
50777 +
50778 +int
50779 +gr_handle_rawio(const struct inode *inode)
50780 +{
50781 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50782 + if (inode && S_ISBLK(inode->i_mode) &&
50783 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50784 + !capable(CAP_SYS_RAWIO))
50785 + return 1;
50786 +#endif
50787 + return 0;
50788 +}
50789 +
50790 +static int
50791 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50792 +{
50793 + if (likely(lena != lenb))
50794 + return 0;
50795 +
50796 + return !memcmp(a, b, lena);
50797 +}
50798 +
50799 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50800 +{
50801 + *buflen -= namelen;
50802 + if (*buflen < 0)
50803 + return -ENAMETOOLONG;
50804 + *buffer -= namelen;
50805 + memcpy(*buffer, str, namelen);
50806 + return 0;
50807 +}
50808 +
50809 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50810 +{
50811 + return prepend(buffer, buflen, name->name, name->len);
50812 +}
50813 +
50814 +static int prepend_path(const struct path *path, struct path *root,
50815 + char **buffer, int *buflen)
50816 +{
50817 + struct dentry *dentry = path->dentry;
50818 + struct vfsmount *vfsmnt = path->mnt;
50819 + struct mount *mnt = real_mount(vfsmnt);
50820 + bool slash = false;
50821 + int error = 0;
50822 +
50823 + while (dentry != root->dentry || vfsmnt != root->mnt) {
50824 + struct dentry * parent;
50825 +
50826 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50827 + /* Global root? */
50828 + if (!mnt_has_parent(mnt)) {
50829 + goto out;
50830 + }
50831 + dentry = mnt->mnt_mountpoint;
50832 + mnt = mnt->mnt_parent;
50833 + vfsmnt = &mnt->mnt;
50834 + continue;
50835 + }
50836 + parent = dentry->d_parent;
50837 + prefetch(parent);
50838 + spin_lock(&dentry->d_lock);
50839 + error = prepend_name(buffer, buflen, &dentry->d_name);
50840 + spin_unlock(&dentry->d_lock);
50841 + if (!error)
50842 + error = prepend(buffer, buflen, "/", 1);
50843 + if (error)
50844 + break;
50845 +
50846 + slash = true;
50847 + dentry = parent;
50848 + }
50849 +
50850 +out:
50851 + if (!error && !slash)
50852 + error = prepend(buffer, buflen, "/", 1);
50853 +
50854 + return error;
50855 +}
50856 +
50857 +/* this must be called with vfsmount_lock and rename_lock held */
50858 +
50859 +static char *__our_d_path(const struct path *path, struct path *root,
50860 + char *buf, int buflen)
50861 +{
50862 + char *res = buf + buflen;
50863 + int error;
50864 +
50865 + prepend(&res, &buflen, "\0", 1);
50866 + error = prepend_path(path, root, &res, &buflen);
50867 + if (error)
50868 + return ERR_PTR(error);
50869 +
50870 + return res;
50871 +}
50872 +
50873 +static char *
50874 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50875 +{
50876 + char *retval;
50877 +
50878 + retval = __our_d_path(path, root, buf, buflen);
50879 + if (unlikely(IS_ERR(retval)))
50880 + retval = strcpy(buf, "<path too long>");
50881 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50882 + retval[1] = '\0';
50883 +
50884 + return retval;
50885 +}
50886 +
50887 +static char *
50888 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50889 + char *buf, int buflen)
50890 +{
50891 + struct path path;
50892 + char *res;
50893 +
50894 + path.dentry = (struct dentry *)dentry;
50895 + path.mnt = (struct vfsmount *)vfsmnt;
50896 +
50897 + /* we can use real_root.dentry, real_root.mnt, because this is only called
50898 + by the RBAC system */
50899 + res = gen_full_path(&path, &real_root, buf, buflen);
50900 +
50901 + return res;
50902 +}
50903 +
50904 +static char *
50905 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50906 + char *buf, int buflen)
50907 +{
50908 + char *res;
50909 + struct path path;
50910 + struct path root;
50911 + struct task_struct *reaper = &init_task;
50912 +
50913 + path.dentry = (struct dentry *)dentry;
50914 + path.mnt = (struct vfsmount *)vfsmnt;
50915 +
50916 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50917 + get_fs_root(reaper->fs, &root);
50918 +
50919 + write_seqlock(&rename_lock);
50920 + br_read_lock(vfsmount_lock);
50921 + res = gen_full_path(&path, &root, buf, buflen);
50922 + br_read_unlock(vfsmount_lock);
50923 + write_sequnlock(&rename_lock);
50924 +
50925 + path_put(&root);
50926 + return res;
50927 +}
50928 +
50929 +static char *
50930 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50931 +{
50932 + char *ret;
50933 + write_seqlock(&rename_lock);
50934 + br_read_lock(vfsmount_lock);
50935 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50936 + PAGE_SIZE);
50937 + br_read_unlock(vfsmount_lock);
50938 + write_sequnlock(&rename_lock);
50939 + return ret;
50940 +}
50941 +
50942 +static char *
50943 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50944 +{
50945 + char *ret;
50946 + char *buf;
50947 + int buflen;
50948 +
50949 + write_seqlock(&rename_lock);
50950 + br_read_lock(vfsmount_lock);
50951 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50952 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50953 + buflen = (int)(ret - buf);
50954 + if (buflen >= 5)
50955 + prepend(&ret, &buflen, "/proc", 5);
50956 + else
50957 + ret = strcpy(buf, "<path too long>");
50958 + br_read_unlock(vfsmount_lock);
50959 + write_sequnlock(&rename_lock);
50960 + return ret;
50961 +}
50962 +
50963 +char *
50964 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50965 +{
50966 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50967 + PAGE_SIZE);
50968 +}
50969 +
50970 +char *
50971 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50972 +{
50973 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50974 + PAGE_SIZE);
50975 +}
50976 +
50977 +char *
50978 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50979 +{
50980 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50981 + PAGE_SIZE);
50982 +}
50983 +
50984 +char *
50985 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50986 +{
50987 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
50988 + PAGE_SIZE);
50989 +}
50990 +
50991 +char *
50992 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
50993 +{
50994 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
50995 + PAGE_SIZE);
50996 +}
50997 +
50998 +__inline__ __u32
50999 +to_gr_audit(const __u32 reqmode)
51000 +{
51001 + /* masks off auditable permission flags, then shifts them to create
51002 + auditing flags, and adds the special case of append auditing if
51003 + we're requesting write */
51004 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
51005 +}
51006 +
51007 +struct acl_subject_label *
51008 +lookup_subject_map(const struct acl_subject_label *userp)
51009 +{
51010 + unsigned int index = shash(userp, subj_map_set.s_size);
51011 + struct subject_map *match;
51012 +
51013 + match = subj_map_set.s_hash[index];
51014 +
51015 + while (match && match->user != userp)
51016 + match = match->next;
51017 +
51018 + if (match != NULL)
51019 + return match->kernel;
51020 + else
51021 + return NULL;
51022 +}
51023 +
51024 +static void
51025 +insert_subj_map_entry(struct subject_map *subjmap)
51026 +{
51027 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
51028 + struct subject_map **curr;
51029 +
51030 + subjmap->prev = NULL;
51031 +
51032 + curr = &subj_map_set.s_hash[index];
51033 + if (*curr != NULL)
51034 + (*curr)->prev = subjmap;
51035 +
51036 + subjmap->next = *curr;
51037 + *curr = subjmap;
51038 +
51039 + return;
51040 +}
51041 +
51042 +static struct acl_role_label *
51043 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
51044 + const gid_t gid)
51045 +{
51046 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
51047 + struct acl_role_label *match;
51048 + struct role_allowed_ip *ipp;
51049 + unsigned int x;
51050 + u32 curr_ip = task->signal->curr_ip;
51051 +
51052 + task->signal->saved_ip = curr_ip;
51053 +
51054 + match = acl_role_set.r_hash[index];
51055 +
51056 + while (match) {
51057 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
51058 + for (x = 0; x < match->domain_child_num; x++) {
51059 + if (match->domain_children[x] == uid)
51060 + goto found;
51061 + }
51062 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51063 + break;
51064 + match = match->next;
51065 + }
51066 +found:
51067 + if (match == NULL) {
51068 + try_group:
51069 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51070 + match = acl_role_set.r_hash[index];
51071 +
51072 + while (match) {
51073 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51074 + for (x = 0; x < match->domain_child_num; x++) {
51075 + if (match->domain_children[x] == gid)
51076 + goto found2;
51077 + }
51078 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51079 + break;
51080 + match = match->next;
51081 + }
51082 +found2:
51083 + if (match == NULL)
51084 + match = default_role;
51085 + if (match->allowed_ips == NULL)
51086 + return match;
51087 + else {
51088 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51089 + if (likely
51090 + ((ntohl(curr_ip) & ipp->netmask) ==
51091 + (ntohl(ipp->addr) & ipp->netmask)))
51092 + return match;
51093 + }
51094 + match = default_role;
51095 + }
51096 + } else if (match->allowed_ips == NULL) {
51097 + return match;
51098 + } else {
51099 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51100 + if (likely
51101 + ((ntohl(curr_ip) & ipp->netmask) ==
51102 + (ntohl(ipp->addr) & ipp->netmask)))
51103 + return match;
51104 + }
51105 + goto try_group;
51106 + }
51107 +
51108 + return match;
51109 +}
51110 +
51111 +struct acl_subject_label *
51112 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51113 + const struct acl_role_label *role)
51114 +{
51115 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
51116 + struct acl_subject_label *match;
51117 +
51118 + match = role->subj_hash[index];
51119 +
51120 + while (match && (match->inode != ino || match->device != dev ||
51121 + (match->mode & GR_DELETED))) {
51122 + match = match->next;
51123 + }
51124 +
51125 + if (match && !(match->mode & GR_DELETED))
51126 + return match;
51127 + else
51128 + return NULL;
51129 +}
51130 +
51131 +struct acl_subject_label *
51132 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51133 + const struct acl_role_label *role)
51134 +{
51135 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
51136 + struct acl_subject_label *match;
51137 +
51138 + match = role->subj_hash[index];
51139 +
51140 + while (match && (match->inode != ino || match->device != dev ||
51141 + !(match->mode & GR_DELETED))) {
51142 + match = match->next;
51143 + }
51144 +
51145 + if (match && (match->mode & GR_DELETED))
51146 + return match;
51147 + else
51148 + return NULL;
51149 +}
51150 +
51151 +static struct acl_object_label *
51152 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51153 + const struct acl_subject_label *subj)
51154 +{
51155 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51156 + struct acl_object_label *match;
51157 +
51158 + match = subj->obj_hash[index];
51159 +
51160 + while (match && (match->inode != ino || match->device != dev ||
51161 + (match->mode & GR_DELETED))) {
51162 + match = match->next;
51163 + }
51164 +
51165 + if (match && !(match->mode & GR_DELETED))
51166 + return match;
51167 + else
51168 + return NULL;
51169 +}
51170 +
51171 +static struct acl_object_label *
51172 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51173 + const struct acl_subject_label *subj)
51174 +{
51175 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51176 + struct acl_object_label *match;
51177 +
51178 + match = subj->obj_hash[index];
51179 +
51180 + while (match && (match->inode != ino || match->device != dev ||
51181 + !(match->mode & GR_DELETED))) {
51182 + match = match->next;
51183 + }
51184 +
51185 + if (match && (match->mode & GR_DELETED))
51186 + return match;
51187 +
51188 + match = subj->obj_hash[index];
51189 +
51190 + while (match && (match->inode != ino || match->device != dev ||
51191 + (match->mode & GR_DELETED))) {
51192 + match = match->next;
51193 + }
51194 +
51195 + if (match && !(match->mode & GR_DELETED))
51196 + return match;
51197 + else
51198 + return NULL;
51199 +}
51200 +
51201 +static struct name_entry *
51202 +lookup_name_entry(const char *name)
51203 +{
51204 + unsigned int len = strlen(name);
51205 + unsigned int key = full_name_hash(name, len);
51206 + unsigned int index = key % name_set.n_size;
51207 + struct name_entry *match;
51208 +
51209 + match = name_set.n_hash[index];
51210 +
51211 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51212 + match = match->next;
51213 +
51214 + return match;
51215 +}
51216 +
51217 +static struct name_entry *
51218 +lookup_name_entry_create(const char *name)
51219 +{
51220 + unsigned int len = strlen(name);
51221 + unsigned int key = full_name_hash(name, len);
51222 + unsigned int index = key % name_set.n_size;
51223 + struct name_entry *match;
51224 +
51225 + match = name_set.n_hash[index];
51226 +
51227 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51228 + !match->deleted))
51229 + match = match->next;
51230 +
51231 + if (match && match->deleted)
51232 + return match;
51233 +
51234 + match = name_set.n_hash[index];
51235 +
51236 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51237 + match->deleted))
51238 + match = match->next;
51239 +
51240 + if (match && !match->deleted)
51241 + return match;
51242 + else
51243 + return NULL;
51244 +}
51245 +
51246 +static struct inodev_entry *
51247 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
51248 +{
51249 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
51250 + struct inodev_entry *match;
51251 +
51252 + match = inodev_set.i_hash[index];
51253 +
51254 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
51255 + match = match->next;
51256 +
51257 + return match;
51258 +}
51259 +
51260 +static void
51261 +insert_inodev_entry(struct inodev_entry *entry)
51262 +{
51263 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
51264 + inodev_set.i_size);
51265 + struct inodev_entry **curr;
51266 +
51267 + entry->prev = NULL;
51268 +
51269 + curr = &inodev_set.i_hash[index];
51270 + if (*curr != NULL)
51271 + (*curr)->prev = entry;
51272 +
51273 + entry->next = *curr;
51274 + *curr = entry;
51275 +
51276 + return;
51277 +}
51278 +
51279 +static void
51280 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
51281 +{
51282 + unsigned int index =
51283 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
51284 + struct acl_role_label **curr;
51285 + struct acl_role_label *tmp, *tmp2;
51286 +
51287 + curr = &acl_role_set.r_hash[index];
51288 +
51289 + /* simple case, slot is empty, just set it to our role */
51290 + if (*curr == NULL) {
51291 + *curr = role;
51292 + } else {
51293 + /* example:
51294 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
51295 + 2 -> 3
51296 + */
51297 + /* first check to see if we can already be reached via this slot */
51298 + tmp = *curr;
51299 + while (tmp && tmp != role)
51300 + tmp = tmp->next;
51301 + if (tmp == role) {
51302 + /* we don't need to add ourselves to this slot's chain */
51303 + return;
51304 + }
51305 + /* we need to add ourselves to this chain, two cases */
51306 + if (role->next == NULL) {
51307 + /* simple case, append the current chain to our role */
51308 + role->next = *curr;
51309 + *curr = role;
51310 + } else {
51311 + /* 1 -> 2 -> 3 -> 4
51312 + 2 -> 3 -> 4
51313 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
51314 + */
51315 + /* trickier case: walk our role's chain until we find
51316 + the role for the start of the current slot's chain */
51317 + tmp = role;
51318 + tmp2 = *curr;
51319 + while (tmp->next && tmp->next != tmp2)
51320 + tmp = tmp->next;
51321 + if (tmp->next == tmp2) {
51322 + /* from example above, we found 3, so just
51323 + replace this slot's chain with ours */
51324 + *curr = role;
51325 + } else {
51326 + /* we didn't find a subset of our role's chain
51327 + in the current slot's chain, so append their
51328 + chain to ours, and set us as the first role in
51329 + the slot's chain
51330 +
51331 + we could fold this case with the case above,
51332 + but making it explicit for clarity
51333 + */
51334 + tmp->next = tmp2;
51335 + *curr = role;
51336 + }
51337 + }
51338 + }
51339 +
51340 + return;
51341 +}
51342 +
51343 +static void
51344 +insert_acl_role_label(struct acl_role_label *role)
51345 +{
51346 + int i;
51347 +
51348 + if (role_list == NULL) {
51349 + role_list = role;
51350 + role->prev = NULL;
51351 + } else {
51352 + role->prev = role_list;
51353 + role_list = role;
51354 + }
51355 +
51356 + /* used for hash chains */
51357 + role->next = NULL;
51358 +
51359 + if (role->roletype & GR_ROLE_DOMAIN) {
51360 + for (i = 0; i < role->domain_child_num; i++)
51361 + __insert_acl_role_label(role, role->domain_children[i]);
51362 + } else
51363 + __insert_acl_role_label(role, role->uidgid);
51364 +}
51365 +
51366 +static int
51367 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
51368 +{
51369 + struct name_entry **curr, *nentry;
51370 + struct inodev_entry *ientry;
51371 + unsigned int len = strlen(name);
51372 + unsigned int key = full_name_hash(name, len);
51373 + unsigned int index = key % name_set.n_size;
51374 +
51375 + curr = &name_set.n_hash[index];
51376 +
51377 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
51378 + curr = &((*curr)->next);
51379 +
51380 + if (*curr != NULL)
51381 + return 1;
51382 +
51383 + nentry = acl_alloc(sizeof (struct name_entry));
51384 + if (nentry == NULL)
51385 + return 0;
51386 + ientry = acl_alloc(sizeof (struct inodev_entry));
51387 + if (ientry == NULL)
51388 + return 0;
51389 + ientry->nentry = nentry;
51390 +
51391 + nentry->key = key;
51392 + nentry->name = name;
51393 + nentry->inode = inode;
51394 + nentry->device = device;
51395 + nentry->len = len;
51396 + nentry->deleted = deleted;
51397 +
51398 + nentry->prev = NULL;
51399 + curr = &name_set.n_hash[index];
51400 + if (*curr != NULL)
51401 + (*curr)->prev = nentry;
51402 + nentry->next = *curr;
51403 + *curr = nentry;
51404 +
51405 + /* insert us into the table searchable by inode/dev */
51406 + insert_inodev_entry(ientry);
51407 +
51408 + return 1;
51409 +}
51410 +
51411 +static void
51412 +insert_acl_obj_label(struct acl_object_label *obj,
51413 + struct acl_subject_label *subj)
51414 +{
51415 + unsigned int index =
51416 + fhash(obj->inode, obj->device, subj->obj_hash_size);
51417 + struct acl_object_label **curr;
51418 +
51419 +
51420 + obj->prev = NULL;
51421 +
51422 + curr = &subj->obj_hash[index];
51423 + if (*curr != NULL)
51424 + (*curr)->prev = obj;
51425 +
51426 + obj->next = *curr;
51427 + *curr = obj;
51428 +
51429 + return;
51430 +}
51431 +
51432 +static void
51433 +insert_acl_subj_label(struct acl_subject_label *obj,
51434 + struct acl_role_label *role)
51435 +{
51436 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
51437 + struct acl_subject_label **curr;
51438 +
51439 + obj->prev = NULL;
51440 +
51441 + curr = &role->subj_hash[index];
51442 + if (*curr != NULL)
51443 + (*curr)->prev = obj;
51444 +
51445 + obj->next = *curr;
51446 + *curr = obj;
51447 +
51448 + return;
51449 +}
51450 +
51451 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
51452 +
51453 +static void *
51454 +create_table(__u32 * len, int elementsize)
51455 +{
51456 + unsigned int table_sizes[] = {
51457 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
51458 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
51459 + 4194301, 8388593, 16777213, 33554393, 67108859
51460 + };
51461 + void *newtable = NULL;
51462 + unsigned int pwr = 0;
51463 +
51464 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
51465 + table_sizes[pwr] <= *len)
51466 + pwr++;
51467 +
51468 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
51469 + return newtable;
51470 +
51471 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
51472 + newtable =
51473 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
51474 + else
51475 + newtable = vmalloc(table_sizes[pwr] * elementsize);
51476 +
51477 + *len = table_sizes[pwr];
51478 +
51479 + return newtable;
51480 +}
51481 +
51482 +static int
51483 +init_variables(const struct gr_arg *arg)
51484 +{
51485 + struct task_struct *reaper = &init_task;
51486 + unsigned int stacksize;
51487 +
51488 + subj_map_set.s_size = arg->role_db.num_subjects;
51489 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51490 + name_set.n_size = arg->role_db.num_objects;
51491 + inodev_set.i_size = arg->role_db.num_objects;
51492 +
51493 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
51494 + !name_set.n_size || !inodev_set.i_size)
51495 + return 1;
51496 +
51497 + if (!gr_init_uidset())
51498 + return 1;
51499 +
51500 + /* set up the stack that holds allocation info */
51501 +
51502 + stacksize = arg->role_db.num_pointers + 5;
51503 +
51504 + if (!acl_alloc_stack_init(stacksize))
51505 + return 1;
51506 +
51507 + /* grab reference for the real root dentry and vfsmount */
51508 + get_fs_root(reaper->fs, &real_root);
51509 +
51510 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51511 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
51512 +#endif
51513 +
51514 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51515 + if (fakefs_obj_rw == NULL)
51516 + return 1;
51517 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51518 +
51519 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51520 + if (fakefs_obj_rwx == NULL)
51521 + return 1;
51522 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
51523 +
51524 + subj_map_set.s_hash =
51525 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51526 + acl_role_set.r_hash =
51527 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51528 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51529 + inodev_set.i_hash =
51530 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51531 +
51532 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51533 + !name_set.n_hash || !inodev_set.i_hash)
51534 + return 1;
51535 +
51536 + memset(subj_map_set.s_hash, 0,
51537 + sizeof(struct subject_map *) * subj_map_set.s_size);
51538 + memset(acl_role_set.r_hash, 0,
51539 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
51540 + memset(name_set.n_hash, 0,
51541 + sizeof (struct name_entry *) * name_set.n_size);
51542 + memset(inodev_set.i_hash, 0,
51543 + sizeof (struct inodev_entry *) * inodev_set.i_size);
51544 +
51545 + return 0;
51546 +}
51547 +
51548 +/* free information not needed after startup
51549 + currently contains user->kernel pointer mappings for subjects
51550 +*/
51551 +
51552 +static void
51553 +free_init_variables(void)
51554 +{
51555 + __u32 i;
51556 +
51557 + if (subj_map_set.s_hash) {
51558 + for (i = 0; i < subj_map_set.s_size; i++) {
51559 + if (subj_map_set.s_hash[i]) {
51560 + kfree(subj_map_set.s_hash[i]);
51561 + subj_map_set.s_hash[i] = NULL;
51562 + }
51563 + }
51564 +
51565 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51566 + PAGE_SIZE)
51567 + kfree(subj_map_set.s_hash);
51568 + else
51569 + vfree(subj_map_set.s_hash);
51570 + }
51571 +
51572 + return;
51573 +}
51574 +
51575 +static void
51576 +free_variables(void)
51577 +{
51578 + struct acl_subject_label *s;
51579 + struct acl_role_label *r;
51580 + struct task_struct *task, *task2;
51581 + unsigned int x;
51582 +
51583 + gr_clear_learn_entries();
51584 +
51585 + read_lock(&tasklist_lock);
51586 + do_each_thread(task2, task) {
51587 + task->acl_sp_role = 0;
51588 + task->acl_role_id = 0;
51589 + task->acl = NULL;
51590 + task->role = NULL;
51591 + } while_each_thread(task2, task);
51592 + read_unlock(&tasklist_lock);
51593 +
51594 + /* release the reference to the real root dentry and vfsmount */
51595 + path_put(&real_root);
51596 + memset(&real_root, 0, sizeof(real_root));
51597 +
51598 + /* free all object hash tables */
51599 +
51600 + FOR_EACH_ROLE_START(r)
51601 + if (r->subj_hash == NULL)
51602 + goto next_role;
51603 + FOR_EACH_SUBJECT_START(r, s, x)
51604 + if (s->obj_hash == NULL)
51605 + break;
51606 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51607 + kfree(s->obj_hash);
51608 + else
51609 + vfree(s->obj_hash);
51610 + FOR_EACH_SUBJECT_END(s, x)
51611 + FOR_EACH_NESTED_SUBJECT_START(r, s)
51612 + if (s->obj_hash == NULL)
51613 + break;
51614 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51615 + kfree(s->obj_hash);
51616 + else
51617 + vfree(s->obj_hash);
51618 + FOR_EACH_NESTED_SUBJECT_END(s)
51619 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51620 + kfree(r->subj_hash);
51621 + else
51622 + vfree(r->subj_hash);
51623 + r->subj_hash = NULL;
51624 +next_role:
51625 + FOR_EACH_ROLE_END(r)
51626 +
51627 + acl_free_all();
51628 +
51629 + if (acl_role_set.r_hash) {
51630 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51631 + PAGE_SIZE)
51632 + kfree(acl_role_set.r_hash);
51633 + else
51634 + vfree(acl_role_set.r_hash);
51635 + }
51636 + if (name_set.n_hash) {
51637 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
51638 + PAGE_SIZE)
51639 + kfree(name_set.n_hash);
51640 + else
51641 + vfree(name_set.n_hash);
51642 + }
51643 +
51644 + if (inodev_set.i_hash) {
51645 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51646 + PAGE_SIZE)
51647 + kfree(inodev_set.i_hash);
51648 + else
51649 + vfree(inodev_set.i_hash);
51650 + }
51651 +
51652 + gr_free_uidset();
51653 +
51654 + memset(&name_set, 0, sizeof (struct name_db));
51655 + memset(&inodev_set, 0, sizeof (struct inodev_db));
51656 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51657 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51658 +
51659 + default_role = NULL;
51660 + kernel_role = NULL;
51661 + role_list = NULL;
51662 +
51663 + return;
51664 +}
51665 +
51666 +static __u32
51667 +count_user_objs(struct acl_object_label *userp)
51668 +{
51669 + struct acl_object_label o_tmp;
51670 + __u32 num = 0;
51671 +
51672 + while (userp) {
51673 + if (copy_from_user(&o_tmp, userp,
51674 + sizeof (struct acl_object_label)))
51675 + break;
51676 +
51677 + userp = o_tmp.prev;
51678 + num++;
51679 + }
51680 +
51681 + return num;
51682 +}
51683 +
51684 +static struct acl_subject_label *
51685 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51686 +
51687 +static int
51688 +copy_user_glob(struct acl_object_label *obj)
51689 +{
51690 + struct acl_object_label *g_tmp, **guser;
51691 + unsigned int len;
51692 + char *tmp;
51693 +
51694 + if (obj->globbed == NULL)
51695 + return 0;
51696 +
51697 + guser = &obj->globbed;
51698 + while (*guser) {
51699 + g_tmp = (struct acl_object_label *)
51700 + acl_alloc(sizeof (struct acl_object_label));
51701 + if (g_tmp == NULL)
51702 + return -ENOMEM;
51703 +
51704 + if (copy_from_user(g_tmp, *guser,
51705 + sizeof (struct acl_object_label)))
51706 + return -EFAULT;
51707 +
51708 + len = strnlen_user(g_tmp->filename, PATH_MAX);
51709 +
51710 + if (!len || len >= PATH_MAX)
51711 + return -EINVAL;
51712 +
51713 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51714 + return -ENOMEM;
51715 +
51716 + if (copy_from_user(tmp, g_tmp->filename, len))
51717 + return -EFAULT;
51718 + tmp[len-1] = '\0';
51719 + g_tmp->filename = tmp;
51720 +
51721 + *guser = g_tmp;
51722 + guser = &(g_tmp->next);
51723 + }
51724 +
51725 + return 0;
51726 +}
51727 +
51728 +static int
51729 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51730 + struct acl_role_label *role)
51731 +{
51732 + struct acl_object_label *o_tmp;
51733 + unsigned int len;
51734 + int ret;
51735 + char *tmp;
51736 +
51737 + while (userp) {
51738 + if ((o_tmp = (struct acl_object_label *)
51739 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
51740 + return -ENOMEM;
51741 +
51742 + if (copy_from_user(o_tmp, userp,
51743 + sizeof (struct acl_object_label)))
51744 + return -EFAULT;
51745 +
51746 + userp = o_tmp->prev;
51747 +
51748 + len = strnlen_user(o_tmp->filename, PATH_MAX);
51749 +
51750 + if (!len || len >= PATH_MAX)
51751 + return -EINVAL;
51752 +
51753 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51754 + return -ENOMEM;
51755 +
51756 + if (copy_from_user(tmp, o_tmp->filename, len))
51757 + return -EFAULT;
51758 + tmp[len-1] = '\0';
51759 + o_tmp->filename = tmp;
51760 +
51761 + insert_acl_obj_label(o_tmp, subj);
51762 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51763 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51764 + return -ENOMEM;
51765 +
51766 + ret = copy_user_glob(o_tmp);
51767 + if (ret)
51768 + return ret;
51769 +
51770 + if (o_tmp->nested) {
51771 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51772 + if (IS_ERR(o_tmp->nested))
51773 + return PTR_ERR(o_tmp->nested);
51774 +
51775 + /* insert into nested subject list */
51776 + o_tmp->nested->next = role->hash->first;
51777 + role->hash->first = o_tmp->nested;
51778 + }
51779 + }
51780 +
51781 + return 0;
51782 +}
51783 +
51784 +static __u32
51785 +count_user_subjs(struct acl_subject_label *userp)
51786 +{
51787 + struct acl_subject_label s_tmp;
51788 + __u32 num = 0;
51789 +
51790 + while (userp) {
51791 + if (copy_from_user(&s_tmp, userp,
51792 + sizeof (struct acl_subject_label)))
51793 + break;
51794 +
51795 + userp = s_tmp.prev;
51796 + /* do not count nested subjects against this count, since
51797 + they are not included in the hash table, but are
51798 + attached to objects. We have already counted
51799 + the subjects in userspace for the allocation
51800 + stack
51801 + */
51802 + if (!(s_tmp.mode & GR_NESTED))
51803 + num++;
51804 + }
51805 +
51806 + return num;
51807 +}
51808 +
51809 +static int
51810 +copy_user_allowedips(struct acl_role_label *rolep)
51811 +{
51812 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51813 +
51814 + ruserip = rolep->allowed_ips;
51815 +
51816 + while (ruserip) {
51817 + rlast = rtmp;
51818 +
51819 + if ((rtmp = (struct role_allowed_ip *)
51820 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51821 + return -ENOMEM;
51822 +
51823 + if (copy_from_user(rtmp, ruserip,
51824 + sizeof (struct role_allowed_ip)))
51825 + return -EFAULT;
51826 +
51827 + ruserip = rtmp->prev;
51828 +
51829 + if (!rlast) {
51830 + rtmp->prev = NULL;
51831 + rolep->allowed_ips = rtmp;
51832 + } else {
51833 + rlast->next = rtmp;
51834 + rtmp->prev = rlast;
51835 + }
51836 +
51837 + if (!ruserip)
51838 + rtmp->next = NULL;
51839 + }
51840 +
51841 + return 0;
51842 +}
51843 +
51844 +static int
51845 +copy_user_transitions(struct acl_role_label *rolep)
51846 +{
51847 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
51848 +
51849 + unsigned int len;
51850 + char *tmp;
51851 +
51852 + rusertp = rolep->transitions;
51853 +
51854 + while (rusertp) {
51855 + rlast = rtmp;
51856 +
51857 + if ((rtmp = (struct role_transition *)
51858 + acl_alloc(sizeof (struct role_transition))) == NULL)
51859 + return -ENOMEM;
51860 +
51861 + if (copy_from_user(rtmp, rusertp,
51862 + sizeof (struct role_transition)))
51863 + return -EFAULT;
51864 +
51865 + rusertp = rtmp->prev;
51866 +
51867 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51868 +
51869 + if (!len || len >= GR_SPROLE_LEN)
51870 + return -EINVAL;
51871 +
51872 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51873 + return -ENOMEM;
51874 +
51875 + if (copy_from_user(tmp, rtmp->rolename, len))
51876 + return -EFAULT;
51877 + tmp[len-1] = '\0';
51878 + rtmp->rolename = tmp;
51879 +
51880 + if (!rlast) {
51881 + rtmp->prev = NULL;
51882 + rolep->transitions = rtmp;
51883 + } else {
51884 + rlast->next = rtmp;
51885 + rtmp->prev = rlast;
51886 + }
51887 +
51888 + if (!rusertp)
51889 + rtmp->next = NULL;
51890 + }
51891 +
51892 + return 0;
51893 +}
51894 +
51895 +static struct acl_subject_label *
51896 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51897 +{
51898 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51899 + unsigned int len;
51900 + char *tmp;
51901 + __u32 num_objs;
51902 + struct acl_ip_label **i_tmp, *i_utmp2;
51903 + struct gr_hash_struct ghash;
51904 + struct subject_map *subjmap;
51905 + unsigned int i_num;
51906 + int err;
51907 +
51908 + s_tmp = lookup_subject_map(userp);
51909 +
51910 + /* we've already copied this subject into the kernel, just return
51911 + the reference to it, and don't copy it over again
51912 + */
51913 + if (s_tmp)
51914 + return(s_tmp);
51915 +
51916 + if ((s_tmp = (struct acl_subject_label *)
51917 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51918 + return ERR_PTR(-ENOMEM);
51919 +
51920 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51921 + if (subjmap == NULL)
51922 + return ERR_PTR(-ENOMEM);
51923 +
51924 + subjmap->user = userp;
51925 + subjmap->kernel = s_tmp;
51926 + insert_subj_map_entry(subjmap);
51927 +
51928 + if (copy_from_user(s_tmp, userp,
51929 + sizeof (struct acl_subject_label)))
51930 + return ERR_PTR(-EFAULT);
51931 +
51932 + len = strnlen_user(s_tmp->filename, PATH_MAX);
51933 +
51934 + if (!len || len >= PATH_MAX)
51935 + return ERR_PTR(-EINVAL);
51936 +
51937 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51938 + return ERR_PTR(-ENOMEM);
51939 +
51940 + if (copy_from_user(tmp, s_tmp->filename, len))
51941 + return ERR_PTR(-EFAULT);
51942 + tmp[len-1] = '\0';
51943 + s_tmp->filename = tmp;
51944 +
51945 + if (!strcmp(s_tmp->filename, "/"))
51946 + role->root_label = s_tmp;
51947 +
51948 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51949 + return ERR_PTR(-EFAULT);
51950 +
51951 + /* copy user and group transition tables */
51952 +
51953 + if (s_tmp->user_trans_num) {
51954 + uid_t *uidlist;
51955 +
51956 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51957 + if (uidlist == NULL)
51958 + return ERR_PTR(-ENOMEM);
51959 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51960 + return ERR_PTR(-EFAULT);
51961 +
51962 + s_tmp->user_transitions = uidlist;
51963 + }
51964 +
51965 + if (s_tmp->group_trans_num) {
51966 + gid_t *gidlist;
51967 +
51968 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51969 + if (gidlist == NULL)
51970 + return ERR_PTR(-ENOMEM);
51971 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51972 + return ERR_PTR(-EFAULT);
51973 +
51974 + s_tmp->group_transitions = gidlist;
51975 + }
51976 +
51977 + /* set up object hash table */
51978 + num_objs = count_user_objs(ghash.first);
51979 +
51980 + s_tmp->obj_hash_size = num_objs;
51981 + s_tmp->obj_hash =
51982 + (struct acl_object_label **)
51983 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51984 +
51985 + if (!s_tmp->obj_hash)
51986 + return ERR_PTR(-ENOMEM);
51987 +
51988 + memset(s_tmp->obj_hash, 0,
51989 + s_tmp->obj_hash_size *
51990 + sizeof (struct acl_object_label *));
51991 +
51992 + /* add in objects */
51993 + err = copy_user_objs(ghash.first, s_tmp, role);
51994 +
51995 + if (err)
51996 + return ERR_PTR(err);
51997 +
51998 + /* set pointer for parent subject */
51999 + if (s_tmp->parent_subject) {
52000 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
52001 +
52002 + if (IS_ERR(s_tmp2))
52003 + return s_tmp2;
52004 +
52005 + s_tmp->parent_subject = s_tmp2;
52006 + }
52007 +
52008 + /* add in ip acls */
52009 +
52010 + if (!s_tmp->ip_num) {
52011 + s_tmp->ips = NULL;
52012 + goto insert;
52013 + }
52014 +
52015 + i_tmp =
52016 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
52017 + sizeof (struct acl_ip_label *));
52018 +
52019 + if (!i_tmp)
52020 + return ERR_PTR(-ENOMEM);
52021 +
52022 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
52023 + *(i_tmp + i_num) =
52024 + (struct acl_ip_label *)
52025 + acl_alloc(sizeof (struct acl_ip_label));
52026 + if (!*(i_tmp + i_num))
52027 + return ERR_PTR(-ENOMEM);
52028 +
52029 + if (copy_from_user
52030 + (&i_utmp2, s_tmp->ips + i_num,
52031 + sizeof (struct acl_ip_label *)))
52032 + return ERR_PTR(-EFAULT);
52033 +
52034 + if (copy_from_user
52035 + (*(i_tmp + i_num), i_utmp2,
52036 + sizeof (struct acl_ip_label)))
52037 + return ERR_PTR(-EFAULT);
52038 +
52039 + if ((*(i_tmp + i_num))->iface == NULL)
52040 + continue;
52041 +
52042 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
52043 + if (!len || len >= IFNAMSIZ)
52044 + return ERR_PTR(-EINVAL);
52045 + tmp = acl_alloc(len);
52046 + if (tmp == NULL)
52047 + return ERR_PTR(-ENOMEM);
52048 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
52049 + return ERR_PTR(-EFAULT);
52050 + (*(i_tmp + i_num))->iface = tmp;
52051 + }
52052 +
52053 + s_tmp->ips = i_tmp;
52054 +
52055 +insert:
52056 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
52057 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
52058 + return ERR_PTR(-ENOMEM);
52059 +
52060 + return s_tmp;
52061 +}
52062 +
52063 +static int
52064 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
52065 +{
52066 + struct acl_subject_label s_pre;
52067 + struct acl_subject_label * ret;
52068 + int err;
52069 +
52070 + while (userp) {
52071 + if (copy_from_user(&s_pre, userp,
52072 + sizeof (struct acl_subject_label)))
52073 + return -EFAULT;
52074 +
52075 + /* do not add nested subjects here, add
52076 + while parsing objects
52077 + */
52078 +
52079 + if (s_pre.mode & GR_NESTED) {
52080 + userp = s_pre.prev;
52081 + continue;
52082 + }
52083 +
52084 + ret = do_copy_user_subj(userp, role);
52085 +
52086 + err = PTR_ERR(ret);
52087 + if (IS_ERR(ret))
52088 + return err;
52089 +
52090 + insert_acl_subj_label(ret, role);
52091 +
52092 + userp = s_pre.prev;
52093 + }
52094 +
52095 + return 0;
52096 +}
52097 +
52098 +static int
52099 +copy_user_acl(struct gr_arg *arg)
52100 +{
52101 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
52102 + struct sprole_pw *sptmp;
52103 + struct gr_hash_struct *ghash;
52104 + uid_t *domainlist;
52105 + unsigned int r_num;
52106 + unsigned int len;
52107 + char *tmp;
52108 + int err = 0;
52109 + __u16 i;
52110 + __u32 num_subjs;
52111 +
52112 + /* we need a default and kernel role */
52113 + if (arg->role_db.num_roles < 2)
52114 + return -EINVAL;
52115 +
52116 + /* copy special role authentication info from userspace */
52117 +
52118 + num_sprole_pws = arg->num_sprole_pws;
52119 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52120 +
52121 + if (!acl_special_roles && num_sprole_pws)
52122 + return -ENOMEM;
52123 +
52124 + for (i = 0; i < num_sprole_pws; i++) {
52125 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52126 + if (!sptmp)
52127 + return -ENOMEM;
52128 + if (copy_from_user(sptmp, arg->sprole_pws + i,
52129 + sizeof (struct sprole_pw)))
52130 + return -EFAULT;
52131 +
52132 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
52133 +
52134 + if (!len || len >= GR_SPROLE_LEN)
52135 + return -EINVAL;
52136 +
52137 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52138 + return -ENOMEM;
52139 +
52140 + if (copy_from_user(tmp, sptmp->rolename, len))
52141 + return -EFAULT;
52142 +
52143 + tmp[len-1] = '\0';
52144 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52145 + printk(KERN_ALERT "Copying special role %s\n", tmp);
52146 +#endif
52147 + sptmp->rolename = tmp;
52148 + acl_special_roles[i] = sptmp;
52149 + }
52150 +
52151 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52152 +
52153 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52154 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
52155 +
52156 + if (!r_tmp)
52157 + return -ENOMEM;
52158 +
52159 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
52160 + sizeof (struct acl_role_label *)))
52161 + return -EFAULT;
52162 +
52163 + if (copy_from_user(r_tmp, r_utmp2,
52164 + sizeof (struct acl_role_label)))
52165 + return -EFAULT;
52166 +
52167 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52168 +
52169 + if (!len || len >= PATH_MAX)
52170 + return -EINVAL;
52171 +
52172 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52173 + return -ENOMEM;
52174 +
52175 + if (copy_from_user(tmp, r_tmp->rolename, len))
52176 + return -EFAULT;
52177 +
52178 + tmp[len-1] = '\0';
52179 + r_tmp->rolename = tmp;
52180 +
52181 + if (!strcmp(r_tmp->rolename, "default")
52182 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52183 + default_role = r_tmp;
52184 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52185 + kernel_role = r_tmp;
52186 + }
52187 +
52188 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
52189 + return -ENOMEM;
52190 +
52191 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
52192 + return -EFAULT;
52193 +
52194 + r_tmp->hash = ghash;
52195 +
52196 + num_subjs = count_user_subjs(r_tmp->hash->first);
52197 +
52198 + r_tmp->subj_hash_size = num_subjs;
52199 + r_tmp->subj_hash =
52200 + (struct acl_subject_label **)
52201 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52202 +
52203 + if (!r_tmp->subj_hash)
52204 + return -ENOMEM;
52205 +
52206 + err = copy_user_allowedips(r_tmp);
52207 + if (err)
52208 + return err;
52209 +
52210 + /* copy domain info */
52211 + if (r_tmp->domain_children != NULL) {
52212 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52213 + if (domainlist == NULL)
52214 + return -ENOMEM;
52215 +
52216 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
52217 + return -EFAULT;
52218 +
52219 + r_tmp->domain_children = domainlist;
52220 + }
52221 +
52222 + err = copy_user_transitions(r_tmp);
52223 + if (err)
52224 + return err;
52225 +
52226 + memset(r_tmp->subj_hash, 0,
52227 + r_tmp->subj_hash_size *
52228 + sizeof (struct acl_subject_label *));
52229 +
52230 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
52231 +
52232 + if (err)
52233 + return err;
52234 +
52235 + /* set nested subject list to null */
52236 + r_tmp->hash->first = NULL;
52237 +
52238 + insert_acl_role_label(r_tmp);
52239 + }
52240 +
52241 + if (default_role == NULL || kernel_role == NULL)
52242 + return -EINVAL;
52243 +
52244 + return err;
52245 +}
52246 +
52247 +static int
52248 +gracl_init(struct gr_arg *args)
52249 +{
52250 + int error = 0;
52251 +
52252 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
52253 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
52254 +
52255 + if (init_variables(args)) {
52256 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
52257 + error = -ENOMEM;
52258 + free_variables();
52259 + goto out;
52260 + }
52261 +
52262 + error = copy_user_acl(args);
52263 + free_init_variables();
52264 + if (error) {
52265 + free_variables();
52266 + goto out;
52267 + }
52268 +
52269 + if ((error = gr_set_acls(0))) {
52270 + free_variables();
52271 + goto out;
52272 + }
52273 +
52274 + pax_open_kernel();
52275 + gr_status |= GR_READY;
52276 + pax_close_kernel();
52277 +
52278 + out:
52279 + return error;
52280 +}
52281 +
52282 +/* derived from glibc fnmatch() 0: match, 1: no match*/
52283 +
52284 +static int
52285 +glob_match(const char *p, const char *n)
52286 +{
52287 + char c;
52288 +
52289 + while ((c = *p++) != '\0') {
52290 + switch (c) {
52291 + case '?':
52292 + if (*n == '\0')
52293 + return 1;
52294 + else if (*n == '/')
52295 + return 1;
52296 + break;
52297 + case '\\':
52298 + if (*n != c)
52299 + return 1;
52300 + break;
52301 + case '*':
52302 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
52303 + if (*n == '/')
52304 + return 1;
52305 + else if (c == '?') {
52306 + if (*n == '\0')
52307 + return 1;
52308 + else
52309 + ++n;
52310 + }
52311 + }
52312 + if (c == '\0') {
52313 + return 0;
52314 + } else {
52315 + const char *endp;
52316 +
52317 + if ((endp = strchr(n, '/')) == NULL)
52318 + endp = n + strlen(n);
52319 +
52320 + if (c == '[') {
52321 + for (--p; n < endp; ++n)
52322 + if (!glob_match(p, n))
52323 + return 0;
52324 + } else if (c == '/') {
52325 + while (*n != '\0' && *n != '/')
52326 + ++n;
52327 + if (*n == '/' && !glob_match(p, n + 1))
52328 + return 0;
52329 + } else {
52330 + for (--p; n < endp; ++n)
52331 + if (*n == c && !glob_match(p, n))
52332 + return 0;
52333 + }
52334 +
52335 + return 1;
52336 + }
52337 + case '[':
52338 + {
52339 + int not;
52340 + char cold;
52341 +
52342 + if (*n == '\0' || *n == '/')
52343 + return 1;
52344 +
52345 + not = (*p == '!' || *p == '^');
52346 + if (not)
52347 + ++p;
52348 +
52349 + c = *p++;
52350 + for (;;) {
52351 + unsigned char fn = (unsigned char)*n;
52352 +
52353 + if (c == '\0')
52354 + return 1;
52355 + else {
52356 + if (c == fn)
52357 + goto matched;
52358 + cold = c;
52359 + c = *p++;
52360 +
52361 + if (c == '-' && *p != ']') {
52362 + unsigned char cend = *p++;
52363 +
52364 + if (cend == '\0')
52365 + return 1;
52366 +
52367 + if (cold <= fn && fn <= cend)
52368 + goto matched;
52369 +
52370 + c = *p++;
52371 + }
52372 + }
52373 +
52374 + if (c == ']')
52375 + break;
52376 + }
52377 + if (!not)
52378 + return 1;
52379 + break;
52380 + matched:
52381 + while (c != ']') {
52382 + if (c == '\0')
52383 + return 1;
52384 +
52385 + c = *p++;
52386 + }
52387 + if (not)
52388 + return 1;
52389 + }
52390 + break;
52391 + default:
52392 + if (c != *n)
52393 + return 1;
52394 + }
52395 +
52396 + ++n;
52397 + }
52398 +
52399 + if (*n == '\0')
52400 + return 0;
52401 +
52402 + if (*n == '/')
52403 + return 0;
52404 +
52405 + return 1;
52406 +}
52407 +
52408 +static struct acl_object_label *
52409 +chk_glob_label(struct acl_object_label *globbed,
52410 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
52411 +{
52412 + struct acl_object_label *tmp;
52413 +
52414 + if (*path == NULL)
52415 + *path = gr_to_filename_nolock(dentry, mnt);
52416 +
52417 + tmp = globbed;
52418 +
52419 + while (tmp) {
52420 + if (!glob_match(tmp->filename, *path))
52421 + return tmp;
52422 + tmp = tmp->next;
52423 + }
52424 +
52425 + return NULL;
52426 +}
52427 +
52428 +static struct acl_object_label *
52429 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52430 + const ino_t curr_ino, const dev_t curr_dev,
52431 + const struct acl_subject_label *subj, char **path, const int checkglob)
52432 +{
52433 + struct acl_subject_label *tmpsubj;
52434 + struct acl_object_label *retval;
52435 + struct acl_object_label *retval2;
52436 +
52437 + tmpsubj = (struct acl_subject_label *) subj;
52438 + read_lock(&gr_inode_lock);
52439 + do {
52440 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
52441 + if (retval) {
52442 + if (checkglob && retval->globbed) {
52443 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
52444 + if (retval2)
52445 + retval = retval2;
52446 + }
52447 + break;
52448 + }
52449 + } while ((tmpsubj = tmpsubj->parent_subject));
52450 + read_unlock(&gr_inode_lock);
52451 +
52452 + return retval;
52453 +}
52454 +
52455 +static __inline__ struct acl_object_label *
52456 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52457 + struct dentry *curr_dentry,
52458 + const struct acl_subject_label *subj, char **path, const int checkglob)
52459 +{
52460 + int newglob = checkglob;
52461 + ino_t inode;
52462 + dev_t device;
52463 +
52464 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
52465 + as we don't want a / * rule to match instead of the / object
52466 + don't do this for create lookups that call this function though, since they're looking up
52467 + on the parent and thus need globbing checks on all paths
52468 + */
52469 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
52470 + newglob = GR_NO_GLOB;
52471 +
52472 + spin_lock(&curr_dentry->d_lock);
52473 + inode = curr_dentry->d_inode->i_ino;
52474 + device = __get_dev(curr_dentry);
52475 + spin_unlock(&curr_dentry->d_lock);
52476 +
52477 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
52478 +}
52479 +
52480 +static struct acl_object_label *
52481 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52482 + const struct acl_subject_label *subj, char *path, const int checkglob)
52483 +{
52484 + struct dentry *dentry = (struct dentry *) l_dentry;
52485 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52486 + struct mount *real_mnt = real_mount(mnt);
52487 + struct acl_object_label *retval;
52488 + struct dentry *parent;
52489 +
52490 + write_seqlock(&rename_lock);
52491 + br_read_lock(vfsmount_lock);
52492 +
52493 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52494 +#ifdef CONFIG_NET
52495 + mnt == sock_mnt ||
52496 +#endif
52497 +#ifdef CONFIG_HUGETLBFS
52498 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
52499 +#endif
52500 + /* ignore Eric Biederman */
52501 + IS_PRIVATE(l_dentry->d_inode))) {
52502 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52503 + goto out;
52504 + }
52505 +
52506 + for (;;) {
52507 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52508 + break;
52509 +
52510 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52511 + if (!mnt_has_parent(real_mnt))
52512 + break;
52513 +
52514 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52515 + if (retval != NULL)
52516 + goto out;
52517 +
52518 + dentry = real_mnt->mnt_mountpoint;
52519 + real_mnt = real_mnt->mnt_parent;
52520 + mnt = &real_mnt->mnt;
52521 + continue;
52522 + }
52523 +
52524 + parent = dentry->d_parent;
52525 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52526 + if (retval != NULL)
52527 + goto out;
52528 +
52529 + dentry = parent;
52530 + }
52531 +
52532 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52533 +
52534 + /* real_root is pinned so we don't have to hold a reference */
52535 + if (retval == NULL)
52536 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
52537 +out:
52538 + br_read_unlock(vfsmount_lock);
52539 + write_sequnlock(&rename_lock);
52540 +
52541 + BUG_ON(retval == NULL);
52542 +
52543 + return retval;
52544 +}
52545 +
52546 +static __inline__ struct acl_object_label *
52547 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52548 + const struct acl_subject_label *subj)
52549 +{
52550 + char *path = NULL;
52551 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52552 +}
52553 +
52554 +static __inline__ struct acl_object_label *
52555 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52556 + const struct acl_subject_label *subj)
52557 +{
52558 + char *path = NULL;
52559 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52560 +}
52561 +
52562 +static __inline__ struct acl_object_label *
52563 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52564 + const struct acl_subject_label *subj, char *path)
52565 +{
52566 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52567 +}
52568 +
52569 +static struct acl_subject_label *
52570 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52571 + const struct acl_role_label *role)
52572 +{
52573 + struct dentry *dentry = (struct dentry *) l_dentry;
52574 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52575 + struct mount *real_mnt = real_mount(mnt);
52576 + struct acl_subject_label *retval;
52577 + struct dentry *parent;
52578 +
52579 + write_seqlock(&rename_lock);
52580 + br_read_lock(vfsmount_lock);
52581 +
52582 + for (;;) {
52583 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52584 + break;
52585 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52586 + if (!mnt_has_parent(real_mnt))
52587 + break;
52588 +
52589 + spin_lock(&dentry->d_lock);
52590 + read_lock(&gr_inode_lock);
52591 + retval =
52592 + lookup_acl_subj_label(dentry->d_inode->i_ino,
52593 + __get_dev(dentry), role);
52594 + read_unlock(&gr_inode_lock);
52595 + spin_unlock(&dentry->d_lock);
52596 + if (retval != NULL)
52597 + goto out;
52598 +
52599 + dentry = real_mnt->mnt_mountpoint;
52600 + real_mnt = real_mnt->mnt_parent;
52601 + mnt = &real_mnt->mnt;
52602 + continue;
52603 + }
52604 +
52605 + spin_lock(&dentry->d_lock);
52606 + read_lock(&gr_inode_lock);
52607 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52608 + __get_dev(dentry), role);
52609 + read_unlock(&gr_inode_lock);
52610 + parent = dentry->d_parent;
52611 + spin_unlock(&dentry->d_lock);
52612 +
52613 + if (retval != NULL)
52614 + goto out;
52615 +
52616 + dentry = parent;
52617 + }
52618 +
52619 + spin_lock(&dentry->d_lock);
52620 + read_lock(&gr_inode_lock);
52621 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52622 + __get_dev(dentry), role);
52623 + read_unlock(&gr_inode_lock);
52624 + spin_unlock(&dentry->d_lock);
52625 +
52626 + if (unlikely(retval == NULL)) {
52627 + /* real_root is pinned, we don't need to hold a reference */
52628 + read_lock(&gr_inode_lock);
52629 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
52630 + __get_dev(real_root.dentry), role);
52631 + read_unlock(&gr_inode_lock);
52632 + }
52633 +out:
52634 + br_read_unlock(vfsmount_lock);
52635 + write_sequnlock(&rename_lock);
52636 +
52637 + BUG_ON(retval == NULL);
52638 +
52639 + return retval;
52640 +}
52641 +
52642 +static void
52643 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52644 +{
52645 + struct task_struct *task = current;
52646 + const struct cred *cred = current_cred();
52647 +
52648 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52649 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52650 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52651 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52652 +
52653 + return;
52654 +}
52655 +
52656 +static void
52657 +gr_log_learn_sysctl(const char *path, const __u32 mode)
52658 +{
52659 + struct task_struct *task = current;
52660 + const struct cred *cred = current_cred();
52661 +
52662 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52663 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52664 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52665 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
52666 +
52667 + return;
52668 +}
52669 +
52670 +static void
52671 +gr_log_learn_id_change(const char type, const unsigned int real,
52672 + const unsigned int effective, const unsigned int fs)
52673 +{
52674 + struct task_struct *task = current;
52675 + const struct cred *cred = current_cred();
52676 +
52677 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52678 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52679 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52680 + type, real, effective, fs, &task->signal->saved_ip);
52681 +
52682 + return;
52683 +}
52684 +
52685 +__u32
52686 +gr_search_file(const struct dentry * dentry, const __u32 mode,
52687 + const struct vfsmount * mnt)
52688 +{
52689 + __u32 retval = mode;
52690 + struct acl_subject_label *curracl;
52691 + struct acl_object_label *currobj;
52692 +
52693 + if (unlikely(!(gr_status & GR_READY)))
52694 + return (mode & ~GR_AUDITS);
52695 +
52696 + curracl = current->acl;
52697 +
52698 + currobj = chk_obj_label(dentry, mnt, curracl);
52699 + retval = currobj->mode & mode;
52700 +
52701 + /* if we're opening a specified transfer file for writing
52702 + (e.g. /dev/initctl), then transfer our role to init
52703 + */
52704 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52705 + current->role->roletype & GR_ROLE_PERSIST)) {
52706 + struct task_struct *task = init_pid_ns.child_reaper;
52707 +
52708 + if (task->role != current->role) {
52709 + task->acl_sp_role = 0;
52710 + task->acl_role_id = current->acl_role_id;
52711 + task->role = current->role;
52712 + rcu_read_lock();
52713 + read_lock(&grsec_exec_file_lock);
52714 + gr_apply_subject_to_task(task);
52715 + read_unlock(&grsec_exec_file_lock);
52716 + rcu_read_unlock();
52717 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52718 + }
52719 + }
52720 +
52721 + if (unlikely
52722 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52723 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52724 + __u32 new_mode = mode;
52725 +
52726 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52727 +
52728 + retval = new_mode;
52729 +
52730 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52731 + new_mode |= GR_INHERIT;
52732 +
52733 + if (!(mode & GR_NOLEARN))
52734 + gr_log_learn(dentry, mnt, new_mode);
52735 + }
52736 +
52737 + return retval;
52738 +}
52739 +
52740 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52741 + const struct dentry *parent,
52742 + const struct vfsmount *mnt)
52743 +{
52744 + struct name_entry *match;
52745 + struct acl_object_label *matchpo;
52746 + struct acl_subject_label *curracl;
52747 + char *path;
52748 +
52749 + if (unlikely(!(gr_status & GR_READY)))
52750 + return NULL;
52751 +
52752 + preempt_disable();
52753 + path = gr_to_filename_rbac(new_dentry, mnt);
52754 + match = lookup_name_entry_create(path);
52755 +
52756 + curracl = current->acl;
52757 +
52758 + if (match) {
52759 + read_lock(&gr_inode_lock);
52760 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52761 + read_unlock(&gr_inode_lock);
52762 +
52763 + if (matchpo) {
52764 + preempt_enable();
52765 + return matchpo;
52766 + }
52767 + }
52768 +
52769 + // lookup parent
52770 +
52771 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52772 +
52773 + preempt_enable();
52774 + return matchpo;
52775 +}
52776 +
52777 +__u32
52778 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52779 + const struct vfsmount * mnt, const __u32 mode)
52780 +{
52781 + struct acl_object_label *matchpo;
52782 + __u32 retval;
52783 +
52784 + if (unlikely(!(gr_status & GR_READY)))
52785 + return (mode & ~GR_AUDITS);
52786 +
52787 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
52788 +
52789 + retval = matchpo->mode & mode;
52790 +
52791 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52792 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52793 + __u32 new_mode = mode;
52794 +
52795 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52796 +
52797 + gr_log_learn(new_dentry, mnt, new_mode);
52798 + return new_mode;
52799 + }
52800 +
52801 + return retval;
52802 +}
52803 +
52804 +__u32
52805 +gr_check_link(const struct dentry * new_dentry,
52806 + const struct dentry * parent_dentry,
52807 + const struct vfsmount * parent_mnt,
52808 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52809 +{
52810 + struct acl_object_label *obj;
52811 + __u32 oldmode, newmode;
52812 + __u32 needmode;
52813 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52814 + GR_DELETE | GR_INHERIT;
52815 +
52816 + if (unlikely(!(gr_status & GR_READY)))
52817 + return (GR_CREATE | GR_LINK);
52818 +
52819 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52820 + oldmode = obj->mode;
52821 +
52822 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52823 + newmode = obj->mode;
52824 +
52825 + needmode = newmode & checkmodes;
52826 +
52827 + // old name for hardlink must have at least the permissions of the new name
52828 + if ((oldmode & needmode) != needmode)
52829 + goto bad;
52830 +
52831 + // if old name had restrictions/auditing, make sure the new name does as well
52832 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52833 +
52834 + // don't allow hardlinking of suid/sgid files without permission
52835 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52836 + needmode |= GR_SETID;
52837 +
52838 + if ((newmode & needmode) != needmode)
52839 + goto bad;
52840 +
52841 + // enforce minimum permissions
52842 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52843 + return newmode;
52844 +bad:
52845 + needmode = oldmode;
52846 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52847 + needmode |= GR_SETID;
52848 +
52849 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52850 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52851 + return (GR_CREATE | GR_LINK);
52852 + } else if (newmode & GR_SUPPRESS)
52853 + return GR_SUPPRESS;
52854 + else
52855 + return 0;
52856 +}
52857 +
52858 +int
52859 +gr_check_hidden_task(const struct task_struct *task)
52860 +{
52861 + if (unlikely(!(gr_status & GR_READY)))
52862 + return 0;
52863 +
52864 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52865 + return 1;
52866 +
52867 + return 0;
52868 +}
52869 +
52870 +int
52871 +gr_check_protected_task(const struct task_struct *task)
52872 +{
52873 + if (unlikely(!(gr_status & GR_READY) || !task))
52874 + return 0;
52875 +
52876 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52877 + task->acl != current->acl)
52878 + return 1;
52879 +
52880 + return 0;
52881 +}
52882 +
52883 +int
52884 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52885 +{
52886 + struct task_struct *p;
52887 + int ret = 0;
52888 +
52889 + if (unlikely(!(gr_status & GR_READY) || !pid))
52890 + return ret;
52891 +
52892 + read_lock(&tasklist_lock);
52893 + do_each_pid_task(pid, type, p) {
52894 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52895 + p->acl != current->acl) {
52896 + ret = 1;
52897 + goto out;
52898 + }
52899 + } while_each_pid_task(pid, type, p);
52900 +out:
52901 + read_unlock(&tasklist_lock);
52902 +
52903 + return ret;
52904 +}
52905 +
52906 +void
52907 +gr_copy_label(struct task_struct *tsk)
52908 +{
52909 + /* plain copying of fields is already done by dup_task_struct */
52910 + tsk->signal->used_accept = 0;
52911 + tsk->acl_sp_role = 0;
52912 + //tsk->acl_role_id = current->acl_role_id;
52913 + //tsk->acl = current->acl;
52914 + //tsk->role = current->role;
52915 + tsk->signal->curr_ip = current->signal->curr_ip;
52916 + tsk->signal->saved_ip = current->signal->saved_ip;
52917 + if (current->exec_file)
52918 + get_file(current->exec_file);
52919 + //tsk->exec_file = current->exec_file;
52920 + //tsk->is_writable = current->is_writable;
52921 + if (unlikely(current->signal->used_accept)) {
52922 + current->signal->curr_ip = 0;
52923 + current->signal->saved_ip = 0;
52924 + }
52925 +
52926 + return;
52927 +}
52928 +
52929 +static void
52930 +gr_set_proc_res(struct task_struct *task)
52931 +{
52932 + struct acl_subject_label *proc;
52933 + unsigned short i;
52934 +
52935 + proc = task->acl;
52936 +
52937 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52938 + return;
52939 +
52940 + for (i = 0; i < RLIM_NLIMITS; i++) {
52941 + if (!(proc->resmask & (1 << i)))
52942 + continue;
52943 +
52944 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52945 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52946 + }
52947 +
52948 + return;
52949 +}
52950 +
52951 +extern int __gr_process_user_ban(struct user_struct *user);
52952 +
52953 +int
52954 +gr_check_user_change(int real, int effective, int fs)
52955 +{
52956 + unsigned int i;
52957 + __u16 num;
52958 + uid_t *uidlist;
52959 + int curuid;
52960 + int realok = 0;
52961 + int effectiveok = 0;
52962 + int fsok = 0;
52963 +
52964 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52965 + struct user_struct *user;
52966 +
52967 + if (real == -1)
52968 + goto skipit;
52969 +
52970 + user = find_user(real);
52971 + if (user == NULL)
52972 + goto skipit;
52973 +
52974 + if (__gr_process_user_ban(user)) {
52975 + /* for find_user */
52976 + free_uid(user);
52977 + return 1;
52978 + }
52979 +
52980 + /* for find_user */
52981 + free_uid(user);
52982 +
52983 +skipit:
52984 +#endif
52985 +
52986 + if (unlikely(!(gr_status & GR_READY)))
52987 + return 0;
52988 +
52989 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52990 + gr_log_learn_id_change('u', real, effective, fs);
52991 +
52992 + num = current->acl->user_trans_num;
52993 + uidlist = current->acl->user_transitions;
52994 +
52995 + if (uidlist == NULL)
52996 + return 0;
52997 +
52998 + if (real == -1)
52999 + realok = 1;
53000 + if (effective == -1)
53001 + effectiveok = 1;
53002 + if (fs == -1)
53003 + fsok = 1;
53004 +
53005 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
53006 + for (i = 0; i < num; i++) {
53007 + curuid = (int)uidlist[i];
53008 + if (real == curuid)
53009 + realok = 1;
53010 + if (effective == curuid)
53011 + effectiveok = 1;
53012 + if (fs == curuid)
53013 + fsok = 1;
53014 + }
53015 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
53016 + for (i = 0; i < num; i++) {
53017 + curuid = (int)uidlist[i];
53018 + if (real == curuid)
53019 + break;
53020 + if (effective == curuid)
53021 + break;
53022 + if (fs == curuid)
53023 + break;
53024 + }
53025 + /* not in deny list */
53026 + if (i == num) {
53027 + realok = 1;
53028 + effectiveok = 1;
53029 + fsok = 1;
53030 + }
53031 + }
53032 +
53033 + if (realok && effectiveok && fsok)
53034 + return 0;
53035 + else {
53036 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53037 + return 1;
53038 + }
53039 +}
53040 +
53041 +int
53042 +gr_check_group_change(int real, int effective, int fs)
53043 +{
53044 + unsigned int i;
53045 + __u16 num;
53046 + gid_t *gidlist;
53047 + int curgid;
53048 + int realok = 0;
53049 + int effectiveok = 0;
53050 + int fsok = 0;
53051 +
53052 + if (unlikely(!(gr_status & GR_READY)))
53053 + return 0;
53054 +
53055 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53056 + gr_log_learn_id_change('g', real, effective, fs);
53057 +
53058 + num = current->acl->group_trans_num;
53059 + gidlist = current->acl->group_transitions;
53060 +
53061 + if (gidlist == NULL)
53062 + return 0;
53063 +
53064 + if (real == -1)
53065 + realok = 1;
53066 + if (effective == -1)
53067 + effectiveok = 1;
53068 + if (fs == -1)
53069 + fsok = 1;
53070 +
53071 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
53072 + for (i = 0; i < num; i++) {
53073 + curgid = (int)gidlist[i];
53074 + if (real == curgid)
53075 + realok = 1;
53076 + if (effective == curgid)
53077 + effectiveok = 1;
53078 + if (fs == curgid)
53079 + fsok = 1;
53080 + }
53081 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
53082 + for (i = 0; i < num; i++) {
53083 + curgid = (int)gidlist[i];
53084 + if (real == curgid)
53085 + break;
53086 + if (effective == curgid)
53087 + break;
53088 + if (fs == curgid)
53089 + break;
53090 + }
53091 + /* not in deny list */
53092 + if (i == num) {
53093 + realok = 1;
53094 + effectiveok = 1;
53095 + fsok = 1;
53096 + }
53097 + }
53098 +
53099 + if (realok && effectiveok && fsok)
53100 + return 0;
53101 + else {
53102 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53103 + return 1;
53104 + }
53105 +}
53106 +
53107 +extern int gr_acl_is_capable(const int cap);
53108 +
53109 +void
53110 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53111 +{
53112 + struct acl_role_label *role = task->role;
53113 + struct acl_subject_label *subj = NULL;
53114 + struct acl_object_label *obj;
53115 + struct file *filp;
53116 +
53117 + if (unlikely(!(gr_status & GR_READY)))
53118 + return;
53119 +
53120 + filp = task->exec_file;
53121 +
53122 + /* kernel process, we'll give them the kernel role */
53123 + if (unlikely(!filp)) {
53124 + task->role = kernel_role;
53125 + task->acl = kernel_role->root_label;
53126 + return;
53127 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53128 + role = lookup_acl_role_label(task, uid, gid);
53129 +
53130 + /* don't change the role if we're not a privileged process */
53131 + if (role && task->role != role &&
53132 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
53133 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
53134 + return;
53135 +
53136 + /* perform subject lookup in possibly new role
53137 + we can use this result below in the case where role == task->role
53138 + */
53139 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53140 +
53141 + /* if we changed uid/gid, but result in the same role
53142 + and are using inheritance, don't lose the inherited subject
53143 + if current subject is other than what normal lookup
53144 + would result in, we arrived via inheritance, don't
53145 + lose subject
53146 + */
53147 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53148 + (subj == task->acl)))
53149 + task->acl = subj;
53150 +
53151 + task->role = role;
53152 +
53153 + task->is_writable = 0;
53154 +
53155 + /* ignore additional mmap checks for processes that are writable
53156 + by the default ACL */
53157 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53158 + if (unlikely(obj->mode & GR_WRITE))
53159 + task->is_writable = 1;
53160 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53161 + if (unlikely(obj->mode & GR_WRITE))
53162 + task->is_writable = 1;
53163 +
53164 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53165 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53166 +#endif
53167 +
53168 + gr_set_proc_res(task);
53169 +
53170 + return;
53171 +}
53172 +
53173 +int
53174 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53175 + const int unsafe_flags)
53176 +{
53177 + struct task_struct *task = current;
53178 + struct acl_subject_label *newacl;
53179 + struct acl_object_label *obj;
53180 + __u32 retmode;
53181 +
53182 + if (unlikely(!(gr_status & GR_READY)))
53183 + return 0;
53184 +
53185 + newacl = chk_subj_label(dentry, mnt, task->role);
53186 +
53187 + /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
53188 + did an exec
53189 + */
53190 + rcu_read_lock();
53191 + read_lock(&tasklist_lock);
53192 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
53193 + (task->parent->acl->mode & GR_POVERRIDE))) {
53194 + read_unlock(&tasklist_lock);
53195 + rcu_read_unlock();
53196 + goto skip_check;
53197 + }
53198 + read_unlock(&tasklist_lock);
53199 + rcu_read_unlock();
53200 +
53201 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53202 + !(task->role->roletype & GR_ROLE_GOD) &&
53203 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
53204 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53205 + if (unsafe_flags & LSM_UNSAFE_SHARE)
53206 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53207 + else
53208 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53209 + return -EACCES;
53210 + }
53211 +
53212 +skip_check:
53213 +
53214 + obj = chk_obj_label(dentry, mnt, task->acl);
53215 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53216 +
53217 + if (!(task->acl->mode & GR_INHERITLEARN) &&
53218 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53219 + if (obj->nested)
53220 + task->acl = obj->nested;
53221 + else
53222 + task->acl = newacl;
53223 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
53224 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
53225 +
53226 + task->is_writable = 0;
53227 +
53228 + /* ignore additional mmap checks for processes that are writable
53229 + by the default ACL */
53230 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
53231 + if (unlikely(obj->mode & GR_WRITE))
53232 + task->is_writable = 1;
53233 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
53234 + if (unlikely(obj->mode & GR_WRITE))
53235 + task->is_writable = 1;
53236 +
53237 + gr_set_proc_res(task);
53238 +
53239 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53240 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53241 +#endif
53242 + return 0;
53243 +}
53244 +
53245 +/* always called with valid inodev ptr */
53246 +static void
53247 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
53248 +{
53249 + struct acl_object_label *matchpo;
53250 + struct acl_subject_label *matchps;
53251 + struct acl_subject_label *subj;
53252 + struct acl_role_label *role;
53253 + unsigned int x;
53254 +
53255 + FOR_EACH_ROLE_START(role)
53256 + FOR_EACH_SUBJECT_START(role, subj, x)
53257 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
53258 + matchpo->mode |= GR_DELETED;
53259 + FOR_EACH_SUBJECT_END(subj,x)
53260 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53261 + if (subj->inode == ino && subj->device == dev)
53262 + subj->mode |= GR_DELETED;
53263 + FOR_EACH_NESTED_SUBJECT_END(subj)
53264 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
53265 + matchps->mode |= GR_DELETED;
53266 + FOR_EACH_ROLE_END(role)
53267 +
53268 + inodev->nentry->deleted = 1;
53269 +
53270 + return;
53271 +}
53272 +
53273 +void
53274 +gr_handle_delete(const ino_t ino, const dev_t dev)
53275 +{
53276 + struct inodev_entry *inodev;
53277 +
53278 + if (unlikely(!(gr_status & GR_READY)))
53279 + return;
53280 +
53281 + write_lock(&gr_inode_lock);
53282 + inodev = lookup_inodev_entry(ino, dev);
53283 + if (inodev != NULL)
53284 + do_handle_delete(inodev, ino, dev);
53285 + write_unlock(&gr_inode_lock);
53286 +
53287 + return;
53288 +}
53289 +
53290 +static void
53291 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
53292 + const ino_t newinode, const dev_t newdevice,
53293 + struct acl_subject_label *subj)
53294 +{
53295 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
53296 + struct acl_object_label *match;
53297 +
53298 + match = subj->obj_hash[index];
53299 +
53300 + while (match && (match->inode != oldinode ||
53301 + match->device != olddevice ||
53302 + !(match->mode & GR_DELETED)))
53303 + match = match->next;
53304 +
53305 + if (match && (match->inode == oldinode)
53306 + && (match->device == olddevice)
53307 + && (match->mode & GR_DELETED)) {
53308 + if (match->prev == NULL) {
53309 + subj->obj_hash[index] = match->next;
53310 + if (match->next != NULL)
53311 + match->next->prev = NULL;
53312 + } else {
53313 + match->prev->next = match->next;
53314 + if (match->next != NULL)
53315 + match->next->prev = match->prev;
53316 + }
53317 + match->prev = NULL;
53318 + match->next = NULL;
53319 + match->inode = newinode;
53320 + match->device = newdevice;
53321 + match->mode &= ~GR_DELETED;
53322 +
53323 + insert_acl_obj_label(match, subj);
53324 + }
53325 +
53326 + return;
53327 +}
53328 +
53329 +static void
53330 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
53331 + const ino_t newinode, const dev_t newdevice,
53332 + struct acl_role_label *role)
53333 +{
53334 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
53335 + struct acl_subject_label *match;
53336 +
53337 + match = role->subj_hash[index];
53338 +
53339 + while (match && (match->inode != oldinode ||
53340 + match->device != olddevice ||
53341 + !(match->mode & GR_DELETED)))
53342 + match = match->next;
53343 +
53344 + if (match && (match->inode == oldinode)
53345 + && (match->device == olddevice)
53346 + && (match->mode & GR_DELETED)) {
53347 + if (match->prev == NULL) {
53348 + role->subj_hash[index] = match->next;
53349 + if (match->next != NULL)
53350 + match->next->prev = NULL;
53351 + } else {
53352 + match->prev->next = match->next;
53353 + if (match->next != NULL)
53354 + match->next->prev = match->prev;
53355 + }
53356 + match->prev = NULL;
53357 + match->next = NULL;
53358 + match->inode = newinode;
53359 + match->device = newdevice;
53360 + match->mode &= ~GR_DELETED;
53361 +
53362 + insert_acl_subj_label(match, role);
53363 + }
53364 +
53365 + return;
53366 +}
53367 +
53368 +static void
53369 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
53370 + const ino_t newinode, const dev_t newdevice)
53371 +{
53372 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
53373 + struct inodev_entry *match;
53374 +
53375 + match = inodev_set.i_hash[index];
53376 +
53377 + while (match && (match->nentry->inode != oldinode ||
53378 + match->nentry->device != olddevice || !match->nentry->deleted))
53379 + match = match->next;
53380 +
53381 + if (match && (match->nentry->inode == oldinode)
53382 + && (match->nentry->device == olddevice) &&
53383 + match->nentry->deleted) {
53384 + if (match->prev == NULL) {
53385 + inodev_set.i_hash[index] = match->next;
53386 + if (match->next != NULL)
53387 + match->next->prev = NULL;
53388 + } else {
53389 + match->prev->next = match->next;
53390 + if (match->next != NULL)
53391 + match->next->prev = match->prev;
53392 + }
53393 + match->prev = NULL;
53394 + match->next = NULL;
53395 + match->nentry->inode = newinode;
53396 + match->nentry->device = newdevice;
53397 + match->nentry->deleted = 0;
53398 +
53399 + insert_inodev_entry(match);
53400 + }
53401 +
53402 + return;
53403 +}
53404 +
53405 +static void
53406 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
53407 +{
53408 + struct acl_subject_label *subj;
53409 + struct acl_role_label *role;
53410 + unsigned int x;
53411 +
53412 + FOR_EACH_ROLE_START(role)
53413 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
53414 +
53415 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53416 + if ((subj->inode == ino) && (subj->device == dev)) {
53417 + subj->inode = ino;
53418 + subj->device = dev;
53419 + }
53420 + FOR_EACH_NESTED_SUBJECT_END(subj)
53421 + FOR_EACH_SUBJECT_START(role, subj, x)
53422 + update_acl_obj_label(matchn->inode, matchn->device,
53423 + ino, dev, subj);
53424 + FOR_EACH_SUBJECT_END(subj,x)
53425 + FOR_EACH_ROLE_END(role)
53426 +
53427 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
53428 +
53429 + return;
53430 +}
53431 +
53432 +static void
53433 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
53434 + const struct vfsmount *mnt)
53435 +{
53436 + ino_t ino = dentry->d_inode->i_ino;
53437 + dev_t dev = __get_dev(dentry);
53438 +
53439 + __do_handle_create(matchn, ino, dev);
53440 +
53441 + return;
53442 +}
53443 +
53444 +void
53445 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53446 +{
53447 + struct name_entry *matchn;
53448 +
53449 + if (unlikely(!(gr_status & GR_READY)))
53450 + return;
53451 +
53452 + preempt_disable();
53453 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
53454 +
53455 + if (unlikely((unsigned long)matchn)) {
53456 + write_lock(&gr_inode_lock);
53457 + do_handle_create(matchn, dentry, mnt);
53458 + write_unlock(&gr_inode_lock);
53459 + }
53460 + preempt_enable();
53461 +
53462 + return;
53463 +}
53464 +
53465 +void
53466 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53467 +{
53468 + struct name_entry *matchn;
53469 +
53470 + if (unlikely(!(gr_status & GR_READY)))
53471 + return;
53472 +
53473 + preempt_disable();
53474 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
53475 +
53476 + if (unlikely((unsigned long)matchn)) {
53477 + write_lock(&gr_inode_lock);
53478 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
53479 + write_unlock(&gr_inode_lock);
53480 + }
53481 + preempt_enable();
53482 +
53483 + return;
53484 +}
53485 +
53486 +void
53487 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53488 + struct dentry *old_dentry,
53489 + struct dentry *new_dentry,
53490 + struct vfsmount *mnt, const __u8 replace)
53491 +{
53492 + struct name_entry *matchn;
53493 + struct inodev_entry *inodev;
53494 + struct inode *inode = new_dentry->d_inode;
53495 + ino_t old_ino = old_dentry->d_inode->i_ino;
53496 + dev_t old_dev = __get_dev(old_dentry);
53497 +
53498 + /* vfs_rename swaps the name and parent link for old_dentry and
53499 + new_dentry
53500 + at this point, old_dentry has the new name, parent link, and inode
53501 + for the renamed file
53502 + if a file is being replaced by a rename, new_dentry has the inode
53503 + and name for the replaced file
53504 + */
53505 +
53506 + if (unlikely(!(gr_status & GR_READY)))
53507 + return;
53508 +
53509 + preempt_disable();
53510 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53511 +
53512 + /* we wouldn't have to check d_inode if it weren't for
53513 + NFS silly-renaming
53514 + */
53515 +
53516 + write_lock(&gr_inode_lock);
53517 + if (unlikely(replace && inode)) {
53518 + ino_t new_ino = inode->i_ino;
53519 + dev_t new_dev = __get_dev(new_dentry);
53520 +
53521 + inodev = lookup_inodev_entry(new_ino, new_dev);
53522 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
53523 + do_handle_delete(inodev, new_ino, new_dev);
53524 + }
53525 +
53526 + inodev = lookup_inodev_entry(old_ino, old_dev);
53527 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
53528 + do_handle_delete(inodev, old_ino, old_dev);
53529 +
53530 + if (unlikely((unsigned long)matchn))
53531 + do_handle_create(matchn, old_dentry, mnt);
53532 +
53533 + write_unlock(&gr_inode_lock);
53534 + preempt_enable();
53535 +
53536 + return;
53537 +}
53538 +
53539 +static int
53540 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53541 + unsigned char **sum)
53542 +{
53543 + struct acl_role_label *r;
53544 + struct role_allowed_ip *ipp;
53545 + struct role_transition *trans;
53546 + unsigned int i;
53547 + int found = 0;
53548 + u32 curr_ip = current->signal->curr_ip;
53549 +
53550 + current->signal->saved_ip = curr_ip;
53551 +
53552 + /* check transition table */
53553 +
53554 + for (trans = current->role->transitions; trans; trans = trans->next) {
53555 + if (!strcmp(rolename, trans->rolename)) {
53556 + found = 1;
53557 + break;
53558 + }
53559 + }
53560 +
53561 + if (!found)
53562 + return 0;
53563 +
53564 + /* handle special roles that do not require authentication
53565 + and check ip */
53566 +
53567 + FOR_EACH_ROLE_START(r)
53568 + if (!strcmp(rolename, r->rolename) &&
53569 + (r->roletype & GR_ROLE_SPECIAL)) {
53570 + found = 0;
53571 + if (r->allowed_ips != NULL) {
53572 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53573 + if ((ntohl(curr_ip) & ipp->netmask) ==
53574 + (ntohl(ipp->addr) & ipp->netmask))
53575 + found = 1;
53576 + }
53577 + } else
53578 + found = 2;
53579 + if (!found)
53580 + return 0;
53581 +
53582 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53583 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53584 + *salt = NULL;
53585 + *sum = NULL;
53586 + return 1;
53587 + }
53588 + }
53589 + FOR_EACH_ROLE_END(r)
53590 +
53591 + for (i = 0; i < num_sprole_pws; i++) {
53592 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53593 + *salt = acl_special_roles[i]->salt;
53594 + *sum = acl_special_roles[i]->sum;
53595 + return 1;
53596 + }
53597 + }
53598 +
53599 + return 0;
53600 +}
53601 +
53602 +static void
53603 +assign_special_role(char *rolename)
53604 +{
53605 + struct acl_object_label *obj;
53606 + struct acl_role_label *r;
53607 + struct acl_role_label *assigned = NULL;
53608 + struct task_struct *tsk;
53609 + struct file *filp;
53610 +
53611 + FOR_EACH_ROLE_START(r)
53612 + if (!strcmp(rolename, r->rolename) &&
53613 + (r->roletype & GR_ROLE_SPECIAL)) {
53614 + assigned = r;
53615 + break;
53616 + }
53617 + FOR_EACH_ROLE_END(r)
53618 +
53619 + if (!assigned)
53620 + return;
53621 +
53622 + read_lock(&tasklist_lock);
53623 + read_lock(&grsec_exec_file_lock);
53624 +
53625 + tsk = current->real_parent;
53626 + if (tsk == NULL)
53627 + goto out_unlock;
53628 +
53629 + filp = tsk->exec_file;
53630 + if (filp == NULL)
53631 + goto out_unlock;
53632 +
53633 + tsk->is_writable = 0;
53634 +
53635 + tsk->acl_sp_role = 1;
53636 + tsk->acl_role_id = ++acl_sp_role_value;
53637 + tsk->role = assigned;
53638 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53639 +
53640 + /* ignore additional mmap checks for processes that are writable
53641 + by the default ACL */
53642 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53643 + if (unlikely(obj->mode & GR_WRITE))
53644 + tsk->is_writable = 1;
53645 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53646 + if (unlikely(obj->mode & GR_WRITE))
53647 + tsk->is_writable = 1;
53648 +
53649 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53650 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53651 +#endif
53652 +
53653 +out_unlock:
53654 + read_unlock(&grsec_exec_file_lock);
53655 + read_unlock(&tasklist_lock);
53656 + return;
53657 +}
53658 +
53659 +int gr_check_secure_terminal(struct task_struct *task)
53660 +{
53661 + struct task_struct *p, *p2, *p3;
53662 + struct files_struct *files;
53663 + struct fdtable *fdt;
53664 + struct file *our_file = NULL, *file;
53665 + int i;
53666 +
53667 + if (task->signal->tty == NULL)
53668 + return 1;
53669 +
53670 + files = get_files_struct(task);
53671 + if (files != NULL) {
53672 + rcu_read_lock();
53673 + fdt = files_fdtable(files);
53674 + for (i=0; i < fdt->max_fds; i++) {
53675 + file = fcheck_files(files, i);
53676 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53677 + get_file(file);
53678 + our_file = file;
53679 + }
53680 + }
53681 + rcu_read_unlock();
53682 + put_files_struct(files);
53683 + }
53684 +
53685 + if (our_file == NULL)
53686 + return 1;
53687 +
53688 + read_lock(&tasklist_lock);
53689 + do_each_thread(p2, p) {
53690 + files = get_files_struct(p);
53691 + if (files == NULL ||
53692 + (p->signal && p->signal->tty == task->signal->tty)) {
53693 + if (files != NULL)
53694 + put_files_struct(files);
53695 + continue;
53696 + }
53697 + rcu_read_lock();
53698 + fdt = files_fdtable(files);
53699 + for (i=0; i < fdt->max_fds; i++) {
53700 + file = fcheck_files(files, i);
53701 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53702 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53703 + p3 = task;
53704 + while (p3->pid > 0) {
53705 + if (p3 == p)
53706 + break;
53707 + p3 = p3->real_parent;
53708 + }
53709 + if (p3 == p)
53710 + break;
53711 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53712 + gr_handle_alertkill(p);
53713 + rcu_read_unlock();
53714 + put_files_struct(files);
53715 + read_unlock(&tasklist_lock);
53716 + fput(our_file);
53717 + return 0;
53718 + }
53719 + }
53720 + rcu_read_unlock();
53721 + put_files_struct(files);
53722 + } while_each_thread(p2, p);
53723 + read_unlock(&tasklist_lock);
53724 +
53725 + fput(our_file);
53726 + return 1;
53727 +}
53728 +
53729 +ssize_t
53730 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53731 +{
53732 + struct gr_arg_wrapper uwrap;
53733 + unsigned char *sprole_salt = NULL;
53734 + unsigned char *sprole_sum = NULL;
53735 + int error = sizeof (struct gr_arg_wrapper);
53736 + int error2 = 0;
53737 +
53738 + mutex_lock(&gr_dev_mutex);
53739 +
53740 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53741 + error = -EPERM;
53742 + goto out;
53743 + }
53744 +
53745 + if (count != sizeof (struct gr_arg_wrapper)) {
53746 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53747 + error = -EINVAL;
53748 + goto out;
53749 + }
53750 +
53751 +
53752 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53753 + gr_auth_expires = 0;
53754 + gr_auth_attempts = 0;
53755 + }
53756 +
53757 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53758 + error = -EFAULT;
53759 + goto out;
53760 + }
53761 +
53762 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53763 + error = -EINVAL;
53764 + goto out;
53765 + }
53766 +
53767 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53768 + error = -EFAULT;
53769 + goto out;
53770 + }
53771 +
53772 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53773 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53774 + time_after(gr_auth_expires, get_seconds())) {
53775 + error = -EBUSY;
53776 + goto out;
53777 + }
53778 +
53779 + /* if non-root trying to do anything other than use a special role,
53780 + do not attempt authentication, do not count towards authentication
53781 + locking
53782 + */
53783 +
53784 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53785 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53786 + current_uid()) {
53787 + error = -EPERM;
53788 + goto out;
53789 + }
53790 +
53791 + /* ensure pw and special role name are null terminated */
53792 +
53793 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53794 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53795 +
53796 + /* Okay.
53797 + * We have our enough of the argument structure..(we have yet
53798 + * to copy_from_user the tables themselves) . Copy the tables
53799 + * only if we need them, i.e. for loading operations. */
53800 +
53801 + switch (gr_usermode->mode) {
53802 + case GR_STATUS:
53803 + if (gr_status & GR_READY) {
53804 + error = 1;
53805 + if (!gr_check_secure_terminal(current))
53806 + error = 3;
53807 + } else
53808 + error = 2;
53809 + goto out;
53810 + case GR_SHUTDOWN:
53811 + if ((gr_status & GR_READY)
53812 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53813 + pax_open_kernel();
53814 + gr_status &= ~GR_READY;
53815 + pax_close_kernel();
53816 +
53817 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53818 + free_variables();
53819 + memset(gr_usermode, 0, sizeof (struct gr_arg));
53820 + memset(gr_system_salt, 0, GR_SALT_LEN);
53821 + memset(gr_system_sum, 0, GR_SHA_LEN);
53822 + } else if (gr_status & GR_READY) {
53823 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53824 + error = -EPERM;
53825 + } else {
53826 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53827 + error = -EAGAIN;
53828 + }
53829 + break;
53830 + case GR_ENABLE:
53831 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53832 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53833 + else {
53834 + if (gr_status & GR_READY)
53835 + error = -EAGAIN;
53836 + else
53837 + error = error2;
53838 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53839 + }
53840 + break;
53841 + case GR_RELOAD:
53842 + if (!(gr_status & GR_READY)) {
53843 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53844 + error = -EAGAIN;
53845 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53846 + preempt_disable();
53847 +
53848 + pax_open_kernel();
53849 + gr_status &= ~GR_READY;
53850 + pax_close_kernel();
53851 +
53852 + free_variables();
53853 + if (!(error2 = gracl_init(gr_usermode))) {
53854 + preempt_enable();
53855 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53856 + } else {
53857 + preempt_enable();
53858 + error = error2;
53859 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53860 + }
53861 + } else {
53862 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53863 + error = -EPERM;
53864 + }
53865 + break;
53866 + case GR_SEGVMOD:
53867 + if (unlikely(!(gr_status & GR_READY))) {
53868 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53869 + error = -EAGAIN;
53870 + break;
53871 + }
53872 +
53873 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53874 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53875 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53876 + struct acl_subject_label *segvacl;
53877 + segvacl =
53878 + lookup_acl_subj_label(gr_usermode->segv_inode,
53879 + gr_usermode->segv_device,
53880 + current->role);
53881 + if (segvacl) {
53882 + segvacl->crashes = 0;
53883 + segvacl->expires = 0;
53884 + }
53885 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53886 + gr_remove_uid(gr_usermode->segv_uid);
53887 + }
53888 + } else {
53889 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53890 + error = -EPERM;
53891 + }
53892 + break;
53893 + case GR_SPROLE:
53894 + case GR_SPROLEPAM:
53895 + if (unlikely(!(gr_status & GR_READY))) {
53896 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53897 + error = -EAGAIN;
53898 + break;
53899 + }
53900 +
53901 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53902 + current->role->expires = 0;
53903 + current->role->auth_attempts = 0;
53904 + }
53905 +
53906 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53907 + time_after(current->role->expires, get_seconds())) {
53908 + error = -EBUSY;
53909 + goto out;
53910 + }
53911 +
53912 + if (lookup_special_role_auth
53913 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53914 + && ((!sprole_salt && !sprole_sum)
53915 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53916 + char *p = "";
53917 + assign_special_role(gr_usermode->sp_role);
53918 + read_lock(&tasklist_lock);
53919 + if (current->real_parent)
53920 + p = current->real_parent->role->rolename;
53921 + read_unlock(&tasklist_lock);
53922 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53923 + p, acl_sp_role_value);
53924 + } else {
53925 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53926 + error = -EPERM;
53927 + if(!(current->role->auth_attempts++))
53928 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53929 +
53930 + goto out;
53931 + }
53932 + break;
53933 + case GR_UNSPROLE:
53934 + if (unlikely(!(gr_status & GR_READY))) {
53935 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53936 + error = -EAGAIN;
53937 + break;
53938 + }
53939 +
53940 + if (current->role->roletype & GR_ROLE_SPECIAL) {
53941 + char *p = "";
53942 + int i = 0;
53943 +
53944 + read_lock(&tasklist_lock);
53945 + if (current->real_parent) {
53946 + p = current->real_parent->role->rolename;
53947 + i = current->real_parent->acl_role_id;
53948 + }
53949 + read_unlock(&tasklist_lock);
53950 +
53951 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53952 + gr_set_acls(1);
53953 + } else {
53954 + error = -EPERM;
53955 + goto out;
53956 + }
53957 + break;
53958 + default:
53959 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53960 + error = -EINVAL;
53961 + break;
53962 + }
53963 +
53964 + if (error != -EPERM)
53965 + goto out;
53966 +
53967 + if(!(gr_auth_attempts++))
53968 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53969 +
53970 + out:
53971 + mutex_unlock(&gr_dev_mutex);
53972 + return error;
53973 +}
53974 +
53975 +/* must be called with
53976 + rcu_read_lock();
53977 + read_lock(&tasklist_lock);
53978 + read_lock(&grsec_exec_file_lock);
53979 +*/
53980 +int gr_apply_subject_to_task(struct task_struct *task)
53981 +{
53982 + struct acl_object_label *obj;
53983 + char *tmpname;
53984 + struct acl_subject_label *tmpsubj;
53985 + struct file *filp;
53986 + struct name_entry *nmatch;
53987 +
53988 + filp = task->exec_file;
53989 + if (filp == NULL)
53990 + return 0;
53991 +
53992 + /* the following is to apply the correct subject
53993 + on binaries running when the RBAC system
53994 + is enabled, when the binaries have been
53995 + replaced or deleted since their execution
53996 + -----
53997 + when the RBAC system starts, the inode/dev
53998 + from exec_file will be one the RBAC system
53999 + is unaware of. It only knows the inode/dev
54000 + of the present file on disk, or the absence
54001 + of it.
54002 + */
54003 + preempt_disable();
54004 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
54005 +
54006 + nmatch = lookup_name_entry(tmpname);
54007 + preempt_enable();
54008 + tmpsubj = NULL;
54009 + if (nmatch) {
54010 + if (nmatch->deleted)
54011 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
54012 + else
54013 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
54014 + if (tmpsubj != NULL)
54015 + task->acl = tmpsubj;
54016 + }
54017 + if (tmpsubj == NULL)
54018 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
54019 + task->role);
54020 + if (task->acl) {
54021 + task->is_writable = 0;
54022 + /* ignore additional mmap checks for processes that are writable
54023 + by the default ACL */
54024 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54025 + if (unlikely(obj->mode & GR_WRITE))
54026 + task->is_writable = 1;
54027 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54028 + if (unlikely(obj->mode & GR_WRITE))
54029 + task->is_writable = 1;
54030 +
54031 + gr_set_proc_res(task);
54032 +
54033 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54034 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54035 +#endif
54036 + } else {
54037 + return 1;
54038 + }
54039 +
54040 + return 0;
54041 +}
54042 +
54043 +int
54044 +gr_set_acls(const int type)
54045 +{
54046 + struct task_struct *task, *task2;
54047 + struct acl_role_label *role = current->role;
54048 + __u16 acl_role_id = current->acl_role_id;
54049 + const struct cred *cred;
54050 + int ret;
54051 +
54052 + rcu_read_lock();
54053 + read_lock(&tasklist_lock);
54054 + read_lock(&grsec_exec_file_lock);
54055 + do_each_thread(task2, task) {
54056 + /* check to see if we're called from the exit handler,
54057 + if so, only replace ACLs that have inherited the admin
54058 + ACL */
54059 +
54060 + if (type && (task->role != role ||
54061 + task->acl_role_id != acl_role_id))
54062 + continue;
54063 +
54064 + task->acl_role_id = 0;
54065 + task->acl_sp_role = 0;
54066 +
54067 + if (task->exec_file) {
54068 + cred = __task_cred(task);
54069 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
54070 + ret = gr_apply_subject_to_task(task);
54071 + if (ret) {
54072 + read_unlock(&grsec_exec_file_lock);
54073 + read_unlock(&tasklist_lock);
54074 + rcu_read_unlock();
54075 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
54076 + return ret;
54077 + }
54078 + } else {
54079 + // it's a kernel process
54080 + task->role = kernel_role;
54081 + task->acl = kernel_role->root_label;
54082 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
54083 + task->acl->mode &= ~GR_PROCFIND;
54084 +#endif
54085 + }
54086 + } while_each_thread(task2, task);
54087 + read_unlock(&grsec_exec_file_lock);
54088 + read_unlock(&tasklist_lock);
54089 + rcu_read_unlock();
54090 +
54091 + return 0;
54092 +}
54093 +
54094 +void
54095 +gr_learn_resource(const struct task_struct *task,
54096 + const int res, const unsigned long wanted, const int gt)
54097 +{
54098 + struct acl_subject_label *acl;
54099 + const struct cred *cred;
54100 +
54101 + if (unlikely((gr_status & GR_READY) &&
54102 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54103 + goto skip_reslog;
54104 +
54105 +#ifdef CONFIG_GRKERNSEC_RESLOG
54106 + gr_log_resource(task, res, wanted, gt);
54107 +#endif
54108 + skip_reslog:
54109 +
54110 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54111 + return;
54112 +
54113 + acl = task->acl;
54114 +
54115 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54116 + !(acl->resmask & (1 << (unsigned short) res))))
54117 + return;
54118 +
54119 + if (wanted >= acl->res[res].rlim_cur) {
54120 + unsigned long res_add;
54121 +
54122 + res_add = wanted;
54123 + switch (res) {
54124 + case RLIMIT_CPU:
54125 + res_add += GR_RLIM_CPU_BUMP;
54126 + break;
54127 + case RLIMIT_FSIZE:
54128 + res_add += GR_RLIM_FSIZE_BUMP;
54129 + break;
54130 + case RLIMIT_DATA:
54131 + res_add += GR_RLIM_DATA_BUMP;
54132 + break;
54133 + case RLIMIT_STACK:
54134 + res_add += GR_RLIM_STACK_BUMP;
54135 + break;
54136 + case RLIMIT_CORE:
54137 + res_add += GR_RLIM_CORE_BUMP;
54138 + break;
54139 + case RLIMIT_RSS:
54140 + res_add += GR_RLIM_RSS_BUMP;
54141 + break;
54142 + case RLIMIT_NPROC:
54143 + res_add += GR_RLIM_NPROC_BUMP;
54144 + break;
54145 + case RLIMIT_NOFILE:
54146 + res_add += GR_RLIM_NOFILE_BUMP;
54147 + break;
54148 + case RLIMIT_MEMLOCK:
54149 + res_add += GR_RLIM_MEMLOCK_BUMP;
54150 + break;
54151 + case RLIMIT_AS:
54152 + res_add += GR_RLIM_AS_BUMP;
54153 + break;
54154 + case RLIMIT_LOCKS:
54155 + res_add += GR_RLIM_LOCKS_BUMP;
54156 + break;
54157 + case RLIMIT_SIGPENDING:
54158 + res_add += GR_RLIM_SIGPENDING_BUMP;
54159 + break;
54160 + case RLIMIT_MSGQUEUE:
54161 + res_add += GR_RLIM_MSGQUEUE_BUMP;
54162 + break;
54163 + case RLIMIT_NICE:
54164 + res_add += GR_RLIM_NICE_BUMP;
54165 + break;
54166 + case RLIMIT_RTPRIO:
54167 + res_add += GR_RLIM_RTPRIO_BUMP;
54168 + break;
54169 + case RLIMIT_RTTIME:
54170 + res_add += GR_RLIM_RTTIME_BUMP;
54171 + break;
54172 + }
54173 +
54174 + acl->res[res].rlim_cur = res_add;
54175 +
54176 + if (wanted > acl->res[res].rlim_max)
54177 + acl->res[res].rlim_max = res_add;
54178 +
54179 + /* only log the subject filename, since resource logging is supported for
54180 + single-subject learning only */
54181 + rcu_read_lock();
54182 + cred = __task_cred(task);
54183 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54184 + task->role->roletype, cred->uid, cred->gid, acl->filename,
54185 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
54186 + "", (unsigned long) res, &task->signal->saved_ip);
54187 + rcu_read_unlock();
54188 + }
54189 +
54190 + return;
54191 +}
54192 +
54193 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54194 +void
54195 +pax_set_initial_flags(struct linux_binprm *bprm)
54196 +{
54197 + struct task_struct *task = current;
54198 + struct acl_subject_label *proc;
54199 + unsigned long flags;
54200 +
54201 + if (unlikely(!(gr_status & GR_READY)))
54202 + return;
54203 +
54204 + flags = pax_get_flags(task);
54205 +
54206 + proc = task->acl;
54207 +
54208 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54209 + flags &= ~MF_PAX_PAGEEXEC;
54210 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54211 + flags &= ~MF_PAX_SEGMEXEC;
54212 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54213 + flags &= ~MF_PAX_RANDMMAP;
54214 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54215 + flags &= ~MF_PAX_EMUTRAMP;
54216 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54217 + flags &= ~MF_PAX_MPROTECT;
54218 +
54219 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54220 + flags |= MF_PAX_PAGEEXEC;
54221 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
54222 + flags |= MF_PAX_SEGMEXEC;
54223 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
54224 + flags |= MF_PAX_RANDMMAP;
54225 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54226 + flags |= MF_PAX_EMUTRAMP;
54227 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54228 + flags |= MF_PAX_MPROTECT;
54229 +
54230 + pax_set_flags(task, flags);
54231 +
54232 + return;
54233 +}
54234 +#endif
54235 +
54236 +#ifdef CONFIG_SYSCTL
54237 +/* Eric Biederman likes breaking userland ABI and every inode-based security
54238 + system to save 35kb of memory */
54239 +
54240 +/* we modify the passed in filename, but adjust it back before returning */
54241 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
54242 +{
54243 + struct name_entry *nmatch;
54244 + char *p, *lastp = NULL;
54245 + struct acl_object_label *obj = NULL, *tmp;
54246 + struct acl_subject_label *tmpsubj;
54247 + char c = '\0';
54248 +
54249 + read_lock(&gr_inode_lock);
54250 +
54251 + p = name + len - 1;
54252 + do {
54253 + nmatch = lookup_name_entry(name);
54254 + if (lastp != NULL)
54255 + *lastp = c;
54256 +
54257 + if (nmatch == NULL)
54258 + goto next_component;
54259 + tmpsubj = current->acl;
54260 + do {
54261 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
54262 + if (obj != NULL) {
54263 + tmp = obj->globbed;
54264 + while (tmp) {
54265 + if (!glob_match(tmp->filename, name)) {
54266 + obj = tmp;
54267 + goto found_obj;
54268 + }
54269 + tmp = tmp->next;
54270 + }
54271 + goto found_obj;
54272 + }
54273 + } while ((tmpsubj = tmpsubj->parent_subject));
54274 +next_component:
54275 + /* end case */
54276 + if (p == name)
54277 + break;
54278 +
54279 + while (*p != '/')
54280 + p--;
54281 + if (p == name)
54282 + lastp = p + 1;
54283 + else {
54284 + lastp = p;
54285 + p--;
54286 + }
54287 + c = *lastp;
54288 + *lastp = '\0';
54289 + } while (1);
54290 +found_obj:
54291 + read_unlock(&gr_inode_lock);
54292 + /* obj returned will always be non-null */
54293 + return obj;
54294 +}
54295 +
54296 +/* returns 0 when allowing, non-zero on error
54297 + op of 0 is used for readdir, so we don't log the names of hidden files
54298 +*/
54299 +__u32
54300 +gr_handle_sysctl(const struct ctl_table *table, const int op)
54301 +{
54302 + struct ctl_table *tmp;
54303 + const char *proc_sys = "/proc/sys";
54304 + char *path;
54305 + struct acl_object_label *obj;
54306 + unsigned short len = 0, pos = 0, depth = 0, i;
54307 + __u32 err = 0;
54308 + __u32 mode = 0;
54309 +
54310 + if (unlikely(!(gr_status & GR_READY)))
54311 + return 0;
54312 +
54313 + /* for now, ignore operations on non-sysctl entries if it's not a
54314 + readdir*/
54315 + if (table->child != NULL && op != 0)
54316 + return 0;
54317 +
54318 + mode |= GR_FIND;
54319 + /* it's only a read if it's an entry, read on dirs is for readdir */
54320 + if (op & MAY_READ)
54321 + mode |= GR_READ;
54322 + if (op & MAY_WRITE)
54323 + mode |= GR_WRITE;
54324 +
54325 + preempt_disable();
54326 +
54327 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
54328 +
54329 + /* it's only a read/write if it's an actual entry, not a dir
54330 + (which are opened for readdir)
54331 + */
54332 +
54333 + /* convert the requested sysctl entry into a pathname */
54334 +
54335 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54336 + len += strlen(tmp->procname);
54337 + len++;
54338 + depth++;
54339 + }
54340 +
54341 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
54342 + /* deny */
54343 + goto out;
54344 + }
54345 +
54346 + memset(path, 0, PAGE_SIZE);
54347 +
54348 + memcpy(path, proc_sys, strlen(proc_sys));
54349 +
54350 + pos += strlen(proc_sys);
54351 +
54352 + for (; depth > 0; depth--) {
54353 + path[pos] = '/';
54354 + pos++;
54355 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54356 + if (depth == i) {
54357 + memcpy(path + pos, tmp->procname,
54358 + strlen(tmp->procname));
54359 + pos += strlen(tmp->procname);
54360 + }
54361 + i++;
54362 + }
54363 + }
54364 +
54365 + obj = gr_lookup_by_name(path, pos);
54366 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
54367 +
54368 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
54369 + ((err & mode) != mode))) {
54370 + __u32 new_mode = mode;
54371 +
54372 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
54373 +
54374 + err = 0;
54375 + gr_log_learn_sysctl(path, new_mode);
54376 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
54377 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
54378 + err = -ENOENT;
54379 + } else if (!(err & GR_FIND)) {
54380 + err = -ENOENT;
54381 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
54382 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
54383 + path, (mode & GR_READ) ? " reading" : "",
54384 + (mode & GR_WRITE) ? " writing" : "");
54385 + err = -EACCES;
54386 + } else if ((err & mode) != mode) {
54387 + err = -EACCES;
54388 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
54389 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
54390 + path, (mode & GR_READ) ? " reading" : "",
54391 + (mode & GR_WRITE) ? " writing" : "");
54392 + err = 0;
54393 + } else
54394 + err = 0;
54395 +
54396 + out:
54397 + preempt_enable();
54398 +
54399 + return err;
54400 +}
54401 +#endif
54402 +
54403 +int
54404 +gr_handle_proc_ptrace(struct task_struct *task)
54405 +{
54406 + struct file *filp;
54407 + struct task_struct *tmp = task;
54408 + struct task_struct *curtemp = current;
54409 + __u32 retmode;
54410 +
54411 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54412 + if (unlikely(!(gr_status & GR_READY)))
54413 + return 0;
54414 +#endif
54415 +
54416 + read_lock(&tasklist_lock);
54417 + read_lock(&grsec_exec_file_lock);
54418 + filp = task->exec_file;
54419 +
54420 + while (tmp->pid > 0) {
54421 + if (tmp == curtemp)
54422 + break;
54423 + tmp = tmp->real_parent;
54424 + }
54425 +
54426 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54427 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
54428 + read_unlock(&grsec_exec_file_lock);
54429 + read_unlock(&tasklist_lock);
54430 + return 1;
54431 + }
54432 +
54433 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54434 + if (!(gr_status & GR_READY)) {
54435 + read_unlock(&grsec_exec_file_lock);
54436 + read_unlock(&tasklist_lock);
54437 + return 0;
54438 + }
54439 +#endif
54440 +
54441 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
54442 + read_unlock(&grsec_exec_file_lock);
54443 + read_unlock(&tasklist_lock);
54444 +
54445 + if (retmode & GR_NOPTRACE)
54446 + return 1;
54447 +
54448 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
54449 + && (current->acl != task->acl || (current->acl != current->role->root_label
54450 + && current->pid != task->pid)))
54451 + return 1;
54452 +
54453 + return 0;
54454 +}
54455 +
54456 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
54457 +{
54458 + if (unlikely(!(gr_status & GR_READY)))
54459 + return;
54460 +
54461 + if (!(current->role->roletype & GR_ROLE_GOD))
54462 + return;
54463 +
54464 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
54465 + p->role->rolename, gr_task_roletype_to_char(p),
54466 + p->acl->filename);
54467 +}
54468 +
54469 +int
54470 +gr_handle_ptrace(struct task_struct *task, const long request)
54471 +{
54472 + struct task_struct *tmp = task;
54473 + struct task_struct *curtemp = current;
54474 + __u32 retmode;
54475 +
54476 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54477 + if (unlikely(!(gr_status & GR_READY)))
54478 + return 0;
54479 +#endif
54480 +
54481 + read_lock(&tasklist_lock);
54482 + while (tmp->pid > 0) {
54483 + if (tmp == curtemp)
54484 + break;
54485 + tmp = tmp->real_parent;
54486 + }
54487 +
54488 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54489 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
54490 + read_unlock(&tasklist_lock);
54491 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54492 + return 1;
54493 + }
54494 + read_unlock(&tasklist_lock);
54495 +
54496 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54497 + if (!(gr_status & GR_READY))
54498 + return 0;
54499 +#endif
54500 +
54501 + read_lock(&grsec_exec_file_lock);
54502 + if (unlikely(!task->exec_file)) {
54503 + read_unlock(&grsec_exec_file_lock);
54504 + return 0;
54505 + }
54506 +
54507 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
54508 + read_unlock(&grsec_exec_file_lock);
54509 +
54510 + if (retmode & GR_NOPTRACE) {
54511 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54512 + return 1;
54513 + }
54514 +
54515 + if (retmode & GR_PTRACERD) {
54516 + switch (request) {
54517 + case PTRACE_SEIZE:
54518 + case PTRACE_POKETEXT:
54519 + case PTRACE_POKEDATA:
54520 + case PTRACE_POKEUSR:
54521 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
54522 + case PTRACE_SETREGS:
54523 + case PTRACE_SETFPREGS:
54524 +#endif
54525 +#ifdef CONFIG_X86
54526 + case PTRACE_SETFPXREGS:
54527 +#endif
54528 +#ifdef CONFIG_ALTIVEC
54529 + case PTRACE_SETVRREGS:
54530 +#endif
54531 + return 1;
54532 + default:
54533 + return 0;
54534 + }
54535 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
54536 + !(current->role->roletype & GR_ROLE_GOD) &&
54537 + (current->acl != task->acl)) {
54538 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54539 + return 1;
54540 + }
54541 +
54542 + return 0;
54543 +}
54544 +
54545 +static int is_writable_mmap(const struct file *filp)
54546 +{
54547 + struct task_struct *task = current;
54548 + struct acl_object_label *obj, *obj2;
54549 +
54550 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
54551 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
54552 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54553 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
54554 + task->role->root_label);
54555 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
54556 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
54557 + return 1;
54558 + }
54559 + }
54560 + return 0;
54561 +}
54562 +
54563 +int
54564 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
54565 +{
54566 + __u32 mode;
54567 +
54568 + if (unlikely(!file || !(prot & PROT_EXEC)))
54569 + return 1;
54570 +
54571 + if (is_writable_mmap(file))
54572 + return 0;
54573 +
54574 + mode =
54575 + gr_search_file(file->f_path.dentry,
54576 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54577 + file->f_path.mnt);
54578 +
54579 + if (!gr_tpe_allow(file))
54580 + return 0;
54581 +
54582 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54583 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54584 + return 0;
54585 + } else if (unlikely(!(mode & GR_EXEC))) {
54586 + return 0;
54587 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54588 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54589 + return 1;
54590 + }
54591 +
54592 + return 1;
54593 +}
54594 +
54595 +int
54596 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54597 +{
54598 + __u32 mode;
54599 +
54600 + if (unlikely(!file || !(prot & PROT_EXEC)))
54601 + return 1;
54602 +
54603 + if (is_writable_mmap(file))
54604 + return 0;
54605 +
54606 + mode =
54607 + gr_search_file(file->f_path.dentry,
54608 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54609 + file->f_path.mnt);
54610 +
54611 + if (!gr_tpe_allow(file))
54612 + return 0;
54613 +
54614 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54615 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54616 + return 0;
54617 + } else if (unlikely(!(mode & GR_EXEC))) {
54618 + return 0;
54619 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54620 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54621 + return 1;
54622 + }
54623 +
54624 + return 1;
54625 +}
54626 +
54627 +void
54628 +gr_acl_handle_psacct(struct task_struct *task, const long code)
54629 +{
54630 + unsigned long runtime;
54631 + unsigned long cputime;
54632 + unsigned int wday, cday;
54633 + __u8 whr, chr;
54634 + __u8 wmin, cmin;
54635 + __u8 wsec, csec;
54636 + struct timespec timeval;
54637 +
54638 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
54639 + !(task->acl->mode & GR_PROCACCT)))
54640 + return;
54641 +
54642 + do_posix_clock_monotonic_gettime(&timeval);
54643 + runtime = timeval.tv_sec - task->start_time.tv_sec;
54644 + wday = runtime / (3600 * 24);
54645 + runtime -= wday * (3600 * 24);
54646 + whr = runtime / 3600;
54647 + runtime -= whr * 3600;
54648 + wmin = runtime / 60;
54649 + runtime -= wmin * 60;
54650 + wsec = runtime;
54651 +
54652 + cputime = (task->utime + task->stime) / HZ;
54653 + cday = cputime / (3600 * 24);
54654 + cputime -= cday * (3600 * 24);
54655 + chr = cputime / 3600;
54656 + cputime -= chr * 3600;
54657 + cmin = cputime / 60;
54658 + cputime -= cmin * 60;
54659 + csec = cputime;
54660 +
54661 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
54662 +
54663 + return;
54664 +}
54665 +
54666 +void gr_set_kernel_label(struct task_struct *task)
54667 +{
54668 + if (gr_status & GR_READY) {
54669 + task->role = kernel_role;
54670 + task->acl = kernel_role->root_label;
54671 + }
54672 + return;
54673 +}
54674 +
54675 +#ifdef CONFIG_TASKSTATS
54676 +int gr_is_taskstats_denied(int pid)
54677 +{
54678 + struct task_struct *task;
54679 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54680 + const struct cred *cred;
54681 +#endif
54682 + int ret = 0;
54683 +
54684 + /* restrict taskstats viewing to un-chrooted root users
54685 + who have the 'view' subject flag if the RBAC system is enabled
54686 + */
54687 +
54688 + rcu_read_lock();
54689 + read_lock(&tasklist_lock);
54690 + task = find_task_by_vpid(pid);
54691 + if (task) {
54692 +#ifdef CONFIG_GRKERNSEC_CHROOT
54693 + if (proc_is_chrooted(task))
54694 + ret = -EACCES;
54695 +#endif
54696 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54697 + cred = __task_cred(task);
54698 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54699 + if (cred->uid != 0)
54700 + ret = -EACCES;
54701 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54702 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
54703 + ret = -EACCES;
54704 +#endif
54705 +#endif
54706 + if (gr_status & GR_READY) {
54707 + if (!(task->acl->mode & GR_VIEW))
54708 + ret = -EACCES;
54709 + }
54710 + } else
54711 + ret = -ENOENT;
54712 +
54713 + read_unlock(&tasklist_lock);
54714 + rcu_read_unlock();
54715 +
54716 + return ret;
54717 +}
54718 +#endif
54719 +
54720 +/* AUXV entries are filled via a descendant of search_binary_handler
54721 + after we've already applied the subject for the target
54722 +*/
54723 +int gr_acl_enable_at_secure(void)
54724 +{
54725 + if (unlikely(!(gr_status & GR_READY)))
54726 + return 0;
54727 +
54728 + if (current->acl->mode & GR_ATSECURE)
54729 + return 1;
54730 +
54731 + return 0;
54732 +}
54733 +
54734 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54735 +{
54736 + struct task_struct *task = current;
54737 + struct dentry *dentry = file->f_path.dentry;
54738 + struct vfsmount *mnt = file->f_path.mnt;
54739 + struct acl_object_label *obj, *tmp;
54740 + struct acl_subject_label *subj;
54741 + unsigned int bufsize;
54742 + int is_not_root;
54743 + char *path;
54744 + dev_t dev = __get_dev(dentry);
54745 +
54746 + if (unlikely(!(gr_status & GR_READY)))
54747 + return 1;
54748 +
54749 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54750 + return 1;
54751 +
54752 + /* ignore Eric Biederman */
54753 + if (IS_PRIVATE(dentry->d_inode))
54754 + return 1;
54755 +
54756 + subj = task->acl;
54757 + do {
54758 + obj = lookup_acl_obj_label(ino, dev, subj);
54759 + if (obj != NULL)
54760 + return (obj->mode & GR_FIND) ? 1 : 0;
54761 + } while ((subj = subj->parent_subject));
54762 +
54763 + /* this is purely an optimization since we're looking for an object
54764 + for the directory we're doing a readdir on
54765 + if it's possible for any globbed object to match the entry we're
54766 + filling into the directory, then the object we find here will be
54767 + an anchor point with attached globbed objects
54768 + */
54769 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54770 + if (obj->globbed == NULL)
54771 + return (obj->mode & GR_FIND) ? 1 : 0;
54772 +
54773 + is_not_root = ((obj->filename[0] == '/') &&
54774 + (obj->filename[1] == '\0')) ? 0 : 1;
54775 + bufsize = PAGE_SIZE - namelen - is_not_root;
54776 +
54777 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
54778 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54779 + return 1;
54780 +
54781 + preempt_disable();
54782 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54783 + bufsize);
54784 +
54785 + bufsize = strlen(path);
54786 +
54787 + /* if base is "/", don't append an additional slash */
54788 + if (is_not_root)
54789 + *(path + bufsize) = '/';
54790 + memcpy(path + bufsize + is_not_root, name, namelen);
54791 + *(path + bufsize + namelen + is_not_root) = '\0';
54792 +
54793 + tmp = obj->globbed;
54794 + while (tmp) {
54795 + if (!glob_match(tmp->filename, path)) {
54796 + preempt_enable();
54797 + return (tmp->mode & GR_FIND) ? 1 : 0;
54798 + }
54799 + tmp = tmp->next;
54800 + }
54801 + preempt_enable();
54802 + return (obj->mode & GR_FIND) ? 1 : 0;
54803 +}
54804 +
54805 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54806 +EXPORT_SYMBOL(gr_acl_is_enabled);
54807 +#endif
54808 +EXPORT_SYMBOL(gr_learn_resource);
54809 +EXPORT_SYMBOL(gr_set_kernel_label);
54810 +#ifdef CONFIG_SECURITY
54811 +EXPORT_SYMBOL(gr_check_user_change);
54812 +EXPORT_SYMBOL(gr_check_group_change);
54813 +#endif
54814 +
54815 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54816 new file mode 100644
54817 index 0000000..34fefda
54818 --- /dev/null
54819 +++ b/grsecurity/gracl_alloc.c
54820 @@ -0,0 +1,105 @@
54821 +#include <linux/kernel.h>
54822 +#include <linux/mm.h>
54823 +#include <linux/slab.h>
54824 +#include <linux/vmalloc.h>
54825 +#include <linux/gracl.h>
54826 +#include <linux/grsecurity.h>
54827 +
54828 +static unsigned long alloc_stack_next = 1;
54829 +static unsigned long alloc_stack_size = 1;
54830 +static void **alloc_stack;
54831 +
54832 +static __inline__ int
54833 +alloc_pop(void)
54834 +{
54835 + if (alloc_stack_next == 1)
54836 + return 0;
54837 +
54838 + kfree(alloc_stack[alloc_stack_next - 2]);
54839 +
54840 + alloc_stack_next--;
54841 +
54842 + return 1;
54843 +}
54844 +
54845 +static __inline__ int
54846 +alloc_push(void *buf)
54847 +{
54848 + if (alloc_stack_next >= alloc_stack_size)
54849 + return 1;
54850 +
54851 + alloc_stack[alloc_stack_next - 1] = buf;
54852 +
54853 + alloc_stack_next++;
54854 +
54855 + return 0;
54856 +}
54857 +
54858 +void *
54859 +acl_alloc(unsigned long len)
54860 +{
54861 + void *ret = NULL;
54862 +
54863 + if (!len || len > PAGE_SIZE)
54864 + goto out;
54865 +
54866 + ret = kmalloc(len, GFP_KERNEL);
54867 +
54868 + if (ret) {
54869 + if (alloc_push(ret)) {
54870 + kfree(ret);
54871 + ret = NULL;
54872 + }
54873 + }
54874 +
54875 +out:
54876 + return ret;
54877 +}
54878 +
54879 +void *
54880 +acl_alloc_num(unsigned long num, unsigned long len)
54881 +{
54882 + if (!len || (num > (PAGE_SIZE / len)))
54883 + return NULL;
54884 +
54885 + return acl_alloc(num * len);
54886 +}
54887 +
54888 +void
54889 +acl_free_all(void)
54890 +{
54891 + if (gr_acl_is_enabled() || !alloc_stack)
54892 + return;
54893 +
54894 + while (alloc_pop()) ;
54895 +
54896 + if (alloc_stack) {
54897 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54898 + kfree(alloc_stack);
54899 + else
54900 + vfree(alloc_stack);
54901 + }
54902 +
54903 + alloc_stack = NULL;
54904 + alloc_stack_size = 1;
54905 + alloc_stack_next = 1;
54906 +
54907 + return;
54908 +}
54909 +
54910 +int
54911 +acl_alloc_stack_init(unsigned long size)
54912 +{
54913 + if ((size * sizeof (void *)) <= PAGE_SIZE)
54914 + alloc_stack =
54915 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54916 + else
54917 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
54918 +
54919 + alloc_stack_size = size;
54920 +
54921 + if (!alloc_stack)
54922 + return 0;
54923 + else
54924 + return 1;
54925 +}
54926 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54927 new file mode 100644
54928 index 0000000..6d21049
54929 --- /dev/null
54930 +++ b/grsecurity/gracl_cap.c
54931 @@ -0,0 +1,110 @@
54932 +#include <linux/kernel.h>
54933 +#include <linux/module.h>
54934 +#include <linux/sched.h>
54935 +#include <linux/gracl.h>
54936 +#include <linux/grsecurity.h>
54937 +#include <linux/grinternal.h>
54938 +
54939 +extern const char *captab_log[];
54940 +extern int captab_log_entries;
54941 +
54942 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
54943 +{
54944 + struct acl_subject_label *curracl;
54945 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54946 + kernel_cap_t cap_audit = __cap_empty_set;
54947 +
54948 + if (!gr_acl_is_enabled())
54949 + return 1;
54950 +
54951 + curracl = task->acl;
54952 +
54953 + cap_drop = curracl->cap_lower;
54954 + cap_mask = curracl->cap_mask;
54955 + cap_audit = curracl->cap_invert_audit;
54956 +
54957 + while ((curracl = curracl->parent_subject)) {
54958 + /* if the cap isn't specified in the current computed mask but is specified in the
54959 + current level subject, and is lowered in the current level subject, then add
54960 + it to the set of dropped capabilities
54961 + otherwise, add the current level subject's mask to the current computed mask
54962 + */
54963 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54964 + cap_raise(cap_mask, cap);
54965 + if (cap_raised(curracl->cap_lower, cap))
54966 + cap_raise(cap_drop, cap);
54967 + if (cap_raised(curracl->cap_invert_audit, cap))
54968 + cap_raise(cap_audit, cap);
54969 + }
54970 + }
54971 +
54972 + if (!cap_raised(cap_drop, cap)) {
54973 + if (cap_raised(cap_audit, cap))
54974 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54975 + return 1;
54976 + }
54977 +
54978 + curracl = task->acl;
54979 +
54980 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54981 + && cap_raised(cred->cap_effective, cap)) {
54982 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54983 + task->role->roletype, cred->uid,
54984 + cred->gid, task->exec_file ?
54985 + gr_to_filename(task->exec_file->f_path.dentry,
54986 + task->exec_file->f_path.mnt) : curracl->filename,
54987 + curracl->filename, 0UL,
54988 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54989 + return 1;
54990 + }
54991 +
54992 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54993 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54994 +
54995 + return 0;
54996 +}
54997 +
54998 +int
54999 +gr_acl_is_capable(const int cap)
55000 +{
55001 + return gr_task_acl_is_capable(current, current_cred(), cap);
55002 +}
55003 +
55004 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
55005 +{
55006 + struct acl_subject_label *curracl;
55007 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55008 +
55009 + if (!gr_acl_is_enabled())
55010 + return 1;
55011 +
55012 + curracl = task->acl;
55013 +
55014 + cap_drop = curracl->cap_lower;
55015 + cap_mask = curracl->cap_mask;
55016 +
55017 + while ((curracl = curracl->parent_subject)) {
55018 + /* if the cap isn't specified in the current computed mask but is specified in the
55019 + current level subject, and is lowered in the current level subject, then add
55020 + it to the set of dropped capabilities
55021 + otherwise, add the current level subject's mask to the current computed mask
55022 + */
55023 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55024 + cap_raise(cap_mask, cap);
55025 + if (cap_raised(curracl->cap_lower, cap))
55026 + cap_raise(cap_drop, cap);
55027 + }
55028 + }
55029 +
55030 + if (!cap_raised(cap_drop, cap))
55031 + return 1;
55032 +
55033 + return 0;
55034 +}
55035 +
55036 +int
55037 +gr_acl_is_capable_nolog(const int cap)
55038 +{
55039 + return gr_task_acl_is_capable_nolog(current, cap);
55040 +}
55041 +
55042 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
55043 new file mode 100644
55044 index 0000000..88d0e87
55045 --- /dev/null
55046 +++ b/grsecurity/gracl_fs.c
55047 @@ -0,0 +1,435 @@
55048 +#include <linux/kernel.h>
55049 +#include <linux/sched.h>
55050 +#include <linux/types.h>
55051 +#include <linux/fs.h>
55052 +#include <linux/file.h>
55053 +#include <linux/stat.h>
55054 +#include <linux/grsecurity.h>
55055 +#include <linux/grinternal.h>
55056 +#include <linux/gracl.h>
55057 +
55058 +umode_t
55059 +gr_acl_umask(void)
55060 +{
55061 + if (unlikely(!gr_acl_is_enabled()))
55062 + return 0;
55063 +
55064 + return current->role->umask;
55065 +}
55066 +
55067 +__u32
55068 +gr_acl_handle_hidden_file(const struct dentry * dentry,
55069 + const struct vfsmount * mnt)
55070 +{
55071 + __u32 mode;
55072 +
55073 + if (unlikely(!dentry->d_inode))
55074 + return GR_FIND;
55075 +
55076 + mode =
55077 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
55078 +
55079 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
55080 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55081 + return mode;
55082 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
55083 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55084 + return 0;
55085 + } else if (unlikely(!(mode & GR_FIND)))
55086 + return 0;
55087 +
55088 + return GR_FIND;
55089 +}
55090 +
55091 +__u32
55092 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
55093 + int acc_mode)
55094 +{
55095 + __u32 reqmode = GR_FIND;
55096 + __u32 mode;
55097 +
55098 + if (unlikely(!dentry->d_inode))
55099 + return reqmode;
55100 +
55101 + if (acc_mode & MAY_APPEND)
55102 + reqmode |= GR_APPEND;
55103 + else if (acc_mode & MAY_WRITE)
55104 + reqmode |= GR_WRITE;
55105 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
55106 + reqmode |= GR_READ;
55107 +
55108 + mode =
55109 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55110 + mnt);
55111 +
55112 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55113 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55114 + reqmode & GR_READ ? " reading" : "",
55115 + reqmode & GR_WRITE ? " writing" : reqmode &
55116 + GR_APPEND ? " appending" : "");
55117 + return reqmode;
55118 + } else
55119 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55120 + {
55121 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55122 + reqmode & GR_READ ? " reading" : "",
55123 + reqmode & GR_WRITE ? " writing" : reqmode &
55124 + GR_APPEND ? " appending" : "");
55125 + return 0;
55126 + } else if (unlikely((mode & reqmode) != reqmode))
55127 + return 0;
55128 +
55129 + return reqmode;
55130 +}
55131 +
55132 +__u32
55133 +gr_acl_handle_creat(const struct dentry * dentry,
55134 + const struct dentry * p_dentry,
55135 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55136 + const int imode)
55137 +{
55138 + __u32 reqmode = GR_WRITE | GR_CREATE;
55139 + __u32 mode;
55140 +
55141 + if (acc_mode & MAY_APPEND)
55142 + reqmode |= GR_APPEND;
55143 + // if a directory was required or the directory already exists, then
55144 + // don't count this open as a read
55145 + if ((acc_mode & MAY_READ) &&
55146 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
55147 + reqmode |= GR_READ;
55148 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
55149 + reqmode |= GR_SETID;
55150 +
55151 + mode =
55152 + gr_check_create(dentry, p_dentry, p_mnt,
55153 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55154 +
55155 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55156 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55157 + reqmode & GR_READ ? " reading" : "",
55158 + reqmode & GR_WRITE ? " writing" : reqmode &
55159 + GR_APPEND ? " appending" : "");
55160 + return reqmode;
55161 + } else
55162 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55163 + {
55164 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55165 + reqmode & GR_READ ? " reading" : "",
55166 + reqmode & GR_WRITE ? " writing" : reqmode &
55167 + GR_APPEND ? " appending" : "");
55168 + return 0;
55169 + } else if (unlikely((mode & reqmode) != reqmode))
55170 + return 0;
55171 +
55172 + return reqmode;
55173 +}
55174 +
55175 +__u32
55176 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55177 + const int fmode)
55178 +{
55179 + __u32 mode, reqmode = GR_FIND;
55180 +
55181 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55182 + reqmode |= GR_EXEC;
55183 + if (fmode & S_IWOTH)
55184 + reqmode |= GR_WRITE;
55185 + if (fmode & S_IROTH)
55186 + reqmode |= GR_READ;
55187 +
55188 + mode =
55189 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55190 + mnt);
55191 +
55192 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55193 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55194 + reqmode & GR_READ ? " reading" : "",
55195 + reqmode & GR_WRITE ? " writing" : "",
55196 + reqmode & GR_EXEC ? " executing" : "");
55197 + return reqmode;
55198 + } else
55199 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55200 + {
55201 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55202 + reqmode & GR_READ ? " reading" : "",
55203 + reqmode & GR_WRITE ? " writing" : "",
55204 + reqmode & GR_EXEC ? " executing" : "");
55205 + return 0;
55206 + } else if (unlikely((mode & reqmode) != reqmode))
55207 + return 0;
55208 +
55209 + return reqmode;
55210 +}
55211 +
55212 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55213 +{
55214 + __u32 mode;
55215 +
55216 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55217 +
55218 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55219 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
55220 + return mode;
55221 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55222 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
55223 + return 0;
55224 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
55225 + return 0;
55226 +
55227 + return (reqmode);
55228 +}
55229 +
55230 +__u32
55231 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
55232 +{
55233 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
55234 +}
55235 +
55236 +__u32
55237 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
55238 +{
55239 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
55240 +}
55241 +
55242 +__u32
55243 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
55244 +{
55245 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
55246 +}
55247 +
55248 +__u32
55249 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
55250 +{
55251 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
55252 +}
55253 +
55254 +__u32
55255 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
55256 + umode_t *modeptr)
55257 +{
55258 + umode_t mode;
55259 +
55260 + *modeptr &= ~gr_acl_umask();
55261 + mode = *modeptr;
55262 +
55263 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
55264 + return 1;
55265 +
55266 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
55267 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55268 + GR_CHMOD_ACL_MSG);
55269 + } else {
55270 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
55271 + }
55272 +}
55273 +
55274 +__u32
55275 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
55276 +{
55277 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
55278 +}
55279 +
55280 +__u32
55281 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
55282 +{
55283 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
55284 +}
55285 +
55286 +__u32
55287 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
55288 +{
55289 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
55290 +}
55291 +
55292 +__u32
55293 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
55294 +{
55295 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
55296 + GR_UNIXCONNECT_ACL_MSG);
55297 +}
55298 +
55299 +/* hardlinks require at minimum create and link permission,
55300 + any additional privilege required is based on the
55301 + privilege of the file being linked to
55302 +*/
55303 +__u32
55304 +gr_acl_handle_link(const struct dentry * new_dentry,
55305 + const struct dentry * parent_dentry,
55306 + const struct vfsmount * parent_mnt,
55307 + const struct dentry * old_dentry,
55308 + const struct vfsmount * old_mnt, const char *to)
55309 +{
55310 + __u32 mode;
55311 + __u32 needmode = GR_CREATE | GR_LINK;
55312 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
55313 +
55314 + mode =
55315 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
55316 + old_mnt);
55317 +
55318 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
55319 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55320 + return mode;
55321 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55322 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55323 + return 0;
55324 + } else if (unlikely((mode & needmode) != needmode))
55325 + return 0;
55326 +
55327 + return 1;
55328 +}
55329 +
55330 +__u32
55331 +gr_acl_handle_symlink(const struct dentry * new_dentry,
55332 + const struct dentry * parent_dentry,
55333 + const struct vfsmount * parent_mnt, const char *from)
55334 +{
55335 + __u32 needmode = GR_WRITE | GR_CREATE;
55336 + __u32 mode;
55337 +
55338 + mode =
55339 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
55340 + GR_CREATE | GR_AUDIT_CREATE |
55341 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
55342 +
55343 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
55344 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55345 + return mode;
55346 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55347 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55348 + return 0;
55349 + } else if (unlikely((mode & needmode) != needmode))
55350 + return 0;
55351 +
55352 + return (GR_WRITE | GR_CREATE);
55353 +}
55354 +
55355 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
55356 +{
55357 + __u32 mode;
55358 +
55359 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55360 +
55361 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55362 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
55363 + return mode;
55364 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55365 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
55366 + return 0;
55367 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
55368 + return 0;
55369 +
55370 + return (reqmode);
55371 +}
55372 +
55373 +__u32
55374 +gr_acl_handle_mknod(const struct dentry * new_dentry,
55375 + const struct dentry * parent_dentry,
55376 + const struct vfsmount * parent_mnt,
55377 + const int mode)
55378 +{
55379 + __u32 reqmode = GR_WRITE | GR_CREATE;
55380 + if (unlikely(mode & (S_ISUID | S_ISGID)))
55381 + reqmode |= GR_SETID;
55382 +
55383 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55384 + reqmode, GR_MKNOD_ACL_MSG);
55385 +}
55386 +
55387 +__u32
55388 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
55389 + const struct dentry *parent_dentry,
55390 + const struct vfsmount *parent_mnt)
55391 +{
55392 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55393 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
55394 +}
55395 +
55396 +#define RENAME_CHECK_SUCCESS(old, new) \
55397 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
55398 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
55399 +
55400 +int
55401 +gr_acl_handle_rename(struct dentry *new_dentry,
55402 + struct dentry *parent_dentry,
55403 + const struct vfsmount *parent_mnt,
55404 + struct dentry *old_dentry,
55405 + struct inode *old_parent_inode,
55406 + struct vfsmount *old_mnt, const char *newname)
55407 +{
55408 + __u32 comp1, comp2;
55409 + int error = 0;
55410 +
55411 + if (unlikely(!gr_acl_is_enabled()))
55412 + return 0;
55413 +
55414 + if (!new_dentry->d_inode) {
55415 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
55416 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
55417 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
55418 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
55419 + GR_DELETE | GR_AUDIT_DELETE |
55420 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55421 + GR_SUPPRESS, old_mnt);
55422 + } else {
55423 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
55424 + GR_CREATE | GR_DELETE |
55425 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
55426 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55427 + GR_SUPPRESS, parent_mnt);
55428 + comp2 =
55429 + gr_search_file(old_dentry,
55430 + GR_READ | GR_WRITE | GR_AUDIT_READ |
55431 + GR_DELETE | GR_AUDIT_DELETE |
55432 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
55433 + }
55434 +
55435 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
55436 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
55437 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55438 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
55439 + && !(comp2 & GR_SUPPRESS)) {
55440 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55441 + error = -EACCES;
55442 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
55443 + error = -EACCES;
55444 +
55445 + return error;
55446 +}
55447 +
55448 +void
55449 +gr_acl_handle_exit(void)
55450 +{
55451 + u16 id;
55452 + char *rolename;
55453 + struct file *exec_file;
55454 +
55455 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
55456 + !(current->role->roletype & GR_ROLE_PERSIST))) {
55457 + id = current->acl_role_id;
55458 + rolename = current->role->rolename;
55459 + gr_set_acls(1);
55460 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
55461 + }
55462 +
55463 + write_lock(&grsec_exec_file_lock);
55464 + exec_file = current->exec_file;
55465 + current->exec_file = NULL;
55466 + write_unlock(&grsec_exec_file_lock);
55467 +
55468 + if (exec_file)
55469 + fput(exec_file);
55470 +}
55471 +
55472 +int
55473 +gr_acl_handle_procpidmem(const struct task_struct *task)
55474 +{
55475 + if (unlikely(!gr_acl_is_enabled()))
55476 + return 0;
55477 +
55478 + if (task != current && task->acl->mode & GR_PROTPROCFD)
55479 + return -EACCES;
55480 +
55481 + return 0;
55482 +}
55483 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
55484 new file mode 100644
55485 index 0000000..58800a7
55486 --- /dev/null
55487 +++ b/grsecurity/gracl_ip.c
55488 @@ -0,0 +1,384 @@
55489 +#include <linux/kernel.h>
55490 +#include <asm/uaccess.h>
55491 +#include <asm/errno.h>
55492 +#include <net/sock.h>
55493 +#include <linux/file.h>
55494 +#include <linux/fs.h>
55495 +#include <linux/net.h>
55496 +#include <linux/in.h>
55497 +#include <linux/skbuff.h>
55498 +#include <linux/ip.h>
55499 +#include <linux/udp.h>
55500 +#include <linux/types.h>
55501 +#include <linux/sched.h>
55502 +#include <linux/netdevice.h>
55503 +#include <linux/inetdevice.h>
55504 +#include <linux/gracl.h>
55505 +#include <linux/grsecurity.h>
55506 +#include <linux/grinternal.h>
55507 +
55508 +#define GR_BIND 0x01
55509 +#define GR_CONNECT 0x02
55510 +#define GR_INVERT 0x04
55511 +#define GR_BINDOVERRIDE 0x08
55512 +#define GR_CONNECTOVERRIDE 0x10
55513 +#define GR_SOCK_FAMILY 0x20
55514 +
55515 +static const char * gr_protocols[IPPROTO_MAX] = {
55516 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
55517 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
55518 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
55519 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
55520 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
55521 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
55522 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
55523 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
55524 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
55525 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
55526 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
55527 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
55528 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
55529 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
55530 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
55531 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
55532 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
55533 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
55534 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
55535 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
55536 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
55537 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
55538 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
55539 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
55540 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
55541 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
55542 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
55543 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
55544 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
55545 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
55546 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
55547 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
55548 + };
55549 +
55550 +static const char * gr_socktypes[SOCK_MAX] = {
55551 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
55552 + "unknown:7", "unknown:8", "unknown:9", "packet"
55553 + };
55554 +
55555 +static const char * gr_sockfamilies[AF_MAX+1] = {
55556 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
55557 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
55558 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
55559 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
55560 + };
55561 +
55562 +const char *
55563 +gr_proto_to_name(unsigned char proto)
55564 +{
55565 + return gr_protocols[proto];
55566 +}
55567 +
55568 +const char *
55569 +gr_socktype_to_name(unsigned char type)
55570 +{
55571 + return gr_socktypes[type];
55572 +}
55573 +
55574 +const char *
55575 +gr_sockfamily_to_name(unsigned char family)
55576 +{
55577 + return gr_sockfamilies[family];
55578 +}
55579 +
55580 +int
55581 +gr_search_socket(const int domain, const int type, const int protocol)
55582 +{
55583 + struct acl_subject_label *curr;
55584 + const struct cred *cred = current_cred();
55585 +
55586 + if (unlikely(!gr_acl_is_enabled()))
55587 + goto exit;
55588 +
55589 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
55590 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
55591 + goto exit; // let the kernel handle it
55592 +
55593 + curr = current->acl;
55594 +
55595 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
55596 + /* the family is allowed, if this is PF_INET allow it only if
55597 + the extra sock type/protocol checks pass */
55598 + if (domain == PF_INET)
55599 + goto inet_check;
55600 + goto exit;
55601 + } else {
55602 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55603 + __u32 fakeip = 0;
55604 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55605 + current->role->roletype, cred->uid,
55606 + cred->gid, current->exec_file ?
55607 + gr_to_filename(current->exec_file->f_path.dentry,
55608 + current->exec_file->f_path.mnt) :
55609 + curr->filename, curr->filename,
55610 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
55611 + &current->signal->saved_ip);
55612 + goto exit;
55613 + }
55614 + goto exit_fail;
55615 + }
55616 +
55617 +inet_check:
55618 + /* the rest of this checking is for IPv4 only */
55619 + if (!curr->ips)
55620 + goto exit;
55621 +
55622 + if ((curr->ip_type & (1 << type)) &&
55623 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
55624 + goto exit;
55625 +
55626 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55627 + /* we don't place acls on raw sockets , and sometimes
55628 + dgram/ip sockets are opened for ioctl and not
55629 + bind/connect, so we'll fake a bind learn log */
55630 + if (type == SOCK_RAW || type == SOCK_PACKET) {
55631 + __u32 fakeip = 0;
55632 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55633 + current->role->roletype, cred->uid,
55634 + cred->gid, current->exec_file ?
55635 + gr_to_filename(current->exec_file->f_path.dentry,
55636 + current->exec_file->f_path.mnt) :
55637 + curr->filename, curr->filename,
55638 + &fakeip, 0, type,
55639 + protocol, GR_CONNECT, &current->signal->saved_ip);
55640 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
55641 + __u32 fakeip = 0;
55642 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55643 + current->role->roletype, cred->uid,
55644 + cred->gid, current->exec_file ?
55645 + gr_to_filename(current->exec_file->f_path.dentry,
55646 + current->exec_file->f_path.mnt) :
55647 + curr->filename, curr->filename,
55648 + &fakeip, 0, type,
55649 + protocol, GR_BIND, &current->signal->saved_ip);
55650 + }
55651 + /* we'll log when they use connect or bind */
55652 + goto exit;
55653 + }
55654 +
55655 +exit_fail:
55656 + if (domain == PF_INET)
55657 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
55658 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
55659 + else
55660 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
55661 + gr_socktype_to_name(type), protocol);
55662 +
55663 + return 0;
55664 +exit:
55665 + return 1;
55666 +}
55667 +
55668 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
55669 +{
55670 + if ((ip->mode & mode) &&
55671 + (ip_port >= ip->low) &&
55672 + (ip_port <= ip->high) &&
55673 + ((ntohl(ip_addr) & our_netmask) ==
55674 + (ntohl(our_addr) & our_netmask))
55675 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
55676 + && (ip->type & (1 << type))) {
55677 + if (ip->mode & GR_INVERT)
55678 + return 2; // specifically denied
55679 + else
55680 + return 1; // allowed
55681 + }
55682 +
55683 + return 0; // not specifically allowed, may continue parsing
55684 +}
55685 +
55686 +static int
55687 +gr_search_connectbind(const int full_mode, struct sock *sk,
55688 + struct sockaddr_in *addr, const int type)
55689 +{
55690 + char iface[IFNAMSIZ] = {0};
55691 + struct acl_subject_label *curr;
55692 + struct acl_ip_label *ip;
55693 + struct inet_sock *isk;
55694 + struct net_device *dev;
55695 + struct in_device *idev;
55696 + unsigned long i;
55697 + int ret;
55698 + int mode = full_mode & (GR_BIND | GR_CONNECT);
55699 + __u32 ip_addr = 0;
55700 + __u32 our_addr;
55701 + __u32 our_netmask;
55702 + char *p;
55703 + __u16 ip_port = 0;
55704 + const struct cred *cred = current_cred();
55705 +
55706 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55707 + return 0;
55708 +
55709 + curr = current->acl;
55710 + isk = inet_sk(sk);
55711 +
55712 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55713 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55714 + addr->sin_addr.s_addr = curr->inaddr_any_override;
55715 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
55716 + struct sockaddr_in saddr;
55717 + int err;
55718 +
55719 + saddr.sin_family = AF_INET;
55720 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
55721 + saddr.sin_port = isk->inet_sport;
55722 +
55723 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55724 + if (err)
55725 + return err;
55726 +
55727 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55728 + if (err)
55729 + return err;
55730 + }
55731 +
55732 + if (!curr->ips)
55733 + return 0;
55734 +
55735 + ip_addr = addr->sin_addr.s_addr;
55736 + ip_port = ntohs(addr->sin_port);
55737 +
55738 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55739 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55740 + current->role->roletype, cred->uid,
55741 + cred->gid, current->exec_file ?
55742 + gr_to_filename(current->exec_file->f_path.dentry,
55743 + current->exec_file->f_path.mnt) :
55744 + curr->filename, curr->filename,
55745 + &ip_addr, ip_port, type,
55746 + sk->sk_protocol, mode, &current->signal->saved_ip);
55747 + return 0;
55748 + }
55749 +
55750 + for (i = 0; i < curr->ip_num; i++) {
55751 + ip = *(curr->ips + i);
55752 + if (ip->iface != NULL) {
55753 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
55754 + p = strchr(iface, ':');
55755 + if (p != NULL)
55756 + *p = '\0';
55757 + dev = dev_get_by_name(sock_net(sk), iface);
55758 + if (dev == NULL)
55759 + continue;
55760 + idev = in_dev_get(dev);
55761 + if (idev == NULL) {
55762 + dev_put(dev);
55763 + continue;
55764 + }
55765 + rcu_read_lock();
55766 + for_ifa(idev) {
55767 + if (!strcmp(ip->iface, ifa->ifa_label)) {
55768 + our_addr = ifa->ifa_address;
55769 + our_netmask = 0xffffffff;
55770 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55771 + if (ret == 1) {
55772 + rcu_read_unlock();
55773 + in_dev_put(idev);
55774 + dev_put(dev);
55775 + return 0;
55776 + } else if (ret == 2) {
55777 + rcu_read_unlock();
55778 + in_dev_put(idev);
55779 + dev_put(dev);
55780 + goto denied;
55781 + }
55782 + }
55783 + } endfor_ifa(idev);
55784 + rcu_read_unlock();
55785 + in_dev_put(idev);
55786 + dev_put(dev);
55787 + } else {
55788 + our_addr = ip->addr;
55789 + our_netmask = ip->netmask;
55790 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55791 + if (ret == 1)
55792 + return 0;
55793 + else if (ret == 2)
55794 + goto denied;
55795 + }
55796 + }
55797 +
55798 +denied:
55799 + if (mode == GR_BIND)
55800 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55801 + else if (mode == GR_CONNECT)
55802 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55803 +
55804 + return -EACCES;
55805 +}
55806 +
55807 +int
55808 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55809 +{
55810 + /* always allow disconnection of dgram sockets with connect */
55811 + if (addr->sin_family == AF_UNSPEC)
55812 + return 0;
55813 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55814 +}
55815 +
55816 +int
55817 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55818 +{
55819 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55820 +}
55821 +
55822 +int gr_search_listen(struct socket *sock)
55823 +{
55824 + struct sock *sk = sock->sk;
55825 + struct sockaddr_in addr;
55826 +
55827 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55828 + addr.sin_port = inet_sk(sk)->inet_sport;
55829 +
55830 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55831 +}
55832 +
55833 +int gr_search_accept(struct socket *sock)
55834 +{
55835 + struct sock *sk = sock->sk;
55836 + struct sockaddr_in addr;
55837 +
55838 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55839 + addr.sin_port = inet_sk(sk)->inet_sport;
55840 +
55841 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55842 +}
55843 +
55844 +int
55845 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55846 +{
55847 + if (addr)
55848 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55849 + else {
55850 + struct sockaddr_in sin;
55851 + const struct inet_sock *inet = inet_sk(sk);
55852 +
55853 + sin.sin_addr.s_addr = inet->inet_daddr;
55854 + sin.sin_port = inet->inet_dport;
55855 +
55856 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55857 + }
55858 +}
55859 +
55860 +int
55861 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55862 +{
55863 + struct sockaddr_in sin;
55864 +
55865 + if (unlikely(skb->len < sizeof (struct udphdr)))
55866 + return 0; // skip this packet
55867 +
55868 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55869 + sin.sin_port = udp_hdr(skb)->source;
55870 +
55871 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55872 +}
55873 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55874 new file mode 100644
55875 index 0000000..25f54ef
55876 --- /dev/null
55877 +++ b/grsecurity/gracl_learn.c
55878 @@ -0,0 +1,207 @@
55879 +#include <linux/kernel.h>
55880 +#include <linux/mm.h>
55881 +#include <linux/sched.h>
55882 +#include <linux/poll.h>
55883 +#include <linux/string.h>
55884 +#include <linux/file.h>
55885 +#include <linux/types.h>
55886 +#include <linux/vmalloc.h>
55887 +#include <linux/grinternal.h>
55888 +
55889 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55890 + size_t count, loff_t *ppos);
55891 +extern int gr_acl_is_enabled(void);
55892 +
55893 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55894 +static int gr_learn_attached;
55895 +
55896 +/* use a 512k buffer */
55897 +#define LEARN_BUFFER_SIZE (512 * 1024)
55898 +
55899 +static DEFINE_SPINLOCK(gr_learn_lock);
55900 +static DEFINE_MUTEX(gr_learn_user_mutex);
55901 +
55902 +/* we need to maintain two buffers, so that the kernel context of grlearn
55903 + uses a semaphore around the userspace copying, and the other kernel contexts
55904 + use a spinlock when copying into the buffer, since they cannot sleep
55905 +*/
55906 +static char *learn_buffer;
55907 +static char *learn_buffer_user;
55908 +static int learn_buffer_len;
55909 +static int learn_buffer_user_len;
55910 +
55911 +static ssize_t
55912 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55913 +{
55914 + DECLARE_WAITQUEUE(wait, current);
55915 + ssize_t retval = 0;
55916 +
55917 + add_wait_queue(&learn_wait, &wait);
55918 + set_current_state(TASK_INTERRUPTIBLE);
55919 + do {
55920 + mutex_lock(&gr_learn_user_mutex);
55921 + spin_lock(&gr_learn_lock);
55922 + if (learn_buffer_len)
55923 + break;
55924 + spin_unlock(&gr_learn_lock);
55925 + mutex_unlock(&gr_learn_user_mutex);
55926 + if (file->f_flags & O_NONBLOCK) {
55927 + retval = -EAGAIN;
55928 + goto out;
55929 + }
55930 + if (signal_pending(current)) {
55931 + retval = -ERESTARTSYS;
55932 + goto out;
55933 + }
55934 +
55935 + schedule();
55936 + } while (1);
55937 +
55938 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55939 + learn_buffer_user_len = learn_buffer_len;
55940 + retval = learn_buffer_len;
55941 + learn_buffer_len = 0;
55942 +
55943 + spin_unlock(&gr_learn_lock);
55944 +
55945 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55946 + retval = -EFAULT;
55947 +
55948 + mutex_unlock(&gr_learn_user_mutex);
55949 +out:
55950 + set_current_state(TASK_RUNNING);
55951 + remove_wait_queue(&learn_wait, &wait);
55952 + return retval;
55953 +}
55954 +
55955 +static unsigned int
55956 +poll_learn(struct file * file, poll_table * wait)
55957 +{
55958 + poll_wait(file, &learn_wait, wait);
55959 +
55960 + if (learn_buffer_len)
55961 + return (POLLIN | POLLRDNORM);
55962 +
55963 + return 0;
55964 +}
55965 +
55966 +void
55967 +gr_clear_learn_entries(void)
55968 +{
55969 + char *tmp;
55970 +
55971 + mutex_lock(&gr_learn_user_mutex);
55972 + spin_lock(&gr_learn_lock);
55973 + tmp = learn_buffer;
55974 + learn_buffer = NULL;
55975 + spin_unlock(&gr_learn_lock);
55976 + if (tmp)
55977 + vfree(tmp);
55978 + if (learn_buffer_user != NULL) {
55979 + vfree(learn_buffer_user);
55980 + learn_buffer_user = NULL;
55981 + }
55982 + learn_buffer_len = 0;
55983 + mutex_unlock(&gr_learn_user_mutex);
55984 +
55985 + return;
55986 +}
55987 +
55988 +void
55989 +gr_add_learn_entry(const char *fmt, ...)
55990 +{
55991 + va_list args;
55992 + unsigned int len;
55993 +
55994 + if (!gr_learn_attached)
55995 + return;
55996 +
55997 + spin_lock(&gr_learn_lock);
55998 +
55999 + /* leave a gap at the end so we know when it's "full" but don't have to
56000 + compute the exact length of the string we're trying to append
56001 + */
56002 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
56003 + spin_unlock(&gr_learn_lock);
56004 + wake_up_interruptible(&learn_wait);
56005 + return;
56006 + }
56007 + if (learn_buffer == NULL) {
56008 + spin_unlock(&gr_learn_lock);
56009 + return;
56010 + }
56011 +
56012 + va_start(args, fmt);
56013 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
56014 + va_end(args);
56015 +
56016 + learn_buffer_len += len + 1;
56017 +
56018 + spin_unlock(&gr_learn_lock);
56019 + wake_up_interruptible(&learn_wait);
56020 +
56021 + return;
56022 +}
56023 +
56024 +static int
56025 +open_learn(struct inode *inode, struct file *file)
56026 +{
56027 + if (file->f_mode & FMODE_READ && gr_learn_attached)
56028 + return -EBUSY;
56029 + if (file->f_mode & FMODE_READ) {
56030 + int retval = 0;
56031 + mutex_lock(&gr_learn_user_mutex);
56032 + if (learn_buffer == NULL)
56033 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
56034 + if (learn_buffer_user == NULL)
56035 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
56036 + if (learn_buffer == NULL) {
56037 + retval = -ENOMEM;
56038 + goto out_error;
56039 + }
56040 + if (learn_buffer_user == NULL) {
56041 + retval = -ENOMEM;
56042 + goto out_error;
56043 + }
56044 + learn_buffer_len = 0;
56045 + learn_buffer_user_len = 0;
56046 + gr_learn_attached = 1;
56047 +out_error:
56048 + mutex_unlock(&gr_learn_user_mutex);
56049 + return retval;
56050 + }
56051 + return 0;
56052 +}
56053 +
56054 +static int
56055 +close_learn(struct inode *inode, struct file *file)
56056 +{
56057 + if (file->f_mode & FMODE_READ) {
56058 + char *tmp = NULL;
56059 + mutex_lock(&gr_learn_user_mutex);
56060 + spin_lock(&gr_learn_lock);
56061 + tmp = learn_buffer;
56062 + learn_buffer = NULL;
56063 + spin_unlock(&gr_learn_lock);
56064 + if (tmp)
56065 + vfree(tmp);
56066 + if (learn_buffer_user != NULL) {
56067 + vfree(learn_buffer_user);
56068 + learn_buffer_user = NULL;
56069 + }
56070 + learn_buffer_len = 0;
56071 + learn_buffer_user_len = 0;
56072 + gr_learn_attached = 0;
56073 + mutex_unlock(&gr_learn_user_mutex);
56074 + }
56075 +
56076 + return 0;
56077 +}
56078 +
56079 +const struct file_operations grsec_fops = {
56080 + .read = read_learn,
56081 + .write = write_grsec_handler,
56082 + .open = open_learn,
56083 + .release = close_learn,
56084 + .poll = poll_learn,
56085 +};
56086 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
56087 new file mode 100644
56088 index 0000000..39645c9
56089 --- /dev/null
56090 +++ b/grsecurity/gracl_res.c
56091 @@ -0,0 +1,68 @@
56092 +#include <linux/kernel.h>
56093 +#include <linux/sched.h>
56094 +#include <linux/gracl.h>
56095 +#include <linux/grinternal.h>
56096 +
56097 +static const char *restab_log[] = {
56098 + [RLIMIT_CPU] = "RLIMIT_CPU",
56099 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
56100 + [RLIMIT_DATA] = "RLIMIT_DATA",
56101 + [RLIMIT_STACK] = "RLIMIT_STACK",
56102 + [RLIMIT_CORE] = "RLIMIT_CORE",
56103 + [RLIMIT_RSS] = "RLIMIT_RSS",
56104 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
56105 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
56106 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
56107 + [RLIMIT_AS] = "RLIMIT_AS",
56108 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
56109 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
56110 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
56111 + [RLIMIT_NICE] = "RLIMIT_NICE",
56112 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
56113 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
56114 + [GR_CRASH_RES] = "RLIMIT_CRASH"
56115 +};
56116 +
56117 +void
56118 +gr_log_resource(const struct task_struct *task,
56119 + const int res, const unsigned long wanted, const int gt)
56120 +{
56121 + const struct cred *cred;
56122 + unsigned long rlim;
56123 +
56124 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
56125 + return;
56126 +
56127 + // not yet supported resource
56128 + if (unlikely(!restab_log[res]))
56129 + return;
56130 +
56131 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
56132 + rlim = task_rlimit_max(task, res);
56133 + else
56134 + rlim = task_rlimit(task, res);
56135 +
56136 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
56137 + return;
56138 +
56139 + rcu_read_lock();
56140 + cred = __task_cred(task);
56141 +
56142 + if (res == RLIMIT_NPROC &&
56143 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
56144 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
56145 + goto out_rcu_unlock;
56146 + else if (res == RLIMIT_MEMLOCK &&
56147 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
56148 + goto out_rcu_unlock;
56149 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
56150 + goto out_rcu_unlock;
56151 + rcu_read_unlock();
56152 +
56153 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
56154 +
56155 + return;
56156 +out_rcu_unlock:
56157 + rcu_read_unlock();
56158 + return;
56159 +}
56160 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
56161 new file mode 100644
56162 index 0000000..5556be3
56163 --- /dev/null
56164 +++ b/grsecurity/gracl_segv.c
56165 @@ -0,0 +1,299 @@
56166 +#include <linux/kernel.h>
56167 +#include <linux/mm.h>
56168 +#include <asm/uaccess.h>
56169 +#include <asm/errno.h>
56170 +#include <asm/mman.h>
56171 +#include <net/sock.h>
56172 +#include <linux/file.h>
56173 +#include <linux/fs.h>
56174 +#include <linux/net.h>
56175 +#include <linux/in.h>
56176 +#include <linux/slab.h>
56177 +#include <linux/types.h>
56178 +#include <linux/sched.h>
56179 +#include <linux/timer.h>
56180 +#include <linux/gracl.h>
56181 +#include <linux/grsecurity.h>
56182 +#include <linux/grinternal.h>
56183 +
56184 +static struct crash_uid *uid_set;
56185 +static unsigned short uid_used;
56186 +static DEFINE_SPINLOCK(gr_uid_lock);
56187 +extern rwlock_t gr_inode_lock;
56188 +extern struct acl_subject_label *
56189 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56190 + struct acl_role_label *role);
56191 +
56192 +#ifdef CONFIG_BTRFS_FS
56193 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56194 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56195 +#endif
56196 +
56197 +static inline dev_t __get_dev(const struct dentry *dentry)
56198 +{
56199 +#ifdef CONFIG_BTRFS_FS
56200 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56201 + return get_btrfs_dev_from_inode(dentry->d_inode);
56202 + else
56203 +#endif
56204 + return dentry->d_inode->i_sb->s_dev;
56205 +}
56206 +
56207 +int
56208 +gr_init_uidset(void)
56209 +{
56210 + uid_set =
56211 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56212 + uid_used = 0;
56213 +
56214 + return uid_set ? 1 : 0;
56215 +}
56216 +
56217 +void
56218 +gr_free_uidset(void)
56219 +{
56220 + if (uid_set)
56221 + kfree(uid_set);
56222 +
56223 + return;
56224 +}
56225 +
56226 +int
56227 +gr_find_uid(const uid_t uid)
56228 +{
56229 + struct crash_uid *tmp = uid_set;
56230 + uid_t buid;
56231 + int low = 0, high = uid_used - 1, mid;
56232 +
56233 + while (high >= low) {
56234 + mid = (low + high) >> 1;
56235 + buid = tmp[mid].uid;
56236 + if (buid == uid)
56237 + return mid;
56238 + if (buid > uid)
56239 + high = mid - 1;
56240 + if (buid < uid)
56241 + low = mid + 1;
56242 + }
56243 +
56244 + return -1;
56245 +}
56246 +
56247 +static __inline__ void
56248 +gr_insertsort(void)
56249 +{
56250 + unsigned short i, j;
56251 + struct crash_uid index;
56252 +
56253 + for (i = 1; i < uid_used; i++) {
56254 + index = uid_set[i];
56255 + j = i;
56256 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
56257 + uid_set[j] = uid_set[j - 1];
56258 + j--;
56259 + }
56260 + uid_set[j] = index;
56261 + }
56262 +
56263 + return;
56264 +}
56265 +
56266 +static __inline__ void
56267 +gr_insert_uid(const uid_t uid, const unsigned long expires)
56268 +{
56269 + int loc;
56270 +
56271 + if (uid_used == GR_UIDTABLE_MAX)
56272 + return;
56273 +
56274 + loc = gr_find_uid(uid);
56275 +
56276 + if (loc >= 0) {
56277 + uid_set[loc].expires = expires;
56278 + return;
56279 + }
56280 +
56281 + uid_set[uid_used].uid = uid;
56282 + uid_set[uid_used].expires = expires;
56283 + uid_used++;
56284 +
56285 + gr_insertsort();
56286 +
56287 + return;
56288 +}
56289 +
56290 +void
56291 +gr_remove_uid(const unsigned short loc)
56292 +{
56293 + unsigned short i;
56294 +
56295 + for (i = loc + 1; i < uid_used; i++)
56296 + uid_set[i - 1] = uid_set[i];
56297 +
56298 + uid_used--;
56299 +
56300 + return;
56301 +}
56302 +
56303 +int
56304 +gr_check_crash_uid(const uid_t uid)
56305 +{
56306 + int loc;
56307 + int ret = 0;
56308 +
56309 + if (unlikely(!gr_acl_is_enabled()))
56310 + return 0;
56311 +
56312 + spin_lock(&gr_uid_lock);
56313 + loc = gr_find_uid(uid);
56314 +
56315 + if (loc < 0)
56316 + goto out_unlock;
56317 +
56318 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
56319 + gr_remove_uid(loc);
56320 + else
56321 + ret = 1;
56322 +
56323 +out_unlock:
56324 + spin_unlock(&gr_uid_lock);
56325 + return ret;
56326 +}
56327 +
56328 +static __inline__ int
56329 +proc_is_setxid(const struct cred *cred)
56330 +{
56331 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
56332 + cred->uid != cred->fsuid)
56333 + return 1;
56334 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
56335 + cred->gid != cred->fsgid)
56336 + return 1;
56337 +
56338 + return 0;
56339 +}
56340 +
56341 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
56342 +
56343 +void
56344 +gr_handle_crash(struct task_struct *task, const int sig)
56345 +{
56346 + struct acl_subject_label *curr;
56347 + struct task_struct *tsk, *tsk2;
56348 + const struct cred *cred;
56349 + const struct cred *cred2;
56350 +
56351 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
56352 + return;
56353 +
56354 + if (unlikely(!gr_acl_is_enabled()))
56355 + return;
56356 +
56357 + curr = task->acl;
56358 +
56359 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
56360 + return;
56361 +
56362 + if (time_before_eq(curr->expires, get_seconds())) {
56363 + curr->expires = 0;
56364 + curr->crashes = 0;
56365 + }
56366 +
56367 + curr->crashes++;
56368 +
56369 + if (!curr->expires)
56370 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
56371 +
56372 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56373 + time_after(curr->expires, get_seconds())) {
56374 + rcu_read_lock();
56375 + cred = __task_cred(task);
56376 + if (cred->uid && proc_is_setxid(cred)) {
56377 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56378 + spin_lock(&gr_uid_lock);
56379 + gr_insert_uid(cred->uid, curr->expires);
56380 + spin_unlock(&gr_uid_lock);
56381 + curr->expires = 0;
56382 + curr->crashes = 0;
56383 + read_lock(&tasklist_lock);
56384 + do_each_thread(tsk2, tsk) {
56385 + cred2 = __task_cred(tsk);
56386 + if (tsk != task && cred2->uid == cred->uid)
56387 + gr_fake_force_sig(SIGKILL, tsk);
56388 + } while_each_thread(tsk2, tsk);
56389 + read_unlock(&tasklist_lock);
56390 + } else {
56391 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56392 + read_lock(&tasklist_lock);
56393 + read_lock(&grsec_exec_file_lock);
56394 + do_each_thread(tsk2, tsk) {
56395 + if (likely(tsk != task)) {
56396 + // if this thread has the same subject as the one that triggered
56397 + // RES_CRASH and it's the same binary, kill it
56398 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
56399 + gr_fake_force_sig(SIGKILL, tsk);
56400 + }
56401 + } while_each_thread(tsk2, tsk);
56402 + read_unlock(&grsec_exec_file_lock);
56403 + read_unlock(&tasklist_lock);
56404 + }
56405 + rcu_read_unlock();
56406 + }
56407 +
56408 + return;
56409 +}
56410 +
56411 +int
56412 +gr_check_crash_exec(const struct file *filp)
56413 +{
56414 + struct acl_subject_label *curr;
56415 +
56416 + if (unlikely(!gr_acl_is_enabled()))
56417 + return 0;
56418 +
56419 + read_lock(&gr_inode_lock);
56420 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
56421 + __get_dev(filp->f_path.dentry),
56422 + current->role);
56423 + read_unlock(&gr_inode_lock);
56424 +
56425 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
56426 + (!curr->crashes && !curr->expires))
56427 + return 0;
56428 +
56429 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56430 + time_after(curr->expires, get_seconds()))
56431 + return 1;
56432 + else if (time_before_eq(curr->expires, get_seconds())) {
56433 + curr->crashes = 0;
56434 + curr->expires = 0;
56435 + }
56436 +
56437 + return 0;
56438 +}
56439 +
56440 +void
56441 +gr_handle_alertkill(struct task_struct *task)
56442 +{
56443 + struct acl_subject_label *curracl;
56444 + __u32 curr_ip;
56445 + struct task_struct *p, *p2;
56446 +
56447 + if (unlikely(!gr_acl_is_enabled()))
56448 + return;
56449 +
56450 + curracl = task->acl;
56451 + curr_ip = task->signal->curr_ip;
56452 +
56453 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
56454 + read_lock(&tasklist_lock);
56455 + do_each_thread(p2, p) {
56456 + if (p->signal->curr_ip == curr_ip)
56457 + gr_fake_force_sig(SIGKILL, p);
56458 + } while_each_thread(p2, p);
56459 + read_unlock(&tasklist_lock);
56460 + } else if (curracl->mode & GR_KILLPROC)
56461 + gr_fake_force_sig(SIGKILL, task);
56462 +
56463 + return;
56464 +}
56465 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
56466 new file mode 100644
56467 index 0000000..9d83a69
56468 --- /dev/null
56469 +++ b/grsecurity/gracl_shm.c
56470 @@ -0,0 +1,40 @@
56471 +#include <linux/kernel.h>
56472 +#include <linux/mm.h>
56473 +#include <linux/sched.h>
56474 +#include <linux/file.h>
56475 +#include <linux/ipc.h>
56476 +#include <linux/gracl.h>
56477 +#include <linux/grsecurity.h>
56478 +#include <linux/grinternal.h>
56479 +
56480 +int
56481 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56482 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56483 +{
56484 + struct task_struct *task;
56485 +
56486 + if (!gr_acl_is_enabled())
56487 + return 1;
56488 +
56489 + rcu_read_lock();
56490 + read_lock(&tasklist_lock);
56491 +
56492 + task = find_task_by_vpid(shm_cprid);
56493 +
56494 + if (unlikely(!task))
56495 + task = find_task_by_vpid(shm_lapid);
56496 +
56497 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
56498 + (task->pid == shm_lapid)) &&
56499 + (task->acl->mode & GR_PROTSHM) &&
56500 + (task->acl != current->acl))) {
56501 + read_unlock(&tasklist_lock);
56502 + rcu_read_unlock();
56503 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
56504 + return 0;
56505 + }
56506 + read_unlock(&tasklist_lock);
56507 + rcu_read_unlock();
56508 +
56509 + return 1;
56510 +}
56511 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
56512 new file mode 100644
56513 index 0000000..bc0be01
56514 --- /dev/null
56515 +++ b/grsecurity/grsec_chdir.c
56516 @@ -0,0 +1,19 @@
56517 +#include <linux/kernel.h>
56518 +#include <linux/sched.h>
56519 +#include <linux/fs.h>
56520 +#include <linux/file.h>
56521 +#include <linux/grsecurity.h>
56522 +#include <linux/grinternal.h>
56523 +
56524 +void
56525 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
56526 +{
56527 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56528 + if ((grsec_enable_chdir && grsec_enable_group &&
56529 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
56530 + !grsec_enable_group)) {
56531 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
56532 + }
56533 +#endif
56534 + return;
56535 +}
56536 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
56537 new file mode 100644
56538 index 0000000..9807ee2
56539 --- /dev/null
56540 +++ b/grsecurity/grsec_chroot.c
56541 @@ -0,0 +1,368 @@
56542 +#include <linux/kernel.h>
56543 +#include <linux/module.h>
56544 +#include <linux/sched.h>
56545 +#include <linux/file.h>
56546 +#include <linux/fs.h>
56547 +#include <linux/mount.h>
56548 +#include <linux/types.h>
56549 +#include "../fs/mount.h"
56550 +#include <linux/grsecurity.h>
56551 +#include <linux/grinternal.h>
56552 +
56553 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
56554 +{
56555 +#ifdef CONFIG_GRKERNSEC
56556 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
56557 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
56558 + task->gr_is_chrooted = 1;
56559 + else
56560 + task->gr_is_chrooted = 0;
56561 +
56562 + task->gr_chroot_dentry = path->dentry;
56563 +#endif
56564 + return;
56565 +}
56566 +
56567 +void gr_clear_chroot_entries(struct task_struct *task)
56568 +{
56569 +#ifdef CONFIG_GRKERNSEC
56570 + task->gr_is_chrooted = 0;
56571 + task->gr_chroot_dentry = NULL;
56572 +#endif
56573 + return;
56574 +}
56575 +
56576 +int
56577 +gr_handle_chroot_unix(const pid_t pid)
56578 +{
56579 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56580 + struct task_struct *p;
56581 +
56582 + if (unlikely(!grsec_enable_chroot_unix))
56583 + return 1;
56584 +
56585 + if (likely(!proc_is_chrooted(current)))
56586 + return 1;
56587 +
56588 + rcu_read_lock();
56589 + read_lock(&tasklist_lock);
56590 + p = find_task_by_vpid_unrestricted(pid);
56591 + if (unlikely(p && !have_same_root(current, p))) {
56592 + read_unlock(&tasklist_lock);
56593 + rcu_read_unlock();
56594 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
56595 + return 0;
56596 + }
56597 + read_unlock(&tasklist_lock);
56598 + rcu_read_unlock();
56599 +#endif
56600 + return 1;
56601 +}
56602 +
56603 +int
56604 +gr_handle_chroot_nice(void)
56605 +{
56606 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56607 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
56608 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
56609 + return -EPERM;
56610 + }
56611 +#endif
56612 + return 0;
56613 +}
56614 +
56615 +int
56616 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
56617 +{
56618 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56619 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
56620 + && proc_is_chrooted(current)) {
56621 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
56622 + return -EACCES;
56623 + }
56624 +#endif
56625 + return 0;
56626 +}
56627 +
56628 +int
56629 +gr_handle_chroot_rawio(const struct inode *inode)
56630 +{
56631 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56632 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56633 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
56634 + return 1;
56635 +#endif
56636 + return 0;
56637 +}
56638 +
56639 +int
56640 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
56641 +{
56642 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56643 + struct task_struct *p;
56644 + int ret = 0;
56645 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
56646 + return ret;
56647 +
56648 + read_lock(&tasklist_lock);
56649 + do_each_pid_task(pid, type, p) {
56650 + if (!have_same_root(current, p)) {
56651 + ret = 1;
56652 + goto out;
56653 + }
56654 + } while_each_pid_task(pid, type, p);
56655 +out:
56656 + read_unlock(&tasklist_lock);
56657 + return ret;
56658 +#endif
56659 + return 0;
56660 +}
56661 +
56662 +int
56663 +gr_pid_is_chrooted(struct task_struct *p)
56664 +{
56665 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56666 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
56667 + return 0;
56668 +
56669 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
56670 + !have_same_root(current, p)) {
56671 + return 1;
56672 + }
56673 +#endif
56674 + return 0;
56675 +}
56676 +
56677 +EXPORT_SYMBOL(gr_pid_is_chrooted);
56678 +
56679 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
56680 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
56681 +{
56682 + struct path path, currentroot;
56683 + int ret = 0;
56684 +
56685 + path.dentry = (struct dentry *)u_dentry;
56686 + path.mnt = (struct vfsmount *)u_mnt;
56687 + get_fs_root(current->fs, &currentroot);
56688 + if (path_is_under(&path, &currentroot))
56689 + ret = 1;
56690 + path_put(&currentroot);
56691 +
56692 + return ret;
56693 +}
56694 +#endif
56695 +
56696 +int
56697 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
56698 +{
56699 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56700 + if (!grsec_enable_chroot_fchdir)
56701 + return 1;
56702 +
56703 + if (!proc_is_chrooted(current))
56704 + return 1;
56705 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56706 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56707 + return 0;
56708 + }
56709 +#endif
56710 + return 1;
56711 +}
56712 +
56713 +int
56714 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56715 + const time_t shm_createtime)
56716 +{
56717 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56718 + struct task_struct *p;
56719 + time_t starttime;
56720 +
56721 + if (unlikely(!grsec_enable_chroot_shmat))
56722 + return 1;
56723 +
56724 + if (likely(!proc_is_chrooted(current)))
56725 + return 1;
56726 +
56727 + rcu_read_lock();
56728 + read_lock(&tasklist_lock);
56729 +
56730 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
56731 + starttime = p->start_time.tv_sec;
56732 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56733 + if (have_same_root(current, p)) {
56734 + goto allow;
56735 + } else {
56736 + read_unlock(&tasklist_lock);
56737 + rcu_read_unlock();
56738 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56739 + return 0;
56740 + }
56741 + }
56742 + /* creator exited, pid reuse, fall through to next check */
56743 + }
56744 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56745 + if (unlikely(!have_same_root(current, p))) {
56746 + read_unlock(&tasklist_lock);
56747 + rcu_read_unlock();
56748 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56749 + return 0;
56750 + }
56751 + }
56752 +
56753 +allow:
56754 + read_unlock(&tasklist_lock);
56755 + rcu_read_unlock();
56756 +#endif
56757 + return 1;
56758 +}
56759 +
56760 +void
56761 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56762 +{
56763 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56764 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56765 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56766 +#endif
56767 + return;
56768 +}
56769 +
56770 +int
56771 +gr_handle_chroot_mknod(const struct dentry *dentry,
56772 + const struct vfsmount *mnt, const int mode)
56773 +{
56774 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56775 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56776 + proc_is_chrooted(current)) {
56777 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56778 + return -EPERM;
56779 + }
56780 +#endif
56781 + return 0;
56782 +}
56783 +
56784 +int
56785 +gr_handle_chroot_mount(const struct dentry *dentry,
56786 + const struct vfsmount *mnt, const char *dev_name)
56787 +{
56788 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56789 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56790 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56791 + return -EPERM;
56792 + }
56793 +#endif
56794 + return 0;
56795 +}
56796 +
56797 +int
56798 +gr_handle_chroot_pivot(void)
56799 +{
56800 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56801 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56802 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56803 + return -EPERM;
56804 + }
56805 +#endif
56806 + return 0;
56807 +}
56808 +
56809 +int
56810 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56811 +{
56812 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56813 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56814 + !gr_is_outside_chroot(dentry, mnt)) {
56815 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56816 + return -EPERM;
56817 + }
56818 +#endif
56819 + return 0;
56820 +}
56821 +
56822 +extern const char *captab_log[];
56823 +extern int captab_log_entries;
56824 +
56825 +int
56826 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56827 +{
56828 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56829 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56830 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56831 + if (cap_raised(chroot_caps, cap)) {
56832 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
56833 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
56834 + }
56835 + return 0;
56836 + }
56837 + }
56838 +#endif
56839 + return 1;
56840 +}
56841 +
56842 +int
56843 +gr_chroot_is_capable(const int cap)
56844 +{
56845 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56846 + return gr_task_chroot_is_capable(current, current_cred(), cap);
56847 +#endif
56848 + return 1;
56849 +}
56850 +
56851 +int
56852 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
56853 +{
56854 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56855 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56856 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56857 + if (cap_raised(chroot_caps, cap)) {
56858 + return 0;
56859 + }
56860 + }
56861 +#endif
56862 + return 1;
56863 +}
56864 +
56865 +int
56866 +gr_chroot_is_capable_nolog(const int cap)
56867 +{
56868 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56869 + return gr_task_chroot_is_capable_nolog(current, cap);
56870 +#endif
56871 + return 1;
56872 +}
56873 +
56874 +int
56875 +gr_handle_chroot_sysctl(const int op)
56876 +{
56877 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56878 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56879 + proc_is_chrooted(current))
56880 + return -EACCES;
56881 +#endif
56882 + return 0;
56883 +}
56884 +
56885 +void
56886 +gr_handle_chroot_chdir(struct path *path)
56887 +{
56888 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56889 + if (grsec_enable_chroot_chdir)
56890 + set_fs_pwd(current->fs, path);
56891 +#endif
56892 + return;
56893 +}
56894 +
56895 +int
56896 +gr_handle_chroot_chmod(const struct dentry *dentry,
56897 + const struct vfsmount *mnt, const int mode)
56898 +{
56899 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56900 + /* allow chmod +s on directories, but not files */
56901 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56902 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56903 + proc_is_chrooted(current)) {
56904 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56905 + return -EPERM;
56906 + }
56907 +#endif
56908 + return 0;
56909 +}
56910 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56911 new file mode 100644
56912 index 0000000..213ad8b
56913 --- /dev/null
56914 +++ b/grsecurity/grsec_disabled.c
56915 @@ -0,0 +1,437 @@
56916 +#include <linux/kernel.h>
56917 +#include <linux/module.h>
56918 +#include <linux/sched.h>
56919 +#include <linux/file.h>
56920 +#include <linux/fs.h>
56921 +#include <linux/kdev_t.h>
56922 +#include <linux/net.h>
56923 +#include <linux/in.h>
56924 +#include <linux/ip.h>
56925 +#include <linux/skbuff.h>
56926 +#include <linux/sysctl.h>
56927 +
56928 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56929 +void
56930 +pax_set_initial_flags(struct linux_binprm *bprm)
56931 +{
56932 + return;
56933 +}
56934 +#endif
56935 +
56936 +#ifdef CONFIG_SYSCTL
56937 +__u32
56938 +gr_handle_sysctl(const struct ctl_table * table, const int op)
56939 +{
56940 + return 0;
56941 +}
56942 +#endif
56943 +
56944 +#ifdef CONFIG_TASKSTATS
56945 +int gr_is_taskstats_denied(int pid)
56946 +{
56947 + return 0;
56948 +}
56949 +#endif
56950 +
56951 +int
56952 +gr_acl_is_enabled(void)
56953 +{
56954 + return 0;
56955 +}
56956 +
56957 +void
56958 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56959 +{
56960 + return;
56961 +}
56962 +
56963 +int
56964 +gr_handle_rawio(const struct inode *inode)
56965 +{
56966 + return 0;
56967 +}
56968 +
56969 +void
56970 +gr_acl_handle_psacct(struct task_struct *task, const long code)
56971 +{
56972 + return;
56973 +}
56974 +
56975 +int
56976 +gr_handle_ptrace(struct task_struct *task, const long request)
56977 +{
56978 + return 0;
56979 +}
56980 +
56981 +int
56982 +gr_handle_proc_ptrace(struct task_struct *task)
56983 +{
56984 + return 0;
56985 +}
56986 +
56987 +void
56988 +gr_learn_resource(const struct task_struct *task,
56989 + const int res, const unsigned long wanted, const int gt)
56990 +{
56991 + return;
56992 +}
56993 +
56994 +int
56995 +gr_set_acls(const int type)
56996 +{
56997 + return 0;
56998 +}
56999 +
57000 +int
57001 +gr_check_hidden_task(const struct task_struct *tsk)
57002 +{
57003 + return 0;
57004 +}
57005 +
57006 +int
57007 +gr_check_protected_task(const struct task_struct *task)
57008 +{
57009 + return 0;
57010 +}
57011 +
57012 +int
57013 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57014 +{
57015 + return 0;
57016 +}
57017 +
57018 +void
57019 +gr_copy_label(struct task_struct *tsk)
57020 +{
57021 + return;
57022 +}
57023 +
57024 +void
57025 +gr_set_pax_flags(struct task_struct *task)
57026 +{
57027 + return;
57028 +}
57029 +
57030 +int
57031 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57032 + const int unsafe_share)
57033 +{
57034 + return 0;
57035 +}
57036 +
57037 +void
57038 +gr_handle_delete(const ino_t ino, const dev_t dev)
57039 +{
57040 + return;
57041 +}
57042 +
57043 +void
57044 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
57045 +{
57046 + return;
57047 +}
57048 +
57049 +void
57050 +gr_handle_crash(struct task_struct *task, const int sig)
57051 +{
57052 + return;
57053 +}
57054 +
57055 +int
57056 +gr_check_crash_exec(const struct file *filp)
57057 +{
57058 + return 0;
57059 +}
57060 +
57061 +int
57062 +gr_check_crash_uid(const uid_t uid)
57063 +{
57064 + return 0;
57065 +}
57066 +
57067 +void
57068 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57069 + struct dentry *old_dentry,
57070 + struct dentry *new_dentry,
57071 + struct vfsmount *mnt, const __u8 replace)
57072 +{
57073 + return;
57074 +}
57075 +
57076 +int
57077 +gr_search_socket(const int family, const int type, const int protocol)
57078 +{
57079 + return 1;
57080 +}
57081 +
57082 +int
57083 +gr_search_connectbind(const int mode, const struct socket *sock,
57084 + const struct sockaddr_in *addr)
57085 +{
57086 + return 0;
57087 +}
57088 +
57089 +void
57090 +gr_handle_alertkill(struct task_struct *task)
57091 +{
57092 + return;
57093 +}
57094 +
57095 +__u32
57096 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
57097 +{
57098 + return 1;
57099 +}
57100 +
57101 +__u32
57102 +gr_acl_handle_hidden_file(const struct dentry * dentry,
57103 + const struct vfsmount * mnt)
57104 +{
57105 + return 1;
57106 +}
57107 +
57108 +__u32
57109 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
57110 + int acc_mode)
57111 +{
57112 + return 1;
57113 +}
57114 +
57115 +__u32
57116 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57117 +{
57118 + return 1;
57119 +}
57120 +
57121 +__u32
57122 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
57123 +{
57124 + return 1;
57125 +}
57126 +
57127 +int
57128 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
57129 + unsigned int *vm_flags)
57130 +{
57131 + return 1;
57132 +}
57133 +
57134 +__u32
57135 +gr_acl_handle_truncate(const struct dentry * dentry,
57136 + const struct vfsmount * mnt)
57137 +{
57138 + return 1;
57139 +}
57140 +
57141 +__u32
57142 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
57143 +{
57144 + return 1;
57145 +}
57146 +
57147 +__u32
57148 +gr_acl_handle_access(const struct dentry * dentry,
57149 + const struct vfsmount * mnt, const int fmode)
57150 +{
57151 + return 1;
57152 +}
57153 +
57154 +__u32
57155 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
57156 + umode_t *mode)
57157 +{
57158 + return 1;
57159 +}
57160 +
57161 +__u32
57162 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
57163 +{
57164 + return 1;
57165 +}
57166 +
57167 +__u32
57168 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57169 +{
57170 + return 1;
57171 +}
57172 +
57173 +void
57174 +grsecurity_init(void)
57175 +{
57176 + return;
57177 +}
57178 +
57179 +umode_t gr_acl_umask(void)
57180 +{
57181 + return 0;
57182 +}
57183 +
57184 +__u32
57185 +gr_acl_handle_mknod(const struct dentry * new_dentry,
57186 + const struct dentry * parent_dentry,
57187 + const struct vfsmount * parent_mnt,
57188 + const int mode)
57189 +{
57190 + return 1;
57191 +}
57192 +
57193 +__u32
57194 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
57195 + const struct dentry * parent_dentry,
57196 + const struct vfsmount * parent_mnt)
57197 +{
57198 + return 1;
57199 +}
57200 +
57201 +__u32
57202 +gr_acl_handle_symlink(const struct dentry * new_dentry,
57203 + const struct dentry * parent_dentry,
57204 + const struct vfsmount * parent_mnt, const char *from)
57205 +{
57206 + return 1;
57207 +}
57208 +
57209 +__u32
57210 +gr_acl_handle_link(const struct dentry * new_dentry,
57211 + const struct dentry * parent_dentry,
57212 + const struct vfsmount * parent_mnt,
57213 + const struct dentry * old_dentry,
57214 + const struct vfsmount * old_mnt, const char *to)
57215 +{
57216 + return 1;
57217 +}
57218 +
57219 +int
57220 +gr_acl_handle_rename(const struct dentry *new_dentry,
57221 + const struct dentry *parent_dentry,
57222 + const struct vfsmount *parent_mnt,
57223 + const struct dentry *old_dentry,
57224 + const struct inode *old_parent_inode,
57225 + const struct vfsmount *old_mnt, const char *newname)
57226 +{
57227 + return 0;
57228 +}
57229 +
57230 +int
57231 +gr_acl_handle_filldir(const struct file *file, const char *name,
57232 + const int namelen, const ino_t ino)
57233 +{
57234 + return 1;
57235 +}
57236 +
57237 +int
57238 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57239 + const time_t shm_createtime, const uid_t cuid, const int shmid)
57240 +{
57241 + return 1;
57242 +}
57243 +
57244 +int
57245 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
57246 +{
57247 + return 0;
57248 +}
57249 +
57250 +int
57251 +gr_search_accept(const struct socket *sock)
57252 +{
57253 + return 0;
57254 +}
57255 +
57256 +int
57257 +gr_search_listen(const struct socket *sock)
57258 +{
57259 + return 0;
57260 +}
57261 +
57262 +int
57263 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
57264 +{
57265 + return 0;
57266 +}
57267 +
57268 +__u32
57269 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
57270 +{
57271 + return 1;
57272 +}
57273 +
57274 +__u32
57275 +gr_acl_handle_creat(const struct dentry * dentry,
57276 + const struct dentry * p_dentry,
57277 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57278 + const int imode)
57279 +{
57280 + return 1;
57281 +}
57282 +
57283 +void
57284 +gr_acl_handle_exit(void)
57285 +{
57286 + return;
57287 +}
57288 +
57289 +int
57290 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57291 +{
57292 + return 1;
57293 +}
57294 +
57295 +void
57296 +gr_set_role_label(const uid_t uid, const gid_t gid)
57297 +{
57298 + return;
57299 +}
57300 +
57301 +int
57302 +gr_acl_handle_procpidmem(const struct task_struct *task)
57303 +{
57304 + return 0;
57305 +}
57306 +
57307 +int
57308 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
57309 +{
57310 + return 0;
57311 +}
57312 +
57313 +int
57314 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
57315 +{
57316 + return 0;
57317 +}
57318 +
57319 +void
57320 +gr_set_kernel_label(struct task_struct *task)
57321 +{
57322 + return;
57323 +}
57324 +
57325 +int
57326 +gr_check_user_change(int real, int effective, int fs)
57327 +{
57328 + return 0;
57329 +}
57330 +
57331 +int
57332 +gr_check_group_change(int real, int effective, int fs)
57333 +{
57334 + return 0;
57335 +}
57336 +
57337 +int gr_acl_enable_at_secure(void)
57338 +{
57339 + return 0;
57340 +}
57341 +
57342 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57343 +{
57344 + return dentry->d_inode->i_sb->s_dev;
57345 +}
57346 +
57347 +EXPORT_SYMBOL(gr_learn_resource);
57348 +EXPORT_SYMBOL(gr_set_kernel_label);
57349 +#ifdef CONFIG_SECURITY
57350 +EXPORT_SYMBOL(gr_check_user_change);
57351 +EXPORT_SYMBOL(gr_check_group_change);
57352 +#endif
57353 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
57354 new file mode 100644
57355 index 0000000..abfa971
57356 --- /dev/null
57357 +++ b/grsecurity/grsec_exec.c
57358 @@ -0,0 +1,174 @@
57359 +#include <linux/kernel.h>
57360 +#include <linux/sched.h>
57361 +#include <linux/file.h>
57362 +#include <linux/binfmts.h>
57363 +#include <linux/fs.h>
57364 +#include <linux/types.h>
57365 +#include <linux/grdefs.h>
57366 +#include <linux/grsecurity.h>
57367 +#include <linux/grinternal.h>
57368 +#include <linux/capability.h>
57369 +#include <linux/module.h>
57370 +
57371 +#include <asm/uaccess.h>
57372 +
57373 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57374 +static char gr_exec_arg_buf[132];
57375 +static DEFINE_MUTEX(gr_exec_arg_mutex);
57376 +#endif
57377 +
57378 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
57379 +
57380 +void
57381 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
57382 +{
57383 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57384 + char *grarg = gr_exec_arg_buf;
57385 + unsigned int i, x, execlen = 0;
57386 + char c;
57387 +
57388 + if (!((grsec_enable_execlog && grsec_enable_group &&
57389 + in_group_p(grsec_audit_gid))
57390 + || (grsec_enable_execlog && !grsec_enable_group)))
57391 + return;
57392 +
57393 + mutex_lock(&gr_exec_arg_mutex);
57394 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
57395 +
57396 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
57397 + const char __user *p;
57398 + unsigned int len;
57399 +
57400 + p = get_user_arg_ptr(argv, i);
57401 + if (IS_ERR(p))
57402 + goto log;
57403 +
57404 + len = strnlen_user(p, 128 - execlen);
57405 + if (len > 128 - execlen)
57406 + len = 128 - execlen;
57407 + else if (len > 0)
57408 + len--;
57409 + if (copy_from_user(grarg + execlen, p, len))
57410 + goto log;
57411 +
57412 + /* rewrite unprintable characters */
57413 + for (x = 0; x < len; x++) {
57414 + c = *(grarg + execlen + x);
57415 + if (c < 32 || c > 126)
57416 + *(grarg + execlen + x) = ' ';
57417 + }
57418 +
57419 + execlen += len;
57420 + *(grarg + execlen) = ' ';
57421 + *(grarg + execlen + 1) = '\0';
57422 + execlen++;
57423 + }
57424 +
57425 + log:
57426 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57427 + bprm->file->f_path.mnt, grarg);
57428 + mutex_unlock(&gr_exec_arg_mutex);
57429 +#endif
57430 + return;
57431 +}
57432 +
57433 +#ifdef CONFIG_GRKERNSEC
57434 +extern int gr_acl_is_capable(const int cap);
57435 +extern int gr_acl_is_capable_nolog(const int cap);
57436 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57437 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
57438 +extern int gr_chroot_is_capable(const int cap);
57439 +extern int gr_chroot_is_capable_nolog(const int cap);
57440 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57441 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
57442 +#endif
57443 +
57444 +const char *captab_log[] = {
57445 + "CAP_CHOWN",
57446 + "CAP_DAC_OVERRIDE",
57447 + "CAP_DAC_READ_SEARCH",
57448 + "CAP_FOWNER",
57449 + "CAP_FSETID",
57450 + "CAP_KILL",
57451 + "CAP_SETGID",
57452 + "CAP_SETUID",
57453 + "CAP_SETPCAP",
57454 + "CAP_LINUX_IMMUTABLE",
57455 + "CAP_NET_BIND_SERVICE",
57456 + "CAP_NET_BROADCAST",
57457 + "CAP_NET_ADMIN",
57458 + "CAP_NET_RAW",
57459 + "CAP_IPC_LOCK",
57460 + "CAP_IPC_OWNER",
57461 + "CAP_SYS_MODULE",
57462 + "CAP_SYS_RAWIO",
57463 + "CAP_SYS_CHROOT",
57464 + "CAP_SYS_PTRACE",
57465 + "CAP_SYS_PACCT",
57466 + "CAP_SYS_ADMIN",
57467 + "CAP_SYS_BOOT",
57468 + "CAP_SYS_NICE",
57469 + "CAP_SYS_RESOURCE",
57470 + "CAP_SYS_TIME",
57471 + "CAP_SYS_TTY_CONFIG",
57472 + "CAP_MKNOD",
57473 + "CAP_LEASE",
57474 + "CAP_AUDIT_WRITE",
57475 + "CAP_AUDIT_CONTROL",
57476 + "CAP_SETFCAP",
57477 + "CAP_MAC_OVERRIDE",
57478 + "CAP_MAC_ADMIN",
57479 + "CAP_SYSLOG",
57480 + "CAP_WAKE_ALARM"
57481 +};
57482 +
57483 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
57484 +
57485 +int gr_is_capable(const int cap)
57486 +{
57487 +#ifdef CONFIG_GRKERNSEC
57488 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
57489 + return 1;
57490 + return 0;
57491 +#else
57492 + return 1;
57493 +#endif
57494 +}
57495 +
57496 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57497 +{
57498 +#ifdef CONFIG_GRKERNSEC
57499 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
57500 + return 1;
57501 + return 0;
57502 +#else
57503 + return 1;
57504 +#endif
57505 +}
57506 +
57507 +int gr_is_capable_nolog(const int cap)
57508 +{
57509 +#ifdef CONFIG_GRKERNSEC
57510 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
57511 + return 1;
57512 + return 0;
57513 +#else
57514 + return 1;
57515 +#endif
57516 +}
57517 +
57518 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
57519 +{
57520 +#ifdef CONFIG_GRKERNSEC
57521 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
57522 + return 1;
57523 + return 0;
57524 +#else
57525 + return 1;
57526 +#endif
57527 +}
57528 +
57529 +EXPORT_SYMBOL(gr_is_capable);
57530 +EXPORT_SYMBOL(gr_is_capable_nolog);
57531 +EXPORT_SYMBOL(gr_task_is_capable);
57532 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
57533 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
57534 new file mode 100644
57535 index 0000000..d3ee748
57536 --- /dev/null
57537 +++ b/grsecurity/grsec_fifo.c
57538 @@ -0,0 +1,24 @@
57539 +#include <linux/kernel.h>
57540 +#include <linux/sched.h>
57541 +#include <linux/fs.h>
57542 +#include <linux/file.h>
57543 +#include <linux/grinternal.h>
57544 +
57545 +int
57546 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
57547 + const struct dentry *dir, const int flag, const int acc_mode)
57548 +{
57549 +#ifdef CONFIG_GRKERNSEC_FIFO
57550 + const struct cred *cred = current_cred();
57551 +
57552 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
57553 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
57554 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
57555 + (cred->fsuid != dentry->d_inode->i_uid)) {
57556 + if (!inode_permission(dentry->d_inode, acc_mode))
57557 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
57558 + return -EACCES;
57559 + }
57560 +#endif
57561 + return 0;
57562 +}
57563 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
57564 new file mode 100644
57565 index 0000000..8ca18bf
57566 --- /dev/null
57567 +++ b/grsecurity/grsec_fork.c
57568 @@ -0,0 +1,23 @@
57569 +#include <linux/kernel.h>
57570 +#include <linux/sched.h>
57571 +#include <linux/grsecurity.h>
57572 +#include <linux/grinternal.h>
57573 +#include <linux/errno.h>
57574 +
57575 +void
57576 +gr_log_forkfail(const int retval)
57577 +{
57578 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57579 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
57580 + switch (retval) {
57581 + case -EAGAIN:
57582 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
57583 + break;
57584 + case -ENOMEM:
57585 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
57586 + break;
57587 + }
57588 + }
57589 +#endif
57590 + return;
57591 +}
57592 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
57593 new file mode 100644
57594 index 0000000..01ddde4
57595 --- /dev/null
57596 +++ b/grsecurity/grsec_init.c
57597 @@ -0,0 +1,277 @@
57598 +#include <linux/kernel.h>
57599 +#include <linux/sched.h>
57600 +#include <linux/mm.h>
57601 +#include <linux/gracl.h>
57602 +#include <linux/slab.h>
57603 +#include <linux/vmalloc.h>
57604 +#include <linux/percpu.h>
57605 +#include <linux/module.h>
57606 +
57607 +int grsec_enable_ptrace_readexec;
57608 +int grsec_enable_setxid;
57609 +int grsec_enable_brute;
57610 +int grsec_enable_link;
57611 +int grsec_enable_dmesg;
57612 +int grsec_enable_harden_ptrace;
57613 +int grsec_enable_fifo;
57614 +int grsec_enable_execlog;
57615 +int grsec_enable_signal;
57616 +int grsec_enable_forkfail;
57617 +int grsec_enable_audit_ptrace;
57618 +int grsec_enable_time;
57619 +int grsec_enable_audit_textrel;
57620 +int grsec_enable_group;
57621 +int grsec_audit_gid;
57622 +int grsec_enable_chdir;
57623 +int grsec_enable_mount;
57624 +int grsec_enable_rofs;
57625 +int grsec_enable_chroot_findtask;
57626 +int grsec_enable_chroot_mount;
57627 +int grsec_enable_chroot_shmat;
57628 +int grsec_enable_chroot_fchdir;
57629 +int grsec_enable_chroot_double;
57630 +int grsec_enable_chroot_pivot;
57631 +int grsec_enable_chroot_chdir;
57632 +int grsec_enable_chroot_chmod;
57633 +int grsec_enable_chroot_mknod;
57634 +int grsec_enable_chroot_nice;
57635 +int grsec_enable_chroot_execlog;
57636 +int grsec_enable_chroot_caps;
57637 +int grsec_enable_chroot_sysctl;
57638 +int grsec_enable_chroot_unix;
57639 +int grsec_enable_tpe;
57640 +int grsec_tpe_gid;
57641 +int grsec_enable_blackhole;
57642 +#ifdef CONFIG_IPV6_MODULE
57643 +EXPORT_SYMBOL(grsec_enable_blackhole);
57644 +#endif
57645 +int grsec_lastack_retries;
57646 +int grsec_enable_tpe_all;
57647 +int grsec_enable_tpe_invert;
57648 +int grsec_enable_socket_all;
57649 +int grsec_socket_all_gid;
57650 +int grsec_enable_socket_client;
57651 +int grsec_socket_client_gid;
57652 +int grsec_enable_socket_server;
57653 +int grsec_socket_server_gid;
57654 +int grsec_resource_logging;
57655 +int grsec_disable_privio;
57656 +int grsec_enable_log_rwxmaps;
57657 +int grsec_lock;
57658 +
57659 +DEFINE_SPINLOCK(grsec_alert_lock);
57660 +unsigned long grsec_alert_wtime = 0;
57661 +unsigned long grsec_alert_fyet = 0;
57662 +
57663 +DEFINE_SPINLOCK(grsec_audit_lock);
57664 +
57665 +DEFINE_RWLOCK(grsec_exec_file_lock);
57666 +
57667 +char *gr_shared_page[4];
57668 +
57669 +char *gr_alert_log_fmt;
57670 +char *gr_audit_log_fmt;
57671 +char *gr_alert_log_buf;
57672 +char *gr_audit_log_buf;
57673 +
57674 +extern struct gr_arg *gr_usermode;
57675 +extern unsigned char *gr_system_salt;
57676 +extern unsigned char *gr_system_sum;
57677 +
57678 +void __init
57679 +grsecurity_init(void)
57680 +{
57681 + int j;
57682 + /* create the per-cpu shared pages */
57683 +
57684 +#ifdef CONFIG_X86
57685 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
57686 +#endif
57687 +
57688 + for (j = 0; j < 4; j++) {
57689 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
57690 + if (gr_shared_page[j] == NULL) {
57691 + panic("Unable to allocate grsecurity shared page");
57692 + return;
57693 + }
57694 + }
57695 +
57696 + /* allocate log buffers */
57697 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
57698 + if (!gr_alert_log_fmt) {
57699 + panic("Unable to allocate grsecurity alert log format buffer");
57700 + return;
57701 + }
57702 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
57703 + if (!gr_audit_log_fmt) {
57704 + panic("Unable to allocate grsecurity audit log format buffer");
57705 + return;
57706 + }
57707 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57708 + if (!gr_alert_log_buf) {
57709 + panic("Unable to allocate grsecurity alert log buffer");
57710 + return;
57711 + }
57712 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57713 + if (!gr_audit_log_buf) {
57714 + panic("Unable to allocate grsecurity audit log buffer");
57715 + return;
57716 + }
57717 +
57718 + /* allocate memory for authentication structure */
57719 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57720 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57721 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57722 +
57723 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57724 + panic("Unable to allocate grsecurity authentication structure");
57725 + return;
57726 + }
57727 +
57728 +
57729 +#ifdef CONFIG_GRKERNSEC_IO
57730 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57731 + grsec_disable_privio = 1;
57732 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57733 + grsec_disable_privio = 1;
57734 +#else
57735 + grsec_disable_privio = 0;
57736 +#endif
57737 +#endif
57738 +
57739 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57740 + /* for backward compatibility, tpe_invert always defaults to on if
57741 + enabled in the kernel
57742 + */
57743 + grsec_enable_tpe_invert = 1;
57744 +#endif
57745 +
57746 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57747 +#ifndef CONFIG_GRKERNSEC_SYSCTL
57748 + grsec_lock = 1;
57749 +#endif
57750 +
57751 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57752 + grsec_enable_audit_textrel = 1;
57753 +#endif
57754 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57755 + grsec_enable_log_rwxmaps = 1;
57756 +#endif
57757 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57758 + grsec_enable_group = 1;
57759 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57760 +#endif
57761 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57762 + grsec_enable_ptrace_readexec = 1;
57763 +#endif
57764 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57765 + grsec_enable_chdir = 1;
57766 +#endif
57767 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57768 + grsec_enable_harden_ptrace = 1;
57769 +#endif
57770 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57771 + grsec_enable_mount = 1;
57772 +#endif
57773 +#ifdef CONFIG_GRKERNSEC_LINK
57774 + grsec_enable_link = 1;
57775 +#endif
57776 +#ifdef CONFIG_GRKERNSEC_BRUTE
57777 + grsec_enable_brute = 1;
57778 +#endif
57779 +#ifdef CONFIG_GRKERNSEC_DMESG
57780 + grsec_enable_dmesg = 1;
57781 +#endif
57782 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57783 + grsec_enable_blackhole = 1;
57784 + grsec_lastack_retries = 4;
57785 +#endif
57786 +#ifdef CONFIG_GRKERNSEC_FIFO
57787 + grsec_enable_fifo = 1;
57788 +#endif
57789 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57790 + grsec_enable_execlog = 1;
57791 +#endif
57792 +#ifdef CONFIG_GRKERNSEC_SETXID
57793 + grsec_enable_setxid = 1;
57794 +#endif
57795 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57796 + grsec_enable_signal = 1;
57797 +#endif
57798 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57799 + grsec_enable_forkfail = 1;
57800 +#endif
57801 +#ifdef CONFIG_GRKERNSEC_TIME
57802 + grsec_enable_time = 1;
57803 +#endif
57804 +#ifdef CONFIG_GRKERNSEC_RESLOG
57805 + grsec_resource_logging = 1;
57806 +#endif
57807 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57808 + grsec_enable_chroot_findtask = 1;
57809 +#endif
57810 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57811 + grsec_enable_chroot_unix = 1;
57812 +#endif
57813 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57814 + grsec_enable_chroot_mount = 1;
57815 +#endif
57816 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57817 + grsec_enable_chroot_fchdir = 1;
57818 +#endif
57819 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57820 + grsec_enable_chroot_shmat = 1;
57821 +#endif
57822 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57823 + grsec_enable_audit_ptrace = 1;
57824 +#endif
57825 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57826 + grsec_enable_chroot_double = 1;
57827 +#endif
57828 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57829 + grsec_enable_chroot_pivot = 1;
57830 +#endif
57831 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57832 + grsec_enable_chroot_chdir = 1;
57833 +#endif
57834 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57835 + grsec_enable_chroot_chmod = 1;
57836 +#endif
57837 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57838 + grsec_enable_chroot_mknod = 1;
57839 +#endif
57840 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57841 + grsec_enable_chroot_nice = 1;
57842 +#endif
57843 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57844 + grsec_enable_chroot_execlog = 1;
57845 +#endif
57846 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57847 + grsec_enable_chroot_caps = 1;
57848 +#endif
57849 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57850 + grsec_enable_chroot_sysctl = 1;
57851 +#endif
57852 +#ifdef CONFIG_GRKERNSEC_TPE
57853 + grsec_enable_tpe = 1;
57854 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57855 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
57856 + grsec_enable_tpe_all = 1;
57857 +#endif
57858 +#endif
57859 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57860 + grsec_enable_socket_all = 1;
57861 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57862 +#endif
57863 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57864 + grsec_enable_socket_client = 1;
57865 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57866 +#endif
57867 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57868 + grsec_enable_socket_server = 1;
57869 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57870 +#endif
57871 +#endif
57872 +
57873 + return;
57874 +}
57875 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57876 new file mode 100644
57877 index 0000000..3efe141
57878 --- /dev/null
57879 +++ b/grsecurity/grsec_link.c
57880 @@ -0,0 +1,43 @@
57881 +#include <linux/kernel.h>
57882 +#include <linux/sched.h>
57883 +#include <linux/fs.h>
57884 +#include <linux/file.h>
57885 +#include <linux/grinternal.h>
57886 +
57887 +int
57888 +gr_handle_follow_link(const struct inode *parent,
57889 + const struct inode *inode,
57890 + const struct dentry *dentry, const struct vfsmount *mnt)
57891 +{
57892 +#ifdef CONFIG_GRKERNSEC_LINK
57893 + const struct cred *cred = current_cred();
57894 +
57895 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57896 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57897 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57898 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57899 + return -EACCES;
57900 + }
57901 +#endif
57902 + return 0;
57903 +}
57904 +
57905 +int
57906 +gr_handle_hardlink(const struct dentry *dentry,
57907 + const struct vfsmount *mnt,
57908 + struct inode *inode, const int mode, const char *to)
57909 +{
57910 +#ifdef CONFIG_GRKERNSEC_LINK
57911 + const struct cred *cred = current_cred();
57912 +
57913 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57914 + (!S_ISREG(mode) || (mode & S_ISUID) ||
57915 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57916 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57917 + !capable(CAP_FOWNER) && cred->uid) {
57918 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57919 + return -EPERM;
57920 + }
57921 +#endif
57922 + return 0;
57923 +}
57924 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57925 new file mode 100644
57926 index 0000000..a45d2e9
57927 --- /dev/null
57928 +++ b/grsecurity/grsec_log.c
57929 @@ -0,0 +1,322 @@
57930 +#include <linux/kernel.h>
57931 +#include <linux/sched.h>
57932 +#include <linux/file.h>
57933 +#include <linux/tty.h>
57934 +#include <linux/fs.h>
57935 +#include <linux/grinternal.h>
57936 +
57937 +#ifdef CONFIG_TREE_PREEMPT_RCU
57938 +#define DISABLE_PREEMPT() preempt_disable()
57939 +#define ENABLE_PREEMPT() preempt_enable()
57940 +#else
57941 +#define DISABLE_PREEMPT()
57942 +#define ENABLE_PREEMPT()
57943 +#endif
57944 +
57945 +#define BEGIN_LOCKS(x) \
57946 + DISABLE_PREEMPT(); \
57947 + rcu_read_lock(); \
57948 + read_lock(&tasklist_lock); \
57949 + read_lock(&grsec_exec_file_lock); \
57950 + if (x != GR_DO_AUDIT) \
57951 + spin_lock(&grsec_alert_lock); \
57952 + else \
57953 + spin_lock(&grsec_audit_lock)
57954 +
57955 +#define END_LOCKS(x) \
57956 + if (x != GR_DO_AUDIT) \
57957 + spin_unlock(&grsec_alert_lock); \
57958 + else \
57959 + spin_unlock(&grsec_audit_lock); \
57960 + read_unlock(&grsec_exec_file_lock); \
57961 + read_unlock(&tasklist_lock); \
57962 + rcu_read_unlock(); \
57963 + ENABLE_PREEMPT(); \
57964 + if (x == GR_DONT_AUDIT) \
57965 + gr_handle_alertkill(current)
57966 +
57967 +enum {
57968 + FLOODING,
57969 + NO_FLOODING
57970 +};
57971 +
57972 +extern char *gr_alert_log_fmt;
57973 +extern char *gr_audit_log_fmt;
57974 +extern char *gr_alert_log_buf;
57975 +extern char *gr_audit_log_buf;
57976 +
57977 +static int gr_log_start(int audit)
57978 +{
57979 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57980 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57981 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57982 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57983 + unsigned long curr_secs = get_seconds();
57984 +
57985 + if (audit == GR_DO_AUDIT)
57986 + goto set_fmt;
57987 +
57988 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57989 + grsec_alert_wtime = curr_secs;
57990 + grsec_alert_fyet = 0;
57991 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57992 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57993 + grsec_alert_fyet++;
57994 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57995 + grsec_alert_wtime = curr_secs;
57996 + grsec_alert_fyet++;
57997 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57998 + return FLOODING;
57999 + }
58000 + else return FLOODING;
58001 +
58002 +set_fmt:
58003 +#endif
58004 + memset(buf, 0, PAGE_SIZE);
58005 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
58006 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
58007 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58008 + } else if (current->signal->curr_ip) {
58009 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
58010 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
58011 + } else if (gr_acl_is_enabled()) {
58012 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
58013 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58014 + } else {
58015 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
58016 + strcpy(buf, fmt);
58017 + }
58018 +
58019 + return NO_FLOODING;
58020 +}
58021 +
58022 +static void gr_log_middle(int audit, const char *msg, va_list ap)
58023 + __attribute__ ((format (printf, 2, 0)));
58024 +
58025 +static void gr_log_middle(int audit, const char *msg, va_list ap)
58026 +{
58027 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58028 + unsigned int len = strlen(buf);
58029 +
58030 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58031 +
58032 + return;
58033 +}
58034 +
58035 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
58036 + __attribute__ ((format (printf, 2, 3)));
58037 +
58038 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
58039 +{
58040 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58041 + unsigned int len = strlen(buf);
58042 + va_list ap;
58043 +
58044 + va_start(ap, msg);
58045 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58046 + va_end(ap);
58047 +
58048 + return;
58049 +}
58050 +
58051 +static void gr_log_end(int audit, int append_default)
58052 +{
58053 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58054 +
58055 + if (append_default) {
58056 + unsigned int len = strlen(buf);
58057 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
58058 + }
58059 +
58060 + printk("%s\n", buf);
58061 +
58062 + return;
58063 +}
58064 +
58065 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
58066 +{
58067 + int logtype;
58068 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
58069 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
58070 + void *voidptr = NULL;
58071 + int num1 = 0, num2 = 0;
58072 + unsigned long ulong1 = 0, ulong2 = 0;
58073 + struct dentry *dentry = NULL;
58074 + struct vfsmount *mnt = NULL;
58075 + struct file *file = NULL;
58076 + struct task_struct *task = NULL;
58077 + const struct cred *cred, *pcred;
58078 + va_list ap;
58079 +
58080 + BEGIN_LOCKS(audit);
58081 + logtype = gr_log_start(audit);
58082 + if (logtype == FLOODING) {
58083 + END_LOCKS(audit);
58084 + return;
58085 + }
58086 + va_start(ap, argtypes);
58087 + switch (argtypes) {
58088 + case GR_TTYSNIFF:
58089 + task = va_arg(ap, struct task_struct *);
58090 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
58091 + break;
58092 + case GR_SYSCTL_HIDDEN:
58093 + str1 = va_arg(ap, char *);
58094 + gr_log_middle_varargs(audit, msg, result, str1);
58095 + break;
58096 + case GR_RBAC:
58097 + dentry = va_arg(ap, struct dentry *);
58098 + mnt = va_arg(ap, struct vfsmount *);
58099 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
58100 + break;
58101 + case GR_RBAC_STR:
58102 + dentry = va_arg(ap, struct dentry *);
58103 + mnt = va_arg(ap, struct vfsmount *);
58104 + str1 = va_arg(ap, char *);
58105 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
58106 + break;
58107 + case GR_STR_RBAC:
58108 + str1 = va_arg(ap, char *);
58109 + dentry = va_arg(ap, struct dentry *);
58110 + mnt = va_arg(ap, struct vfsmount *);
58111 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
58112 + break;
58113 + case GR_RBAC_MODE2:
58114 + dentry = va_arg(ap, struct dentry *);
58115 + mnt = va_arg(ap, struct vfsmount *);
58116 + str1 = va_arg(ap, char *);
58117 + str2 = va_arg(ap, char *);
58118 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
58119 + break;
58120 + case GR_RBAC_MODE3:
58121 + dentry = va_arg(ap, struct dentry *);
58122 + mnt = va_arg(ap, struct vfsmount *);
58123 + str1 = va_arg(ap, char *);
58124 + str2 = va_arg(ap, char *);
58125 + str3 = va_arg(ap, char *);
58126 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
58127 + break;
58128 + case GR_FILENAME:
58129 + dentry = va_arg(ap, struct dentry *);
58130 + mnt = va_arg(ap, struct vfsmount *);
58131 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
58132 + break;
58133 + case GR_STR_FILENAME:
58134 + str1 = va_arg(ap, char *);
58135 + dentry = va_arg(ap, struct dentry *);
58136 + mnt = va_arg(ap, struct vfsmount *);
58137 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
58138 + break;
58139 + case GR_FILENAME_STR:
58140 + dentry = va_arg(ap, struct dentry *);
58141 + mnt = va_arg(ap, struct vfsmount *);
58142 + str1 = va_arg(ap, char *);
58143 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58144 + break;
58145 + case GR_FILENAME_TWO_INT:
58146 + dentry = va_arg(ap, struct dentry *);
58147 + mnt = va_arg(ap, struct vfsmount *);
58148 + num1 = va_arg(ap, int);
58149 + num2 = va_arg(ap, int);
58150 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58151 + break;
58152 + case GR_FILENAME_TWO_INT_STR:
58153 + dentry = va_arg(ap, struct dentry *);
58154 + mnt = va_arg(ap, struct vfsmount *);
58155 + num1 = va_arg(ap, int);
58156 + num2 = va_arg(ap, int);
58157 + str1 = va_arg(ap, char *);
58158 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58159 + break;
58160 + case GR_TEXTREL:
58161 + file = va_arg(ap, struct file *);
58162 + ulong1 = va_arg(ap, unsigned long);
58163 + ulong2 = va_arg(ap, unsigned long);
58164 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58165 + break;
58166 + case GR_PTRACE:
58167 + task = va_arg(ap, struct task_struct *);
58168 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58169 + break;
58170 + case GR_RESOURCE:
58171 + task = va_arg(ap, struct task_struct *);
58172 + cred = __task_cred(task);
58173 + pcred = __task_cred(task->real_parent);
58174 + ulong1 = va_arg(ap, unsigned long);
58175 + str1 = va_arg(ap, char *);
58176 + ulong2 = va_arg(ap, unsigned long);
58177 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58178 + break;
58179 + case GR_CAP:
58180 + task = va_arg(ap, struct task_struct *);
58181 + cred = __task_cred(task);
58182 + pcred = __task_cred(task->real_parent);
58183 + str1 = va_arg(ap, char *);
58184 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58185 + break;
58186 + case GR_SIG:
58187 + str1 = va_arg(ap, char *);
58188 + voidptr = va_arg(ap, void *);
58189 + gr_log_middle_varargs(audit, msg, str1, voidptr);
58190 + break;
58191 + case GR_SIG2:
58192 + task = va_arg(ap, struct task_struct *);
58193 + cred = __task_cred(task);
58194 + pcred = __task_cred(task->real_parent);
58195 + num1 = va_arg(ap, int);
58196 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58197 + break;
58198 + case GR_CRASH1:
58199 + task = va_arg(ap, struct task_struct *);
58200 + cred = __task_cred(task);
58201 + pcred = __task_cred(task->real_parent);
58202 + ulong1 = va_arg(ap, unsigned long);
58203 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58204 + break;
58205 + case GR_CRASH2:
58206 + task = va_arg(ap, struct task_struct *);
58207 + cred = __task_cred(task);
58208 + pcred = __task_cred(task->real_parent);
58209 + ulong1 = va_arg(ap, unsigned long);
58210 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58211 + break;
58212 + case GR_RWXMAP:
58213 + file = va_arg(ap, struct file *);
58214 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58215 + break;
58216 + case GR_PSACCT:
58217 + {
58218 + unsigned int wday, cday;
58219 + __u8 whr, chr;
58220 + __u8 wmin, cmin;
58221 + __u8 wsec, csec;
58222 + char cur_tty[64] = { 0 };
58223 + char parent_tty[64] = { 0 };
58224 +
58225 + task = va_arg(ap, struct task_struct *);
58226 + wday = va_arg(ap, unsigned int);
58227 + cday = va_arg(ap, unsigned int);
58228 + whr = va_arg(ap, int);
58229 + chr = va_arg(ap, int);
58230 + wmin = va_arg(ap, int);
58231 + cmin = va_arg(ap, int);
58232 + wsec = va_arg(ap, int);
58233 + csec = va_arg(ap, int);
58234 + ulong1 = va_arg(ap, unsigned long);
58235 + cred = __task_cred(task);
58236 + pcred = __task_cred(task->real_parent);
58237 +
58238 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58239 + }
58240 + break;
58241 + default:
58242 + gr_log_middle(audit, msg, ap);
58243 + }
58244 + va_end(ap);
58245 + // these don't need DEFAULTSECARGS printed on the end
58246 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
58247 + gr_log_end(audit, 0);
58248 + else
58249 + gr_log_end(audit, 1);
58250 + END_LOCKS(audit);
58251 +}
58252 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
58253 new file mode 100644
58254 index 0000000..f536303
58255 --- /dev/null
58256 +++ b/grsecurity/grsec_mem.c
58257 @@ -0,0 +1,40 @@
58258 +#include <linux/kernel.h>
58259 +#include <linux/sched.h>
58260 +#include <linux/mm.h>
58261 +#include <linux/mman.h>
58262 +#include <linux/grinternal.h>
58263 +
58264 +void
58265 +gr_handle_ioperm(void)
58266 +{
58267 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
58268 + return;
58269 +}
58270 +
58271 +void
58272 +gr_handle_iopl(void)
58273 +{
58274 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
58275 + return;
58276 +}
58277 +
58278 +void
58279 +gr_handle_mem_readwrite(u64 from, u64 to)
58280 +{
58281 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58282 + return;
58283 +}
58284 +
58285 +void
58286 +gr_handle_vm86(void)
58287 +{
58288 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
58289 + return;
58290 +}
58291 +
58292 +void
58293 +gr_log_badprocpid(const char *entry)
58294 +{
58295 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
58296 + return;
58297 +}
58298 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
58299 new file mode 100644
58300 index 0000000..2131422
58301 --- /dev/null
58302 +++ b/grsecurity/grsec_mount.c
58303 @@ -0,0 +1,62 @@
58304 +#include <linux/kernel.h>
58305 +#include <linux/sched.h>
58306 +#include <linux/mount.h>
58307 +#include <linux/grsecurity.h>
58308 +#include <linux/grinternal.h>
58309 +
58310 +void
58311 +gr_log_remount(const char *devname, const int retval)
58312 +{
58313 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58314 + if (grsec_enable_mount && (retval >= 0))
58315 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
58316 +#endif
58317 + return;
58318 +}
58319 +
58320 +void
58321 +gr_log_unmount(const char *devname, const int retval)
58322 +{
58323 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58324 + if (grsec_enable_mount && (retval >= 0))
58325 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
58326 +#endif
58327 + return;
58328 +}
58329 +
58330 +void
58331 +gr_log_mount(const char *from, const char *to, const int retval)
58332 +{
58333 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58334 + if (grsec_enable_mount && (retval >= 0))
58335 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
58336 +#endif
58337 + return;
58338 +}
58339 +
58340 +int
58341 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
58342 +{
58343 +#ifdef CONFIG_GRKERNSEC_ROFS
58344 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
58345 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
58346 + return -EPERM;
58347 + } else
58348 + return 0;
58349 +#endif
58350 + return 0;
58351 +}
58352 +
58353 +int
58354 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
58355 +{
58356 +#ifdef CONFIG_GRKERNSEC_ROFS
58357 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
58358 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
58359 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
58360 + return -EPERM;
58361 + } else
58362 + return 0;
58363 +#endif
58364 + return 0;
58365 +}
58366 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
58367 new file mode 100644
58368 index 0000000..a3b12a0
58369 --- /dev/null
58370 +++ b/grsecurity/grsec_pax.c
58371 @@ -0,0 +1,36 @@
58372 +#include <linux/kernel.h>
58373 +#include <linux/sched.h>
58374 +#include <linux/mm.h>
58375 +#include <linux/file.h>
58376 +#include <linux/grinternal.h>
58377 +#include <linux/grsecurity.h>
58378 +
58379 +void
58380 +gr_log_textrel(struct vm_area_struct * vma)
58381 +{
58382 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58383 + if (grsec_enable_audit_textrel)
58384 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
58385 +#endif
58386 + return;
58387 +}
58388 +
58389 +void
58390 +gr_log_rwxmmap(struct file *file)
58391 +{
58392 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58393 + if (grsec_enable_log_rwxmaps)
58394 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
58395 +#endif
58396 + return;
58397 +}
58398 +
58399 +void
58400 +gr_log_rwxmprotect(struct file *file)
58401 +{
58402 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58403 + if (grsec_enable_log_rwxmaps)
58404 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
58405 +#endif
58406 + return;
58407 +}
58408 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
58409 new file mode 100644
58410 index 0000000..f7f29aa
58411 --- /dev/null
58412 +++ b/grsecurity/grsec_ptrace.c
58413 @@ -0,0 +1,30 @@
58414 +#include <linux/kernel.h>
58415 +#include <linux/sched.h>
58416 +#include <linux/grinternal.h>
58417 +#include <linux/security.h>
58418 +
58419 +void
58420 +gr_audit_ptrace(struct task_struct *task)
58421 +{
58422 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58423 + if (grsec_enable_audit_ptrace)
58424 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
58425 +#endif
58426 + return;
58427 +}
58428 +
58429 +int
58430 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
58431 +{
58432 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58433 + const struct dentry *dentry = file->f_path.dentry;
58434 + const struct vfsmount *mnt = file->f_path.mnt;
58435 +
58436 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
58437 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
58438 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
58439 + return -EACCES;
58440 + }
58441 +#endif
58442 + return 0;
58443 +}
58444 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
58445 new file mode 100644
58446 index 0000000..7a5b2de
58447 --- /dev/null
58448 +++ b/grsecurity/grsec_sig.c
58449 @@ -0,0 +1,207 @@
58450 +#include <linux/kernel.h>
58451 +#include <linux/sched.h>
58452 +#include <linux/delay.h>
58453 +#include <linux/grsecurity.h>
58454 +#include <linux/grinternal.h>
58455 +#include <linux/hardirq.h>
58456 +
58457 +char *signames[] = {
58458 + [SIGSEGV] = "Segmentation fault",
58459 + [SIGILL] = "Illegal instruction",
58460 + [SIGABRT] = "Abort",
58461 + [SIGBUS] = "Invalid alignment/Bus error"
58462 +};
58463 +
58464 +void
58465 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
58466 +{
58467 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58468 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
58469 + (sig == SIGABRT) || (sig == SIGBUS))) {
58470 + if (t->pid == current->pid) {
58471 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
58472 + } else {
58473 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
58474 + }
58475 + }
58476 +#endif
58477 + return;
58478 +}
58479 +
58480 +int
58481 +gr_handle_signal(const struct task_struct *p, const int sig)
58482 +{
58483 +#ifdef CONFIG_GRKERNSEC
58484 + /* ignore the 0 signal for protected task checks */
58485 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
58486 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
58487 + return -EPERM;
58488 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
58489 + return -EPERM;
58490 + }
58491 +#endif
58492 + return 0;
58493 +}
58494 +
58495 +#ifdef CONFIG_GRKERNSEC
58496 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
58497 +
58498 +int gr_fake_force_sig(int sig, struct task_struct *t)
58499 +{
58500 + unsigned long int flags;
58501 + int ret, blocked, ignored;
58502 + struct k_sigaction *action;
58503 +
58504 + spin_lock_irqsave(&t->sighand->siglock, flags);
58505 + action = &t->sighand->action[sig-1];
58506 + ignored = action->sa.sa_handler == SIG_IGN;
58507 + blocked = sigismember(&t->blocked, sig);
58508 + if (blocked || ignored) {
58509 + action->sa.sa_handler = SIG_DFL;
58510 + if (blocked) {
58511 + sigdelset(&t->blocked, sig);
58512 + recalc_sigpending_and_wake(t);
58513 + }
58514 + }
58515 + if (action->sa.sa_handler == SIG_DFL)
58516 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
58517 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
58518 +
58519 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
58520 +
58521 + return ret;
58522 +}
58523 +#endif
58524 +
58525 +#ifdef CONFIG_GRKERNSEC_BRUTE
58526 +#define GR_USER_BAN_TIME (15 * 60)
58527 +
58528 +static int __get_dumpable(unsigned long mm_flags)
58529 +{
58530 + int ret;
58531 +
58532 + ret = mm_flags & MMF_DUMPABLE_MASK;
58533 + return (ret >= 2) ? 2 : ret;
58534 +}
58535 +#endif
58536 +
58537 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
58538 +{
58539 +#ifdef CONFIG_GRKERNSEC_BRUTE
58540 + uid_t uid = 0;
58541 +
58542 + if (!grsec_enable_brute)
58543 + return;
58544 +
58545 + rcu_read_lock();
58546 + read_lock(&tasklist_lock);
58547 + read_lock(&grsec_exec_file_lock);
58548 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
58549 + p->real_parent->brute = 1;
58550 + else {
58551 + const struct cred *cred = __task_cred(p), *cred2;
58552 + struct task_struct *tsk, *tsk2;
58553 +
58554 + if (!__get_dumpable(mm_flags) && cred->uid) {
58555 + struct user_struct *user;
58556 +
58557 + uid = cred->uid;
58558 +
58559 + /* this is put upon execution past expiration */
58560 + user = find_user(uid);
58561 + if (user == NULL)
58562 + goto unlock;
58563 + user->banned = 1;
58564 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
58565 + if (user->ban_expires == ~0UL)
58566 + user->ban_expires--;
58567 +
58568 + do_each_thread(tsk2, tsk) {
58569 + cred2 = __task_cred(tsk);
58570 + if (tsk != p && cred2->uid == uid)
58571 + gr_fake_force_sig(SIGKILL, tsk);
58572 + } while_each_thread(tsk2, tsk);
58573 + }
58574 + }
58575 +unlock:
58576 + read_unlock(&grsec_exec_file_lock);
58577 + read_unlock(&tasklist_lock);
58578 + rcu_read_unlock();
58579 +
58580 + if (uid)
58581 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
58582 +
58583 +#endif
58584 + return;
58585 +}
58586 +
58587 +void gr_handle_brute_check(void)
58588 +{
58589 +#ifdef CONFIG_GRKERNSEC_BRUTE
58590 + if (current->brute)
58591 + msleep(30 * 1000);
58592 +#endif
58593 + return;
58594 +}
58595 +
58596 +void gr_handle_kernel_exploit(void)
58597 +{
58598 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
58599 + const struct cred *cred;
58600 + struct task_struct *tsk, *tsk2;
58601 + struct user_struct *user;
58602 + uid_t uid;
58603 +
58604 + if (in_irq() || in_serving_softirq() || in_nmi())
58605 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
58606 +
58607 + uid = current_uid();
58608 +
58609 + if (uid == 0)
58610 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
58611 + else {
58612 + /* kill all the processes of this user, hold a reference
58613 + to their creds struct, and prevent them from creating
58614 + another process until system reset
58615 + */
58616 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
58617 + /* we intentionally leak this ref */
58618 + user = get_uid(current->cred->user);
58619 + if (user) {
58620 + user->banned = 1;
58621 + user->ban_expires = ~0UL;
58622 + }
58623 +
58624 + read_lock(&tasklist_lock);
58625 + do_each_thread(tsk2, tsk) {
58626 + cred = __task_cred(tsk);
58627 + if (cred->uid == uid)
58628 + gr_fake_force_sig(SIGKILL, tsk);
58629 + } while_each_thread(tsk2, tsk);
58630 + read_unlock(&tasklist_lock);
58631 + }
58632 +#endif
58633 +}
58634 +
58635 +int __gr_process_user_ban(struct user_struct *user)
58636 +{
58637 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58638 + if (unlikely(user->banned)) {
58639 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
58640 + user->banned = 0;
58641 + user->ban_expires = 0;
58642 + free_uid(user);
58643 + } else
58644 + return -EPERM;
58645 + }
58646 +#endif
58647 + return 0;
58648 +}
58649 +
58650 +int gr_process_user_ban(void)
58651 +{
58652 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58653 + return __gr_process_user_ban(current->cred->user);
58654 +#endif
58655 + return 0;
58656 +}
58657 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
58658 new file mode 100644
58659 index 0000000..4030d57
58660 --- /dev/null
58661 +++ b/grsecurity/grsec_sock.c
58662 @@ -0,0 +1,244 @@
58663 +#include <linux/kernel.h>
58664 +#include <linux/module.h>
58665 +#include <linux/sched.h>
58666 +#include <linux/file.h>
58667 +#include <linux/net.h>
58668 +#include <linux/in.h>
58669 +#include <linux/ip.h>
58670 +#include <net/sock.h>
58671 +#include <net/inet_sock.h>
58672 +#include <linux/grsecurity.h>
58673 +#include <linux/grinternal.h>
58674 +#include <linux/gracl.h>
58675 +
58676 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
58677 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
58678 +
58679 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
58680 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
58681 +
58682 +#ifdef CONFIG_UNIX_MODULE
58683 +EXPORT_SYMBOL(gr_acl_handle_unix);
58684 +EXPORT_SYMBOL(gr_acl_handle_mknod);
58685 +EXPORT_SYMBOL(gr_handle_chroot_unix);
58686 +EXPORT_SYMBOL(gr_handle_create);
58687 +#endif
58688 +
58689 +#ifdef CONFIG_GRKERNSEC
58690 +#define gr_conn_table_size 32749
58691 +struct conn_table_entry {
58692 + struct conn_table_entry *next;
58693 + struct signal_struct *sig;
58694 +};
58695 +
58696 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
58697 +DEFINE_SPINLOCK(gr_conn_table_lock);
58698 +
58699 +extern const char * gr_socktype_to_name(unsigned char type);
58700 +extern const char * gr_proto_to_name(unsigned char proto);
58701 +extern const char * gr_sockfamily_to_name(unsigned char family);
58702 +
58703 +static __inline__ int
58704 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58705 +{
58706 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58707 +}
58708 +
58709 +static __inline__ int
58710 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58711 + __u16 sport, __u16 dport)
58712 +{
58713 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58714 + sig->gr_sport == sport && sig->gr_dport == dport))
58715 + return 1;
58716 + else
58717 + return 0;
58718 +}
58719 +
58720 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58721 +{
58722 + struct conn_table_entry **match;
58723 + unsigned int index;
58724 +
58725 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58726 + sig->gr_sport, sig->gr_dport,
58727 + gr_conn_table_size);
58728 +
58729 + newent->sig = sig;
58730 +
58731 + match = &gr_conn_table[index];
58732 + newent->next = *match;
58733 + *match = newent;
58734 +
58735 + return;
58736 +}
58737 +
58738 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58739 +{
58740 + struct conn_table_entry *match, *last = NULL;
58741 + unsigned int index;
58742 +
58743 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58744 + sig->gr_sport, sig->gr_dport,
58745 + gr_conn_table_size);
58746 +
58747 + match = gr_conn_table[index];
58748 + while (match && !conn_match(match->sig,
58749 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58750 + sig->gr_dport)) {
58751 + last = match;
58752 + match = match->next;
58753 + }
58754 +
58755 + if (match) {
58756 + if (last)
58757 + last->next = match->next;
58758 + else
58759 + gr_conn_table[index] = NULL;
58760 + kfree(match);
58761 + }
58762 +
58763 + return;
58764 +}
58765 +
58766 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58767 + __u16 sport, __u16 dport)
58768 +{
58769 + struct conn_table_entry *match;
58770 + unsigned int index;
58771 +
58772 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58773 +
58774 + match = gr_conn_table[index];
58775 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58776 + match = match->next;
58777 +
58778 + if (match)
58779 + return match->sig;
58780 + else
58781 + return NULL;
58782 +}
58783 +
58784 +#endif
58785 +
58786 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58787 +{
58788 +#ifdef CONFIG_GRKERNSEC
58789 + struct signal_struct *sig = task->signal;
58790 + struct conn_table_entry *newent;
58791 +
58792 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58793 + if (newent == NULL)
58794 + return;
58795 + /* no bh lock needed since we are called with bh disabled */
58796 + spin_lock(&gr_conn_table_lock);
58797 + gr_del_task_from_ip_table_nolock(sig);
58798 + sig->gr_saddr = inet->inet_rcv_saddr;
58799 + sig->gr_daddr = inet->inet_daddr;
58800 + sig->gr_sport = inet->inet_sport;
58801 + sig->gr_dport = inet->inet_dport;
58802 + gr_add_to_task_ip_table_nolock(sig, newent);
58803 + spin_unlock(&gr_conn_table_lock);
58804 +#endif
58805 + return;
58806 +}
58807 +
58808 +void gr_del_task_from_ip_table(struct task_struct *task)
58809 +{
58810 +#ifdef CONFIG_GRKERNSEC
58811 + spin_lock_bh(&gr_conn_table_lock);
58812 + gr_del_task_from_ip_table_nolock(task->signal);
58813 + spin_unlock_bh(&gr_conn_table_lock);
58814 +#endif
58815 + return;
58816 +}
58817 +
58818 +void
58819 +gr_attach_curr_ip(const struct sock *sk)
58820 +{
58821 +#ifdef CONFIG_GRKERNSEC
58822 + struct signal_struct *p, *set;
58823 + const struct inet_sock *inet = inet_sk(sk);
58824 +
58825 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58826 + return;
58827 +
58828 + set = current->signal;
58829 +
58830 + spin_lock_bh(&gr_conn_table_lock);
58831 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58832 + inet->inet_dport, inet->inet_sport);
58833 + if (unlikely(p != NULL)) {
58834 + set->curr_ip = p->curr_ip;
58835 + set->used_accept = 1;
58836 + gr_del_task_from_ip_table_nolock(p);
58837 + spin_unlock_bh(&gr_conn_table_lock);
58838 + return;
58839 + }
58840 + spin_unlock_bh(&gr_conn_table_lock);
58841 +
58842 + set->curr_ip = inet->inet_daddr;
58843 + set->used_accept = 1;
58844 +#endif
58845 + return;
58846 +}
58847 +
58848 +int
58849 +gr_handle_sock_all(const int family, const int type, const int protocol)
58850 +{
58851 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58852 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58853 + (family != AF_UNIX)) {
58854 + if (family == AF_INET)
58855 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58856 + else
58857 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58858 + return -EACCES;
58859 + }
58860 +#endif
58861 + return 0;
58862 +}
58863 +
58864 +int
58865 +gr_handle_sock_server(const struct sockaddr *sck)
58866 +{
58867 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58868 + if (grsec_enable_socket_server &&
58869 + in_group_p(grsec_socket_server_gid) &&
58870 + sck && (sck->sa_family != AF_UNIX) &&
58871 + (sck->sa_family != AF_LOCAL)) {
58872 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58873 + return -EACCES;
58874 + }
58875 +#endif
58876 + return 0;
58877 +}
58878 +
58879 +int
58880 +gr_handle_sock_server_other(const struct sock *sck)
58881 +{
58882 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58883 + if (grsec_enable_socket_server &&
58884 + in_group_p(grsec_socket_server_gid) &&
58885 + sck && (sck->sk_family != AF_UNIX) &&
58886 + (sck->sk_family != AF_LOCAL)) {
58887 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58888 + return -EACCES;
58889 + }
58890 +#endif
58891 + return 0;
58892 +}
58893 +
58894 +int
58895 +gr_handle_sock_client(const struct sockaddr *sck)
58896 +{
58897 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58898 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58899 + sck && (sck->sa_family != AF_UNIX) &&
58900 + (sck->sa_family != AF_LOCAL)) {
58901 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58902 + return -EACCES;
58903 + }
58904 +#endif
58905 + return 0;
58906 +}
58907 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58908 new file mode 100644
58909 index 0000000..a1aedd7
58910 --- /dev/null
58911 +++ b/grsecurity/grsec_sysctl.c
58912 @@ -0,0 +1,451 @@
58913 +#include <linux/kernel.h>
58914 +#include <linux/sched.h>
58915 +#include <linux/sysctl.h>
58916 +#include <linux/grsecurity.h>
58917 +#include <linux/grinternal.h>
58918 +
58919 +int
58920 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58921 +{
58922 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58923 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58924 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58925 + return -EACCES;
58926 + }
58927 +#endif
58928 + return 0;
58929 +}
58930 +
58931 +#ifdef CONFIG_GRKERNSEC_ROFS
58932 +static int __maybe_unused one = 1;
58933 +#endif
58934 +
58935 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58936 +struct ctl_table grsecurity_table[] = {
58937 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58938 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58939 +#ifdef CONFIG_GRKERNSEC_IO
58940 + {
58941 + .procname = "disable_priv_io",
58942 + .data = &grsec_disable_privio,
58943 + .maxlen = sizeof(int),
58944 + .mode = 0600,
58945 + .proc_handler = &proc_dointvec,
58946 + },
58947 +#endif
58948 +#endif
58949 +#ifdef CONFIG_GRKERNSEC_LINK
58950 + {
58951 + .procname = "linking_restrictions",
58952 + .data = &grsec_enable_link,
58953 + .maxlen = sizeof(int),
58954 + .mode = 0600,
58955 + .proc_handler = &proc_dointvec,
58956 + },
58957 +#endif
58958 +#ifdef CONFIG_GRKERNSEC_BRUTE
58959 + {
58960 + .procname = "deter_bruteforce",
58961 + .data = &grsec_enable_brute,
58962 + .maxlen = sizeof(int),
58963 + .mode = 0600,
58964 + .proc_handler = &proc_dointvec,
58965 + },
58966 +#endif
58967 +#ifdef CONFIG_GRKERNSEC_FIFO
58968 + {
58969 + .procname = "fifo_restrictions",
58970 + .data = &grsec_enable_fifo,
58971 + .maxlen = sizeof(int),
58972 + .mode = 0600,
58973 + .proc_handler = &proc_dointvec,
58974 + },
58975 +#endif
58976 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58977 + {
58978 + .procname = "ptrace_readexec",
58979 + .data = &grsec_enable_ptrace_readexec,
58980 + .maxlen = sizeof(int),
58981 + .mode = 0600,
58982 + .proc_handler = &proc_dointvec,
58983 + },
58984 +#endif
58985 +#ifdef CONFIG_GRKERNSEC_SETXID
58986 + {
58987 + .procname = "consistent_setxid",
58988 + .data = &grsec_enable_setxid,
58989 + .maxlen = sizeof(int),
58990 + .mode = 0600,
58991 + .proc_handler = &proc_dointvec,
58992 + },
58993 +#endif
58994 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58995 + {
58996 + .procname = "ip_blackhole",
58997 + .data = &grsec_enable_blackhole,
58998 + .maxlen = sizeof(int),
58999 + .mode = 0600,
59000 + .proc_handler = &proc_dointvec,
59001 + },
59002 + {
59003 + .procname = "lastack_retries",
59004 + .data = &grsec_lastack_retries,
59005 + .maxlen = sizeof(int),
59006 + .mode = 0600,
59007 + .proc_handler = &proc_dointvec,
59008 + },
59009 +#endif
59010 +#ifdef CONFIG_GRKERNSEC_EXECLOG
59011 + {
59012 + .procname = "exec_logging",
59013 + .data = &grsec_enable_execlog,
59014 + .maxlen = sizeof(int),
59015 + .mode = 0600,
59016 + .proc_handler = &proc_dointvec,
59017 + },
59018 +#endif
59019 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59020 + {
59021 + .procname = "rwxmap_logging",
59022 + .data = &grsec_enable_log_rwxmaps,
59023 + .maxlen = sizeof(int),
59024 + .mode = 0600,
59025 + .proc_handler = &proc_dointvec,
59026 + },
59027 +#endif
59028 +#ifdef CONFIG_GRKERNSEC_SIGNAL
59029 + {
59030 + .procname = "signal_logging",
59031 + .data = &grsec_enable_signal,
59032 + .maxlen = sizeof(int),
59033 + .mode = 0600,
59034 + .proc_handler = &proc_dointvec,
59035 + },
59036 +#endif
59037 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
59038 + {
59039 + .procname = "forkfail_logging",
59040 + .data = &grsec_enable_forkfail,
59041 + .maxlen = sizeof(int),
59042 + .mode = 0600,
59043 + .proc_handler = &proc_dointvec,
59044 + },
59045 +#endif
59046 +#ifdef CONFIG_GRKERNSEC_TIME
59047 + {
59048 + .procname = "timechange_logging",
59049 + .data = &grsec_enable_time,
59050 + .maxlen = sizeof(int),
59051 + .mode = 0600,
59052 + .proc_handler = &proc_dointvec,
59053 + },
59054 +#endif
59055 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
59056 + {
59057 + .procname = "chroot_deny_shmat",
59058 + .data = &grsec_enable_chroot_shmat,
59059 + .maxlen = sizeof(int),
59060 + .mode = 0600,
59061 + .proc_handler = &proc_dointvec,
59062 + },
59063 +#endif
59064 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
59065 + {
59066 + .procname = "chroot_deny_unix",
59067 + .data = &grsec_enable_chroot_unix,
59068 + .maxlen = sizeof(int),
59069 + .mode = 0600,
59070 + .proc_handler = &proc_dointvec,
59071 + },
59072 +#endif
59073 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
59074 + {
59075 + .procname = "chroot_deny_mount",
59076 + .data = &grsec_enable_chroot_mount,
59077 + .maxlen = sizeof(int),
59078 + .mode = 0600,
59079 + .proc_handler = &proc_dointvec,
59080 + },
59081 +#endif
59082 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59083 + {
59084 + .procname = "chroot_deny_fchdir",
59085 + .data = &grsec_enable_chroot_fchdir,
59086 + .maxlen = sizeof(int),
59087 + .mode = 0600,
59088 + .proc_handler = &proc_dointvec,
59089 + },
59090 +#endif
59091 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59092 + {
59093 + .procname = "chroot_deny_chroot",
59094 + .data = &grsec_enable_chroot_double,
59095 + .maxlen = sizeof(int),
59096 + .mode = 0600,
59097 + .proc_handler = &proc_dointvec,
59098 + },
59099 +#endif
59100 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59101 + {
59102 + .procname = "chroot_deny_pivot",
59103 + .data = &grsec_enable_chroot_pivot,
59104 + .maxlen = sizeof(int),
59105 + .mode = 0600,
59106 + .proc_handler = &proc_dointvec,
59107 + },
59108 +#endif
59109 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59110 + {
59111 + .procname = "chroot_enforce_chdir",
59112 + .data = &grsec_enable_chroot_chdir,
59113 + .maxlen = sizeof(int),
59114 + .mode = 0600,
59115 + .proc_handler = &proc_dointvec,
59116 + },
59117 +#endif
59118 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59119 + {
59120 + .procname = "chroot_deny_chmod",
59121 + .data = &grsec_enable_chroot_chmod,
59122 + .maxlen = sizeof(int),
59123 + .mode = 0600,
59124 + .proc_handler = &proc_dointvec,
59125 + },
59126 +#endif
59127 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59128 + {
59129 + .procname = "chroot_deny_mknod",
59130 + .data = &grsec_enable_chroot_mknod,
59131 + .maxlen = sizeof(int),
59132 + .mode = 0600,
59133 + .proc_handler = &proc_dointvec,
59134 + },
59135 +#endif
59136 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59137 + {
59138 + .procname = "chroot_restrict_nice",
59139 + .data = &grsec_enable_chroot_nice,
59140 + .maxlen = sizeof(int),
59141 + .mode = 0600,
59142 + .proc_handler = &proc_dointvec,
59143 + },
59144 +#endif
59145 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59146 + {
59147 + .procname = "chroot_execlog",
59148 + .data = &grsec_enable_chroot_execlog,
59149 + .maxlen = sizeof(int),
59150 + .mode = 0600,
59151 + .proc_handler = &proc_dointvec,
59152 + },
59153 +#endif
59154 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59155 + {
59156 + .procname = "chroot_caps",
59157 + .data = &grsec_enable_chroot_caps,
59158 + .maxlen = sizeof(int),
59159 + .mode = 0600,
59160 + .proc_handler = &proc_dointvec,
59161 + },
59162 +#endif
59163 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59164 + {
59165 + .procname = "chroot_deny_sysctl",
59166 + .data = &grsec_enable_chroot_sysctl,
59167 + .maxlen = sizeof(int),
59168 + .mode = 0600,
59169 + .proc_handler = &proc_dointvec,
59170 + },
59171 +#endif
59172 +#ifdef CONFIG_GRKERNSEC_TPE
59173 + {
59174 + .procname = "tpe",
59175 + .data = &grsec_enable_tpe,
59176 + .maxlen = sizeof(int),
59177 + .mode = 0600,
59178 + .proc_handler = &proc_dointvec,
59179 + },
59180 + {
59181 + .procname = "tpe_gid",
59182 + .data = &grsec_tpe_gid,
59183 + .maxlen = sizeof(int),
59184 + .mode = 0600,
59185 + .proc_handler = &proc_dointvec,
59186 + },
59187 +#endif
59188 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59189 + {
59190 + .procname = "tpe_invert",
59191 + .data = &grsec_enable_tpe_invert,
59192 + .maxlen = sizeof(int),
59193 + .mode = 0600,
59194 + .proc_handler = &proc_dointvec,
59195 + },
59196 +#endif
59197 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
59198 + {
59199 + .procname = "tpe_restrict_all",
59200 + .data = &grsec_enable_tpe_all,
59201 + .maxlen = sizeof(int),
59202 + .mode = 0600,
59203 + .proc_handler = &proc_dointvec,
59204 + },
59205 +#endif
59206 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59207 + {
59208 + .procname = "socket_all",
59209 + .data = &grsec_enable_socket_all,
59210 + .maxlen = sizeof(int),
59211 + .mode = 0600,
59212 + .proc_handler = &proc_dointvec,
59213 + },
59214 + {
59215 + .procname = "socket_all_gid",
59216 + .data = &grsec_socket_all_gid,
59217 + .maxlen = sizeof(int),
59218 + .mode = 0600,
59219 + .proc_handler = &proc_dointvec,
59220 + },
59221 +#endif
59222 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59223 + {
59224 + .procname = "socket_client",
59225 + .data = &grsec_enable_socket_client,
59226 + .maxlen = sizeof(int),
59227 + .mode = 0600,
59228 + .proc_handler = &proc_dointvec,
59229 + },
59230 + {
59231 + .procname = "socket_client_gid",
59232 + .data = &grsec_socket_client_gid,
59233 + .maxlen = sizeof(int),
59234 + .mode = 0600,
59235 + .proc_handler = &proc_dointvec,
59236 + },
59237 +#endif
59238 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59239 + {
59240 + .procname = "socket_server",
59241 + .data = &grsec_enable_socket_server,
59242 + .maxlen = sizeof(int),
59243 + .mode = 0600,
59244 + .proc_handler = &proc_dointvec,
59245 + },
59246 + {
59247 + .procname = "socket_server_gid",
59248 + .data = &grsec_socket_server_gid,
59249 + .maxlen = sizeof(int),
59250 + .mode = 0600,
59251 + .proc_handler = &proc_dointvec,
59252 + },
59253 +#endif
59254 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
59255 + {
59256 + .procname = "audit_group",
59257 + .data = &grsec_enable_group,
59258 + .maxlen = sizeof(int),
59259 + .mode = 0600,
59260 + .proc_handler = &proc_dointvec,
59261 + },
59262 + {
59263 + .procname = "audit_gid",
59264 + .data = &grsec_audit_gid,
59265 + .maxlen = sizeof(int),
59266 + .mode = 0600,
59267 + .proc_handler = &proc_dointvec,
59268 + },
59269 +#endif
59270 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59271 + {
59272 + .procname = "audit_chdir",
59273 + .data = &grsec_enable_chdir,
59274 + .maxlen = sizeof(int),
59275 + .mode = 0600,
59276 + .proc_handler = &proc_dointvec,
59277 + },
59278 +#endif
59279 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59280 + {
59281 + .procname = "audit_mount",
59282 + .data = &grsec_enable_mount,
59283 + .maxlen = sizeof(int),
59284 + .mode = 0600,
59285 + .proc_handler = &proc_dointvec,
59286 + },
59287 +#endif
59288 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59289 + {
59290 + .procname = "audit_textrel",
59291 + .data = &grsec_enable_audit_textrel,
59292 + .maxlen = sizeof(int),
59293 + .mode = 0600,
59294 + .proc_handler = &proc_dointvec,
59295 + },
59296 +#endif
59297 +#ifdef CONFIG_GRKERNSEC_DMESG
59298 + {
59299 + .procname = "dmesg",
59300 + .data = &grsec_enable_dmesg,
59301 + .maxlen = sizeof(int),
59302 + .mode = 0600,
59303 + .proc_handler = &proc_dointvec,
59304 + },
59305 +#endif
59306 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59307 + {
59308 + .procname = "chroot_findtask",
59309 + .data = &grsec_enable_chroot_findtask,
59310 + .maxlen = sizeof(int),
59311 + .mode = 0600,
59312 + .proc_handler = &proc_dointvec,
59313 + },
59314 +#endif
59315 +#ifdef CONFIG_GRKERNSEC_RESLOG
59316 + {
59317 + .procname = "resource_logging",
59318 + .data = &grsec_resource_logging,
59319 + .maxlen = sizeof(int),
59320 + .mode = 0600,
59321 + .proc_handler = &proc_dointvec,
59322 + },
59323 +#endif
59324 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59325 + {
59326 + .procname = "audit_ptrace",
59327 + .data = &grsec_enable_audit_ptrace,
59328 + .maxlen = sizeof(int),
59329 + .mode = 0600,
59330 + .proc_handler = &proc_dointvec,
59331 + },
59332 +#endif
59333 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59334 + {
59335 + .procname = "harden_ptrace",
59336 + .data = &grsec_enable_harden_ptrace,
59337 + .maxlen = sizeof(int),
59338 + .mode = 0600,
59339 + .proc_handler = &proc_dointvec,
59340 + },
59341 +#endif
59342 + {
59343 + .procname = "grsec_lock",
59344 + .data = &grsec_lock,
59345 + .maxlen = sizeof(int),
59346 + .mode = 0600,
59347 + .proc_handler = &proc_dointvec,
59348 + },
59349 +#endif
59350 +#ifdef CONFIG_GRKERNSEC_ROFS
59351 + {
59352 + .procname = "romount_protect",
59353 + .data = &grsec_enable_rofs,
59354 + .maxlen = sizeof(int),
59355 + .mode = 0600,
59356 + .proc_handler = &proc_dointvec_minmax,
59357 + .extra1 = &one,
59358 + .extra2 = &one,
59359 + },
59360 +#endif
59361 + { }
59362 +};
59363 +#endif
59364 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
59365 new file mode 100644
59366 index 0000000..0dc13c3
59367 --- /dev/null
59368 +++ b/grsecurity/grsec_time.c
59369 @@ -0,0 +1,16 @@
59370 +#include <linux/kernel.h>
59371 +#include <linux/sched.h>
59372 +#include <linux/grinternal.h>
59373 +#include <linux/module.h>
59374 +
59375 +void
59376 +gr_log_timechange(void)
59377 +{
59378 +#ifdef CONFIG_GRKERNSEC_TIME
59379 + if (grsec_enable_time)
59380 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
59381 +#endif
59382 + return;
59383 +}
59384 +
59385 +EXPORT_SYMBOL(gr_log_timechange);
59386 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
59387 new file mode 100644
59388 index 0000000..07e0dc0
59389 --- /dev/null
59390 +++ b/grsecurity/grsec_tpe.c
59391 @@ -0,0 +1,73 @@
59392 +#include <linux/kernel.h>
59393 +#include <linux/sched.h>
59394 +#include <linux/file.h>
59395 +#include <linux/fs.h>
59396 +#include <linux/grinternal.h>
59397 +
59398 +extern int gr_acl_tpe_check(void);
59399 +
59400 +int
59401 +gr_tpe_allow(const struct file *file)
59402 +{
59403 +#ifdef CONFIG_GRKERNSEC
59404 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
59405 + const struct cred *cred = current_cred();
59406 + char *msg = NULL;
59407 + char *msg2 = NULL;
59408 +
59409 + // never restrict root
59410 + if (!cred->uid)
59411 + return 1;
59412 +
59413 + if (grsec_enable_tpe) {
59414 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59415 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
59416 + msg = "not being in trusted group";
59417 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
59418 + msg = "being in untrusted group";
59419 +#else
59420 + if (in_group_p(grsec_tpe_gid))
59421 + msg = "being in untrusted group";
59422 +#endif
59423 + }
59424 + if (!msg && gr_acl_tpe_check())
59425 + msg = "being in untrusted role";
59426 +
59427 + // not in any affected group/role
59428 + if (!msg)
59429 + goto next_check;
59430 +
59431 + if (inode->i_uid)
59432 + msg2 = "file in non-root-owned directory";
59433 + else if (inode->i_mode & S_IWOTH)
59434 + msg2 = "file in world-writable directory";
59435 + else if (inode->i_mode & S_IWGRP)
59436 + msg2 = "file in group-writable directory";
59437 +
59438 + if (msg && msg2) {
59439 + char fullmsg[70] = {0};
59440 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
59441 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
59442 + return 0;
59443 + }
59444 + msg = NULL;
59445 +next_check:
59446 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
59447 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
59448 + return 1;
59449 +
59450 + if (inode->i_uid && (inode->i_uid != cred->uid))
59451 + msg = "directory not owned by user";
59452 + else if (inode->i_mode & S_IWOTH)
59453 + msg = "file in world-writable directory";
59454 + else if (inode->i_mode & S_IWGRP)
59455 + msg = "file in group-writable directory";
59456 +
59457 + if (msg) {
59458 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
59459 + return 0;
59460 + }
59461 +#endif
59462 +#endif
59463 + return 1;
59464 +}
59465 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
59466 new file mode 100644
59467 index 0000000..9f7b1ac
59468 --- /dev/null
59469 +++ b/grsecurity/grsum.c
59470 @@ -0,0 +1,61 @@
59471 +#include <linux/err.h>
59472 +#include <linux/kernel.h>
59473 +#include <linux/sched.h>
59474 +#include <linux/mm.h>
59475 +#include <linux/scatterlist.h>
59476 +#include <linux/crypto.h>
59477 +#include <linux/gracl.h>
59478 +
59479 +
59480 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
59481 +#error "crypto and sha256 must be built into the kernel"
59482 +#endif
59483 +
59484 +int
59485 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
59486 +{
59487 + char *p;
59488 + struct crypto_hash *tfm;
59489 + struct hash_desc desc;
59490 + struct scatterlist sg;
59491 + unsigned char temp_sum[GR_SHA_LEN];
59492 + volatile int retval = 0;
59493 + volatile int dummy = 0;
59494 + unsigned int i;
59495 +
59496 + sg_init_table(&sg, 1);
59497 +
59498 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
59499 + if (IS_ERR(tfm)) {
59500 + /* should never happen, since sha256 should be built in */
59501 + return 1;
59502 + }
59503 +
59504 + desc.tfm = tfm;
59505 + desc.flags = 0;
59506 +
59507 + crypto_hash_init(&desc);
59508 +
59509 + p = salt;
59510 + sg_set_buf(&sg, p, GR_SALT_LEN);
59511 + crypto_hash_update(&desc, &sg, sg.length);
59512 +
59513 + p = entry->pw;
59514 + sg_set_buf(&sg, p, strlen(p));
59515 +
59516 + crypto_hash_update(&desc, &sg, sg.length);
59517 +
59518 + crypto_hash_final(&desc, temp_sum);
59519 +
59520 + memset(entry->pw, 0, GR_PW_LEN);
59521 +
59522 + for (i = 0; i < GR_SHA_LEN; i++)
59523 + if (sum[i] != temp_sum[i])
59524 + retval = 1;
59525 + else
59526 + dummy = 1; // waste a cycle
59527 +
59528 + crypto_free_hash(tfm);
59529 +
59530 + return retval;
59531 +}
59532 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
59533 index 6cd5b64..f620d2d 100644
59534 --- a/include/acpi/acpi_bus.h
59535 +++ b/include/acpi/acpi_bus.h
59536 @@ -107,7 +107,7 @@ struct acpi_device_ops {
59537 acpi_op_bind bind;
59538 acpi_op_unbind unbind;
59539 acpi_op_notify notify;
59540 -};
59541 +} __no_const;
59542
59543 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
59544
59545 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
59546 index b7babf0..71e4e74 100644
59547 --- a/include/asm-generic/atomic-long.h
59548 +++ b/include/asm-generic/atomic-long.h
59549 @@ -22,6 +22,12 @@
59550
59551 typedef atomic64_t atomic_long_t;
59552
59553 +#ifdef CONFIG_PAX_REFCOUNT
59554 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
59555 +#else
59556 +typedef atomic64_t atomic_long_unchecked_t;
59557 +#endif
59558 +
59559 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
59560
59561 static inline long atomic_long_read(atomic_long_t *l)
59562 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59563 return (long)atomic64_read(v);
59564 }
59565
59566 +#ifdef CONFIG_PAX_REFCOUNT
59567 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59568 +{
59569 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59570 +
59571 + return (long)atomic64_read_unchecked(v);
59572 +}
59573 +#endif
59574 +
59575 static inline void atomic_long_set(atomic_long_t *l, long i)
59576 {
59577 atomic64_t *v = (atomic64_t *)l;
59578 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59579 atomic64_set(v, i);
59580 }
59581
59582 +#ifdef CONFIG_PAX_REFCOUNT
59583 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59584 +{
59585 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59586 +
59587 + atomic64_set_unchecked(v, i);
59588 +}
59589 +#endif
59590 +
59591 static inline void atomic_long_inc(atomic_long_t *l)
59592 {
59593 atomic64_t *v = (atomic64_t *)l;
59594 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59595 atomic64_inc(v);
59596 }
59597
59598 +#ifdef CONFIG_PAX_REFCOUNT
59599 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59600 +{
59601 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59602 +
59603 + atomic64_inc_unchecked(v);
59604 +}
59605 +#endif
59606 +
59607 static inline void atomic_long_dec(atomic_long_t *l)
59608 {
59609 atomic64_t *v = (atomic64_t *)l;
59610 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59611 atomic64_dec(v);
59612 }
59613
59614 +#ifdef CONFIG_PAX_REFCOUNT
59615 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59616 +{
59617 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59618 +
59619 + atomic64_dec_unchecked(v);
59620 +}
59621 +#endif
59622 +
59623 static inline void atomic_long_add(long i, atomic_long_t *l)
59624 {
59625 atomic64_t *v = (atomic64_t *)l;
59626 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59627 atomic64_add(i, v);
59628 }
59629
59630 +#ifdef CONFIG_PAX_REFCOUNT
59631 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59632 +{
59633 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59634 +
59635 + atomic64_add_unchecked(i, v);
59636 +}
59637 +#endif
59638 +
59639 static inline void atomic_long_sub(long i, atomic_long_t *l)
59640 {
59641 atomic64_t *v = (atomic64_t *)l;
59642 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59643 atomic64_sub(i, v);
59644 }
59645
59646 +#ifdef CONFIG_PAX_REFCOUNT
59647 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59648 +{
59649 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59650 +
59651 + atomic64_sub_unchecked(i, v);
59652 +}
59653 +#endif
59654 +
59655 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59656 {
59657 atomic64_t *v = (atomic64_t *)l;
59658 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59659 return (long)atomic64_inc_return(v);
59660 }
59661
59662 +#ifdef CONFIG_PAX_REFCOUNT
59663 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59664 +{
59665 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59666 +
59667 + return (long)atomic64_inc_return_unchecked(v);
59668 +}
59669 +#endif
59670 +
59671 static inline long atomic_long_dec_return(atomic_long_t *l)
59672 {
59673 atomic64_t *v = (atomic64_t *)l;
59674 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59675
59676 typedef atomic_t atomic_long_t;
59677
59678 +#ifdef CONFIG_PAX_REFCOUNT
59679 +typedef atomic_unchecked_t atomic_long_unchecked_t;
59680 +#else
59681 +typedef atomic_t atomic_long_unchecked_t;
59682 +#endif
59683 +
59684 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
59685 static inline long atomic_long_read(atomic_long_t *l)
59686 {
59687 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59688 return (long)atomic_read(v);
59689 }
59690
59691 +#ifdef CONFIG_PAX_REFCOUNT
59692 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59693 +{
59694 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59695 +
59696 + return (long)atomic_read_unchecked(v);
59697 +}
59698 +#endif
59699 +
59700 static inline void atomic_long_set(atomic_long_t *l, long i)
59701 {
59702 atomic_t *v = (atomic_t *)l;
59703 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59704 atomic_set(v, i);
59705 }
59706
59707 +#ifdef CONFIG_PAX_REFCOUNT
59708 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59709 +{
59710 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59711 +
59712 + atomic_set_unchecked(v, i);
59713 +}
59714 +#endif
59715 +
59716 static inline void atomic_long_inc(atomic_long_t *l)
59717 {
59718 atomic_t *v = (atomic_t *)l;
59719 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59720 atomic_inc(v);
59721 }
59722
59723 +#ifdef CONFIG_PAX_REFCOUNT
59724 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59725 +{
59726 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59727 +
59728 + atomic_inc_unchecked(v);
59729 +}
59730 +#endif
59731 +
59732 static inline void atomic_long_dec(atomic_long_t *l)
59733 {
59734 atomic_t *v = (atomic_t *)l;
59735 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59736 atomic_dec(v);
59737 }
59738
59739 +#ifdef CONFIG_PAX_REFCOUNT
59740 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59741 +{
59742 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59743 +
59744 + atomic_dec_unchecked(v);
59745 +}
59746 +#endif
59747 +
59748 static inline void atomic_long_add(long i, atomic_long_t *l)
59749 {
59750 atomic_t *v = (atomic_t *)l;
59751 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59752 atomic_add(i, v);
59753 }
59754
59755 +#ifdef CONFIG_PAX_REFCOUNT
59756 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59757 +{
59758 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59759 +
59760 + atomic_add_unchecked(i, v);
59761 +}
59762 +#endif
59763 +
59764 static inline void atomic_long_sub(long i, atomic_long_t *l)
59765 {
59766 atomic_t *v = (atomic_t *)l;
59767 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59768 atomic_sub(i, v);
59769 }
59770
59771 +#ifdef CONFIG_PAX_REFCOUNT
59772 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59773 +{
59774 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59775 +
59776 + atomic_sub_unchecked(i, v);
59777 +}
59778 +#endif
59779 +
59780 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59781 {
59782 atomic_t *v = (atomic_t *)l;
59783 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59784 return (long)atomic_inc_return(v);
59785 }
59786
59787 +#ifdef CONFIG_PAX_REFCOUNT
59788 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59789 +{
59790 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59791 +
59792 + return (long)atomic_inc_return_unchecked(v);
59793 +}
59794 +#endif
59795 +
59796 static inline long atomic_long_dec_return(atomic_long_t *l)
59797 {
59798 atomic_t *v = (atomic_t *)l;
59799 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59800
59801 #endif /* BITS_PER_LONG == 64 */
59802
59803 +#ifdef CONFIG_PAX_REFCOUNT
59804 +static inline void pax_refcount_needs_these_functions(void)
59805 +{
59806 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
59807 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59808 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59809 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59810 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59811 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59812 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59813 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59814 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59815 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59816 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59817 +
59818 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59819 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59820 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59821 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59822 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59823 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59824 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59825 +}
59826 +#else
59827 +#define atomic_read_unchecked(v) atomic_read(v)
59828 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59829 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59830 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59831 +#define atomic_inc_unchecked(v) atomic_inc(v)
59832 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59833 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59834 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59835 +#define atomic_dec_unchecked(v) atomic_dec(v)
59836 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59837 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59838 +
59839 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
59840 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59841 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59842 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59843 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59844 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59845 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59846 +#endif
59847 +
59848 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
59849 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59850 index b18ce4f..2ee2843 100644
59851 --- a/include/asm-generic/atomic64.h
59852 +++ b/include/asm-generic/atomic64.h
59853 @@ -16,6 +16,8 @@ typedef struct {
59854 long long counter;
59855 } atomic64_t;
59856
59857 +typedef atomic64_t atomic64_unchecked_t;
59858 +
59859 #define ATOMIC64_INIT(i) { (i) }
59860
59861 extern long long atomic64_read(const atomic64_t *v);
59862 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59863 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59864 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59865
59866 +#define atomic64_read_unchecked(v) atomic64_read(v)
59867 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59868 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59869 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59870 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59871 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
59872 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59873 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
59874 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59875 +
59876 #endif /* _ASM_GENERIC_ATOMIC64_H */
59877 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59878 index 1bfcfe5..e04c5c9 100644
59879 --- a/include/asm-generic/cache.h
59880 +++ b/include/asm-generic/cache.h
59881 @@ -6,7 +6,7 @@
59882 * cache lines need to provide their own cache.h.
59883 */
59884
59885 -#define L1_CACHE_SHIFT 5
59886 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59887 +#define L1_CACHE_SHIFT 5UL
59888 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59889
59890 #endif /* __ASM_GENERIC_CACHE_H */
59891 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
59892 index 0d68a1e..b74a761 100644
59893 --- a/include/asm-generic/emergency-restart.h
59894 +++ b/include/asm-generic/emergency-restart.h
59895 @@ -1,7 +1,7 @@
59896 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
59897 #define _ASM_GENERIC_EMERGENCY_RESTART_H
59898
59899 -static inline void machine_emergency_restart(void)
59900 +static inline __noreturn void machine_emergency_restart(void)
59901 {
59902 machine_restart(NULL);
59903 }
59904 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59905 index 0232ccb..13d9165 100644
59906 --- a/include/asm-generic/kmap_types.h
59907 +++ b/include/asm-generic/kmap_types.h
59908 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59909 KMAP_D(17) KM_NMI,
59910 KMAP_D(18) KM_NMI_PTE,
59911 KMAP_D(19) KM_KDB,
59912 +KMAP_D(20) KM_CLEARPAGE,
59913 /*
59914 * Remember to update debug_kmap_atomic() when adding new kmap types!
59915 */
59916 -KMAP_D(20) KM_TYPE_NR
59917 +KMAP_D(21) KM_TYPE_NR
59918 };
59919
59920 #undef KMAP_D
59921 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
59922 index 9ceb03b..2efbcbd 100644
59923 --- a/include/asm-generic/local.h
59924 +++ b/include/asm-generic/local.h
59925 @@ -39,6 +39,7 @@ typedef struct
59926 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
59927 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
59928 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
59929 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
59930
59931 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
59932 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
59933 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59934 index 725612b..9cc513a 100644
59935 --- a/include/asm-generic/pgtable-nopmd.h
59936 +++ b/include/asm-generic/pgtable-nopmd.h
59937 @@ -1,14 +1,19 @@
59938 #ifndef _PGTABLE_NOPMD_H
59939 #define _PGTABLE_NOPMD_H
59940
59941 -#ifndef __ASSEMBLY__
59942 -
59943 #include <asm-generic/pgtable-nopud.h>
59944
59945 -struct mm_struct;
59946 -
59947 #define __PAGETABLE_PMD_FOLDED
59948
59949 +#define PMD_SHIFT PUD_SHIFT
59950 +#define PTRS_PER_PMD 1
59951 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59952 +#define PMD_MASK (~(PMD_SIZE-1))
59953 +
59954 +#ifndef __ASSEMBLY__
59955 +
59956 +struct mm_struct;
59957 +
59958 /*
59959 * Having the pmd type consist of a pud gets the size right, and allows
59960 * us to conceptually access the pud entry that this pmd is folded into
59961 @@ -16,11 +21,6 @@ struct mm_struct;
59962 */
59963 typedef struct { pud_t pud; } pmd_t;
59964
59965 -#define PMD_SHIFT PUD_SHIFT
59966 -#define PTRS_PER_PMD 1
59967 -#define PMD_SIZE (1UL << PMD_SHIFT)
59968 -#define PMD_MASK (~(PMD_SIZE-1))
59969 -
59970 /*
59971 * The "pud_xxx()" functions here are trivial for a folded two-level
59972 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59973 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59974 index 810431d..ccc3638 100644
59975 --- a/include/asm-generic/pgtable-nopud.h
59976 +++ b/include/asm-generic/pgtable-nopud.h
59977 @@ -1,10 +1,15 @@
59978 #ifndef _PGTABLE_NOPUD_H
59979 #define _PGTABLE_NOPUD_H
59980
59981 -#ifndef __ASSEMBLY__
59982 -
59983 #define __PAGETABLE_PUD_FOLDED
59984
59985 +#define PUD_SHIFT PGDIR_SHIFT
59986 +#define PTRS_PER_PUD 1
59987 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59988 +#define PUD_MASK (~(PUD_SIZE-1))
59989 +
59990 +#ifndef __ASSEMBLY__
59991 +
59992 /*
59993 * Having the pud type consist of a pgd gets the size right, and allows
59994 * us to conceptually access the pgd entry that this pud is folded into
59995 @@ -12,11 +17,6 @@
59996 */
59997 typedef struct { pgd_t pgd; } pud_t;
59998
59999 -#define PUD_SHIFT PGDIR_SHIFT
60000 -#define PTRS_PER_PUD 1
60001 -#define PUD_SIZE (1UL << PUD_SHIFT)
60002 -#define PUD_MASK (~(PUD_SIZE-1))
60003 -
60004 /*
60005 * The "pgd_xxx()" functions here are trivial for a folded two-level
60006 * setup: the pud is never bad, and a pud always exists (as it's folded
60007 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
60008 index a03c098..7e5b223 100644
60009 --- a/include/asm-generic/pgtable.h
60010 +++ b/include/asm-generic/pgtable.h
60011 @@ -502,6 +502,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
60012 #endif
60013 }
60014
60015 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
60016 +static inline unsigned long pax_open_kernel(void) { return 0; }
60017 +#endif
60018 +
60019 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
60020 +static inline unsigned long pax_close_kernel(void) { return 0; }
60021 +#endif
60022 +
60023 #endif /* CONFIG_MMU */
60024
60025 #endif /* !__ASSEMBLY__ */
60026 diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
60027 index 9788568..510dece 100644
60028 --- a/include/asm-generic/uaccess.h
60029 +++ b/include/asm-generic/uaccess.h
60030 @@ -76,6 +76,8 @@ extern unsigned long search_exception_table(unsigned long);
60031 */
60032 #ifndef __copy_from_user
60033 static inline __must_check long __copy_from_user(void *to,
60034 + const void __user * from, unsigned long n) __size_overflow(3);
60035 +static inline __must_check long __copy_from_user(void *to,
60036 const void __user * from, unsigned long n)
60037 {
60038 if (__builtin_constant_p(n)) {
60039 @@ -106,6 +108,8 @@ static inline __must_check long __copy_from_user(void *to,
60040
60041 #ifndef __copy_to_user
60042 static inline __must_check long __copy_to_user(void __user *to,
60043 + const void *from, unsigned long n) __size_overflow(3);
60044 +static inline __must_check long __copy_to_user(void __user *to,
60045 const void *from, unsigned long n)
60046 {
60047 if (__builtin_constant_p(n)) {
60048 @@ -224,6 +228,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
60049 -EFAULT; \
60050 })
60051
60052 +static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) __size_overflow(1);
60053 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
60054 {
60055 size = __copy_from_user(x, ptr, size);
60056 @@ -240,6 +245,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
60057 #define __copy_to_user_inatomic __copy_to_user
60058 #endif
60059
60060 +static inline long copy_from_user(void *to, const void __user * from, unsigned long n) __size_overflow(3);
60061 static inline long copy_from_user(void *to,
60062 const void __user * from, unsigned long n)
60063 {
60064 @@ -250,6 +256,7 @@ static inline long copy_from_user(void *to,
60065 return n;
60066 }
60067
60068 +static inline long copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
60069 static inline long copy_to_user(void __user *to,
60070 const void *from, unsigned long n)
60071 {
60072 @@ -314,6 +321,8 @@ static inline long strlen_user(const char __user *src)
60073 */
60074 #ifndef __clear_user
60075 static inline __must_check unsigned long
60076 +__clear_user(void __user *to, unsigned long n) __size_overflow(2);
60077 +static inline __must_check unsigned long
60078 __clear_user(void __user *to, unsigned long n)
60079 {
60080 memset((void __force *)to, 0, n);
60081 @@ -322,6 +331,8 @@ __clear_user(void __user *to, unsigned long n)
60082 #endif
60083
60084 static inline __must_check unsigned long
60085 +clear_user(void __user *to, unsigned long n) __size_overflow(2);
60086 +static inline __must_check unsigned long
60087 clear_user(void __user *to, unsigned long n)
60088 {
60089 might_sleep();
60090 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
60091 index b5e2e4c..6a5373e 100644
60092 --- a/include/asm-generic/vmlinux.lds.h
60093 +++ b/include/asm-generic/vmlinux.lds.h
60094 @@ -217,6 +217,7 @@
60095 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
60096 VMLINUX_SYMBOL(__start_rodata) = .; \
60097 *(.rodata) *(.rodata.*) \
60098 + *(.data..read_only) \
60099 *(__vermagic) /* Kernel version magic */ \
60100 . = ALIGN(8); \
60101 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
60102 @@ -722,17 +723,18 @@
60103 * section in the linker script will go there too. @phdr should have
60104 * a leading colon.
60105 *
60106 - * Note that this macros defines __per_cpu_load as an absolute symbol.
60107 + * Note that this macros defines per_cpu_load as an absolute symbol.
60108 * If there is no need to put the percpu section at a predetermined
60109 * address, use PERCPU_SECTION.
60110 */
60111 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
60112 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
60113 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
60114 + per_cpu_load = .; \
60115 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
60116 - LOAD_OFFSET) { \
60117 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
60118 PERCPU_INPUT(cacheline) \
60119 } phdr \
60120 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
60121 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
60122
60123 /**
60124 * PERCPU_SECTION - define output section for percpu area, simple version
60125 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
60126 index 92f0981..d44a37c 100644
60127 --- a/include/drm/drmP.h
60128 +++ b/include/drm/drmP.h
60129 @@ -72,6 +72,7 @@
60130 #include <linux/workqueue.h>
60131 #include <linux/poll.h>
60132 #include <asm/pgalloc.h>
60133 +#include <asm/local.h>
60134 #include "drm.h"
60135
60136 #include <linux/idr.h>
60137 @@ -1038,7 +1039,7 @@ struct drm_device {
60138
60139 /** \name Usage Counters */
60140 /*@{ */
60141 - int open_count; /**< Outstanding files open */
60142 + local_t open_count; /**< Outstanding files open */
60143 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
60144 atomic_t vma_count; /**< Outstanding vma areas open */
60145 int buf_use; /**< Buffers in use -- cannot alloc */
60146 @@ -1049,7 +1050,7 @@ struct drm_device {
60147 /*@{ */
60148 unsigned long counters;
60149 enum drm_stat_type types[15];
60150 - atomic_t counts[15];
60151 + atomic_unchecked_t counts[15];
60152 /*@} */
60153
60154 struct list_head filelist;
60155 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
60156 index 37515d1..34fa8b0 100644
60157 --- a/include/drm/drm_crtc_helper.h
60158 +++ b/include/drm/drm_crtc_helper.h
60159 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
60160
60161 /* disable crtc when not in use - more explicit than dpms off */
60162 void (*disable)(struct drm_crtc *crtc);
60163 -};
60164 +} __no_const;
60165
60166 struct drm_encoder_helper_funcs {
60167 void (*dpms)(struct drm_encoder *encoder, int mode);
60168 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
60169 struct drm_connector *connector);
60170 /* disable encoder when not in use - more explicit than dpms off */
60171 void (*disable)(struct drm_encoder *encoder);
60172 -};
60173 +} __no_const;
60174
60175 struct drm_connector_helper_funcs {
60176 int (*get_modes)(struct drm_connector *connector);
60177 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
60178 index 26c1f78..6722682 100644
60179 --- a/include/drm/ttm/ttm_memory.h
60180 +++ b/include/drm/ttm/ttm_memory.h
60181 @@ -47,7 +47,7 @@
60182
60183 struct ttm_mem_shrink {
60184 int (*do_shrink) (struct ttm_mem_shrink *);
60185 -};
60186 +} __no_const;
60187
60188 /**
60189 * struct ttm_mem_global - Global memory accounting structure.
60190 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
60191 index e86dfca..40cc55f 100644
60192 --- a/include/linux/a.out.h
60193 +++ b/include/linux/a.out.h
60194 @@ -39,6 +39,14 @@ enum machine_type {
60195 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
60196 };
60197
60198 +/* Constants for the N_FLAGS field */
60199 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60200 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
60201 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
60202 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
60203 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60204 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60205 +
60206 #if !defined (N_MAGIC)
60207 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
60208 #endif
60209 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
60210 index f4ff882..84b53a6 100644
60211 --- a/include/linux/atmdev.h
60212 +++ b/include/linux/atmdev.h
60213 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
60214 #endif
60215
60216 struct k_atm_aal_stats {
60217 -#define __HANDLE_ITEM(i) atomic_t i
60218 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60219 __AAL_STAT_ITEMS
60220 #undef __HANDLE_ITEM
60221 };
60222 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
60223 index 0092102..8a801b4 100644
60224 --- a/include/linux/binfmts.h
60225 +++ b/include/linux/binfmts.h
60226 @@ -89,6 +89,7 @@ struct linux_binfmt {
60227 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
60228 int (*load_shlib)(struct file *);
60229 int (*core_dump)(struct coredump_params *cprm);
60230 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
60231 unsigned long min_coredump; /* minimal dump size */
60232 };
60233
60234 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
60235 index 606cf33..b72c577 100644
60236 --- a/include/linux/blkdev.h
60237 +++ b/include/linux/blkdev.h
60238 @@ -1379,7 +1379,7 @@ struct block_device_operations {
60239 /* this callback is with swap_lock and sometimes page table lock held */
60240 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
60241 struct module *owner;
60242 -};
60243 +} __do_const;
60244
60245 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
60246 unsigned long);
60247 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
60248 index 4d1a074..88f929a 100644
60249 --- a/include/linux/blktrace_api.h
60250 +++ b/include/linux/blktrace_api.h
60251 @@ -162,7 +162,7 @@ struct blk_trace {
60252 struct dentry *dir;
60253 struct dentry *dropped_file;
60254 struct dentry *msg_file;
60255 - atomic_t dropped;
60256 + atomic_unchecked_t dropped;
60257 };
60258
60259 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
60260 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
60261 index 83195fb..0b0f77d 100644
60262 --- a/include/linux/byteorder/little_endian.h
60263 +++ b/include/linux/byteorder/little_endian.h
60264 @@ -42,51 +42,51 @@
60265
60266 static inline __le64 __cpu_to_le64p(const __u64 *p)
60267 {
60268 - return (__force __le64)*p;
60269 + return (__force const __le64)*p;
60270 }
60271 static inline __u64 __le64_to_cpup(const __le64 *p)
60272 {
60273 - return (__force __u64)*p;
60274 + return (__force const __u64)*p;
60275 }
60276 static inline __le32 __cpu_to_le32p(const __u32 *p)
60277 {
60278 - return (__force __le32)*p;
60279 + return (__force const __le32)*p;
60280 }
60281 static inline __u32 __le32_to_cpup(const __le32 *p)
60282 {
60283 - return (__force __u32)*p;
60284 + return (__force const __u32)*p;
60285 }
60286 static inline __le16 __cpu_to_le16p(const __u16 *p)
60287 {
60288 - return (__force __le16)*p;
60289 + return (__force const __le16)*p;
60290 }
60291 static inline __u16 __le16_to_cpup(const __le16 *p)
60292 {
60293 - return (__force __u16)*p;
60294 + return (__force const __u16)*p;
60295 }
60296 static inline __be64 __cpu_to_be64p(const __u64 *p)
60297 {
60298 - return (__force __be64)__swab64p(p);
60299 + return (__force const __be64)__swab64p(p);
60300 }
60301 static inline __u64 __be64_to_cpup(const __be64 *p)
60302 {
60303 - return __swab64p((__u64 *)p);
60304 + return __swab64p((const __u64 *)p);
60305 }
60306 static inline __be32 __cpu_to_be32p(const __u32 *p)
60307 {
60308 - return (__force __be32)__swab32p(p);
60309 + return (__force const __be32)__swab32p(p);
60310 }
60311 static inline __u32 __be32_to_cpup(const __be32 *p)
60312 {
60313 - return __swab32p((__u32 *)p);
60314 + return __swab32p((const __u32 *)p);
60315 }
60316 static inline __be16 __cpu_to_be16p(const __u16 *p)
60317 {
60318 - return (__force __be16)__swab16p(p);
60319 + return (__force const __be16)__swab16p(p);
60320 }
60321 static inline __u16 __be16_to_cpup(const __be16 *p)
60322 {
60323 - return __swab16p((__u16 *)p);
60324 + return __swab16p((const __u16 *)p);
60325 }
60326 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
60327 #define __le64_to_cpus(x) do { (void)(x); } while (0)
60328 diff --git a/include/linux/cache.h b/include/linux/cache.h
60329 index 4c57065..4307975 100644
60330 --- a/include/linux/cache.h
60331 +++ b/include/linux/cache.h
60332 @@ -16,6 +16,10 @@
60333 #define __read_mostly
60334 #endif
60335
60336 +#ifndef __read_only
60337 +#define __read_only __read_mostly
60338 +#endif
60339 +
60340 #ifndef ____cacheline_aligned
60341 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
60342 #endif
60343 diff --git a/include/linux/capability.h b/include/linux/capability.h
60344 index 12d52de..b5f7fa7 100644
60345 --- a/include/linux/capability.h
60346 +++ b/include/linux/capability.h
60347 @@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
60348 extern bool capable(int cap);
60349 extern bool ns_capable(struct user_namespace *ns, int cap);
60350 extern bool nsown_capable(int cap);
60351 +extern bool capable_nolog(int cap);
60352 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
60353
60354 /* audit system wants to get cap info from files as well */
60355 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
60356 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
60357 index 04ffb2e..6799180 100644
60358 --- a/include/linux/cleancache.h
60359 +++ b/include/linux/cleancache.h
60360 @@ -31,7 +31,7 @@ struct cleancache_ops {
60361 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
60362 void (*flush_inode)(int, struct cleancache_filekey);
60363 void (*flush_fs)(int);
60364 -};
60365 +} __no_const;
60366
60367 extern struct cleancache_ops
60368 cleancache_register_ops(struct cleancache_ops *ops);
60369 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
60370 index 2f40791..567b215 100644
60371 --- a/include/linux/compiler-gcc4.h
60372 +++ b/include/linux/compiler-gcc4.h
60373 @@ -32,6 +32,15 @@
60374 #define __linktime_error(message) __attribute__((__error__(message)))
60375
60376 #if __GNUC_MINOR__ >= 5
60377 +
60378 +#ifdef CONSTIFY_PLUGIN
60379 +#define __no_const __attribute__((no_const))
60380 +#define __do_const __attribute__((do_const))
60381 +#endif
60382 +
60383 +#ifdef SIZE_OVERFLOW_PLUGIN
60384 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
60385 +#endif
60386 /*
60387 * Mark a position in code as unreachable. This can be used to
60388 * suppress control flow warnings after asm blocks that transfer
60389 @@ -47,6 +56,11 @@
60390 #define __noclone __attribute__((__noclone__))
60391
60392 #endif
60393 +
60394 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
60395 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
60396 +#define __bos0(ptr) __bos((ptr), 0)
60397 +#define __bos1(ptr) __bos((ptr), 1)
60398 #endif
60399
60400 #if __GNUC_MINOR__ > 0
60401 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
60402 index 4a24354..ecaff7a 100644
60403 --- a/include/linux/compiler.h
60404 +++ b/include/linux/compiler.h
60405 @@ -5,31 +5,62 @@
60406
60407 #ifdef __CHECKER__
60408 # define __user __attribute__((noderef, address_space(1)))
60409 +# define __force_user __force __user
60410 # define __kernel __attribute__((address_space(0)))
60411 +# define __force_kernel __force __kernel
60412 # define __safe __attribute__((safe))
60413 # define __force __attribute__((force))
60414 # define __nocast __attribute__((nocast))
60415 # define __iomem __attribute__((noderef, address_space(2)))
60416 +# define __force_iomem __force __iomem
60417 # define __acquires(x) __attribute__((context(x,0,1)))
60418 # define __releases(x) __attribute__((context(x,1,0)))
60419 # define __acquire(x) __context__(x,1)
60420 # define __release(x) __context__(x,-1)
60421 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
60422 # define __percpu __attribute__((noderef, address_space(3)))
60423 +# define __force_percpu __force __percpu
60424 #ifdef CONFIG_SPARSE_RCU_POINTER
60425 # define __rcu __attribute__((noderef, address_space(4)))
60426 +# define __force_rcu __force __rcu
60427 #else
60428 # define __rcu
60429 +# define __force_rcu
60430 #endif
60431 extern void __chk_user_ptr(const volatile void __user *);
60432 extern void __chk_io_ptr(const volatile void __iomem *);
60433 +#elif defined(CHECKER_PLUGIN)
60434 +//# define __user
60435 +//# define __force_user
60436 +//# define __kernel
60437 +//# define __force_kernel
60438 +# define __safe
60439 +# define __force
60440 +# define __nocast
60441 +# define __iomem
60442 +# define __force_iomem
60443 +# define __chk_user_ptr(x) (void)0
60444 +# define __chk_io_ptr(x) (void)0
60445 +# define __builtin_warning(x, y...) (1)
60446 +# define __acquires(x)
60447 +# define __releases(x)
60448 +# define __acquire(x) (void)0
60449 +# define __release(x) (void)0
60450 +# define __cond_lock(x,c) (c)
60451 +# define __percpu
60452 +# define __force_percpu
60453 +# define __rcu
60454 +# define __force_rcu
60455 #else
60456 # define __user
60457 +# define __force_user
60458 # define __kernel
60459 +# define __force_kernel
60460 # define __safe
60461 # define __force
60462 # define __nocast
60463 # define __iomem
60464 +# define __force_iomem
60465 # define __chk_user_ptr(x) (void)0
60466 # define __chk_io_ptr(x) (void)0
60467 # define __builtin_warning(x, y...) (1)
60468 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
60469 # define __release(x) (void)0
60470 # define __cond_lock(x,c) (c)
60471 # define __percpu
60472 +# define __force_percpu
60473 # define __rcu
60474 +# define __force_rcu
60475 #endif
60476
60477 #ifdef __KERNEL__
60478 @@ -264,6 +297,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60479 # define __attribute_const__ /* unimplemented */
60480 #endif
60481
60482 +#ifndef __no_const
60483 +# define __no_const
60484 +#endif
60485 +
60486 +#ifndef __do_const
60487 +# define __do_const
60488 +#endif
60489 +
60490 +#ifndef __size_overflow
60491 +# define __size_overflow(...)
60492 +#endif
60493 /*
60494 * Tell gcc if a function is cold. The compiler will assume any path
60495 * directly leading to the call is unlikely.
60496 @@ -273,6 +317,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60497 #define __cold
60498 #endif
60499
60500 +#ifndef __alloc_size
60501 +#define __alloc_size(...)
60502 +#endif
60503 +
60504 +#ifndef __bos
60505 +#define __bos(ptr, arg)
60506 +#endif
60507 +
60508 +#ifndef __bos0
60509 +#define __bos0(ptr)
60510 +#endif
60511 +
60512 +#ifndef __bos1
60513 +#define __bos1(ptr)
60514 +#endif
60515 +
60516 /* Simple shorthand for a section definition */
60517 #ifndef __section
60518 # define __section(S) __attribute__ ((__section__(#S)))
60519 @@ -308,6 +368,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60520 * use is to mediate communication between process-level code and irq/NMI
60521 * handlers, all running on the same CPU.
60522 */
60523 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
60524 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
60525 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
60526
60527 #endif /* __LINUX_COMPILER_H */
60528 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
60529 index e9eaec5..bfeb9bb 100644
60530 --- a/include/linux/cpuset.h
60531 +++ b/include/linux/cpuset.h
60532 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
60533 * nodemask.
60534 */
60535 smp_mb();
60536 - --ACCESS_ONCE(current->mems_allowed_change_disable);
60537 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
60538 }
60539
60540 static inline void set_mems_allowed(nodemask_t nodemask)
60541 diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
60542 index b936763..48685ee 100644
60543 --- a/include/linux/crash_dump.h
60544 +++ b/include/linux/crash_dump.h
60545 @@ -14,7 +14,7 @@ extern unsigned long long elfcorehdr_addr;
60546 extern unsigned long long elfcorehdr_size;
60547
60548 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
60549 - unsigned long, int);
60550 + unsigned long, int) __size_overflow(3);
60551
60552 /* Architecture code defines this if there are other possible ELF
60553 * machine types, e.g. on bi-arch capable hardware. */
60554 diff --git a/include/linux/cred.h b/include/linux/cred.h
60555 index adadf71..6af5560 100644
60556 --- a/include/linux/cred.h
60557 +++ b/include/linux/cred.h
60558 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
60559 static inline void validate_process_creds(void)
60560 {
60561 }
60562 +static inline void validate_task_creds(struct task_struct *task)
60563 +{
60564 +}
60565 #endif
60566
60567 /**
60568 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
60569 index 8a94217..15d49e3 100644
60570 --- a/include/linux/crypto.h
60571 +++ b/include/linux/crypto.h
60572 @@ -365,7 +365,7 @@ struct cipher_tfm {
60573 const u8 *key, unsigned int keylen);
60574 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60575 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60576 -};
60577 +} __no_const;
60578
60579 struct hash_tfm {
60580 int (*init)(struct hash_desc *desc);
60581 @@ -386,13 +386,13 @@ struct compress_tfm {
60582 int (*cot_decompress)(struct crypto_tfm *tfm,
60583 const u8 *src, unsigned int slen,
60584 u8 *dst, unsigned int *dlen);
60585 -};
60586 +} __no_const;
60587
60588 struct rng_tfm {
60589 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
60590 unsigned int dlen);
60591 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
60592 -};
60593 +} __no_const;
60594
60595 #define crt_ablkcipher crt_u.ablkcipher
60596 #define crt_aead crt_u.aead
60597 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
60598 index 7925bf0..d5143d2 100644
60599 --- a/include/linux/decompress/mm.h
60600 +++ b/include/linux/decompress/mm.h
60601 @@ -77,7 +77,7 @@ static void free(void *where)
60602 * warnings when not needed (indeed large_malloc / large_free are not
60603 * needed by inflate */
60604
60605 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60606 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60607 #define free(a) kfree(a)
60608
60609 #define large_malloc(a) vmalloc(a)
60610 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
60611 index e13117c..e9fc938 100644
60612 --- a/include/linux/dma-mapping.h
60613 +++ b/include/linux/dma-mapping.h
60614 @@ -46,7 +46,7 @@ struct dma_map_ops {
60615 u64 (*get_required_mask)(struct device *dev);
60616 #endif
60617 int is_phys;
60618 -};
60619 +} __do_const;
60620
60621 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
60622
60623 diff --git a/include/linux/efi.h b/include/linux/efi.h
60624 index 37c3007..92ab679 100644
60625 --- a/include/linux/efi.h
60626 +++ b/include/linux/efi.h
60627 @@ -580,7 +580,7 @@ struct efivar_operations {
60628 efi_get_variable_t *get_variable;
60629 efi_get_next_variable_t *get_next_variable;
60630 efi_set_variable_t *set_variable;
60631 -};
60632 +} __no_const;
60633
60634 struct efivars {
60635 /*
60636 diff --git a/include/linux/elf.h b/include/linux/elf.h
60637 index 999b4f5..57753b4 100644
60638 --- a/include/linux/elf.h
60639 +++ b/include/linux/elf.h
60640 @@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
60641 #define PT_GNU_EH_FRAME 0x6474e550
60642
60643 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
60644 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
60645 +
60646 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
60647 +
60648 +/* Constants for the e_flags field */
60649 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60650 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
60651 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
60652 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
60653 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60654 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60655
60656 /*
60657 * Extended Numbering
60658 @@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
60659 #define DT_DEBUG 21
60660 #define DT_TEXTREL 22
60661 #define DT_JMPREL 23
60662 +#define DT_FLAGS 30
60663 + #define DF_TEXTREL 0x00000004
60664 #define DT_ENCODING 32
60665 #define OLD_DT_LOOS 0x60000000
60666 #define DT_LOOS 0x6000000d
60667 @@ -243,6 +256,19 @@ typedef struct elf64_hdr {
60668 #define PF_W 0x2
60669 #define PF_X 0x1
60670
60671 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
60672 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
60673 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
60674 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
60675 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
60676 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
60677 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
60678 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
60679 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
60680 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
60681 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
60682 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
60683 +
60684 typedef struct elf32_phdr{
60685 Elf32_Word p_type;
60686 Elf32_Off p_offset;
60687 @@ -335,6 +361,8 @@ typedef struct elf64_shdr {
60688 #define EI_OSABI 7
60689 #define EI_PAD 8
60690
60691 +#define EI_PAX 14
60692 +
60693 #define ELFMAG0 0x7f /* EI_MAG */
60694 #define ELFMAG1 'E'
60695 #define ELFMAG2 'L'
60696 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
60697 #define elf_note elf32_note
60698 #define elf_addr_t Elf32_Off
60699 #define Elf_Half Elf32_Half
60700 +#define elf_dyn Elf32_Dyn
60701
60702 #else
60703
60704 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
60705 #define elf_note elf64_note
60706 #define elf_addr_t Elf64_Off
60707 #define Elf_Half Elf64_Half
60708 +#define elf_dyn Elf64_Dyn
60709
60710 #endif
60711
60712 diff --git a/include/linux/filter.h b/include/linux/filter.h
60713 index 8eeb205..d59bfa2 100644
60714 --- a/include/linux/filter.h
60715 +++ b/include/linux/filter.h
60716 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
60717
60718 struct sk_buff;
60719 struct sock;
60720 +struct bpf_jit_work;
60721
60722 struct sk_filter
60723 {
60724 @@ -141,6 +142,9 @@ struct sk_filter
60725 unsigned int len; /* Number of filter blocks */
60726 unsigned int (*bpf_func)(const struct sk_buff *skb,
60727 const struct sock_filter *filter);
60728 +#ifdef CONFIG_BPF_JIT
60729 + struct bpf_jit_work *work;
60730 +#endif
60731 struct rcu_head rcu;
60732 struct sock_filter insns[0];
60733 };
60734 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
60735 index 84ccf8e..2e9b14c 100644
60736 --- a/include/linux/firewire.h
60737 +++ b/include/linux/firewire.h
60738 @@ -428,7 +428,7 @@ struct fw_iso_context {
60739 union {
60740 fw_iso_callback_t sc;
60741 fw_iso_mc_callback_t mc;
60742 - } callback;
60743 + } __no_const callback;
60744 void *callback_data;
60745 };
60746
60747 diff --git a/include/linux/fs.h b/include/linux/fs.h
60748 index f4b6e06..d6ba573 100644
60749 --- a/include/linux/fs.h
60750 +++ b/include/linux/fs.h
60751 @@ -1628,7 +1628,8 @@ struct file_operations {
60752 int (*setlease)(struct file *, long, struct file_lock **);
60753 long (*fallocate)(struct file *file, int mode, loff_t offset,
60754 loff_t len);
60755 -};
60756 +} __do_const;
60757 +typedef struct file_operations __no_const file_operations_no_const;
60758
60759 struct inode_operations {
60760 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
60761 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
60762 index 003dc0f..3c4ea97 100644
60763 --- a/include/linux/fs_struct.h
60764 +++ b/include/linux/fs_struct.h
60765 @@ -6,7 +6,7 @@
60766 #include <linux/seqlock.h>
60767
60768 struct fs_struct {
60769 - int users;
60770 + atomic_t users;
60771 spinlock_t lock;
60772 seqcount_t seq;
60773 int umask;
60774 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
60775 index ce31408..b1ad003 100644
60776 --- a/include/linux/fscache-cache.h
60777 +++ b/include/linux/fscache-cache.h
60778 @@ -102,7 +102,7 @@ struct fscache_operation {
60779 fscache_operation_release_t release;
60780 };
60781
60782 -extern atomic_t fscache_op_debug_id;
60783 +extern atomic_unchecked_t fscache_op_debug_id;
60784 extern void fscache_op_work_func(struct work_struct *work);
60785
60786 extern void fscache_enqueue_operation(struct fscache_operation *);
60787 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60788 {
60789 INIT_WORK(&op->work, fscache_op_work_func);
60790 atomic_set(&op->usage, 1);
60791 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60792 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60793 op->processor = processor;
60794 op->release = release;
60795 INIT_LIST_HEAD(&op->pend_link);
60796 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60797 index 2a53f10..0187fdf 100644
60798 --- a/include/linux/fsnotify.h
60799 +++ b/include/linux/fsnotify.h
60800 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60801 */
60802 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60803 {
60804 - return kstrdup(name, GFP_KERNEL);
60805 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60806 }
60807
60808 /*
60809 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
60810 index 91d0e0a3..035666b 100644
60811 --- a/include/linux/fsnotify_backend.h
60812 +++ b/include/linux/fsnotify_backend.h
60813 @@ -105,6 +105,7 @@ struct fsnotify_ops {
60814 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
60815 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
60816 };
60817 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
60818
60819 /*
60820 * A group is a "thing" that wants to receive notification about filesystem
60821 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60822 index c3da42d..c70e0df 100644
60823 --- a/include/linux/ftrace_event.h
60824 +++ b/include/linux/ftrace_event.h
60825 @@ -97,7 +97,7 @@ struct trace_event_functions {
60826 trace_print_func raw;
60827 trace_print_func hex;
60828 trace_print_func binary;
60829 -};
60830 +} __no_const;
60831
60832 struct trace_event {
60833 struct hlist_node node;
60834 @@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60835 extern int trace_add_event_call(struct ftrace_event_call *call);
60836 extern void trace_remove_event_call(struct ftrace_event_call *call);
60837
60838 -#define is_signed_type(type) (((type)(-1)) < 0)
60839 +#define is_signed_type(type) (((type)(-1)) < (type)1)
60840
60841 int trace_set_clr_event(const char *system, const char *event, int set);
60842
60843 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60844 index e61d319..0da8505 100644
60845 --- a/include/linux/genhd.h
60846 +++ b/include/linux/genhd.h
60847 @@ -185,7 +185,7 @@ struct gendisk {
60848 struct kobject *slave_dir;
60849
60850 struct timer_rand_state *random;
60851 - atomic_t sync_io; /* RAID */
60852 + atomic_unchecked_t sync_io; /* RAID */
60853 struct disk_events *ev;
60854 #ifdef CONFIG_BLK_DEV_INTEGRITY
60855 struct blk_integrity *integrity;
60856 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60857 new file mode 100644
60858 index 0000000..8a130b6
60859 --- /dev/null
60860 +++ b/include/linux/gracl.h
60861 @@ -0,0 +1,319 @@
60862 +#ifndef GR_ACL_H
60863 +#define GR_ACL_H
60864 +
60865 +#include <linux/grdefs.h>
60866 +#include <linux/resource.h>
60867 +#include <linux/capability.h>
60868 +#include <linux/dcache.h>
60869 +#include <asm/resource.h>
60870 +
60871 +/* Major status information */
60872 +
60873 +#define GR_VERSION "grsecurity 2.9"
60874 +#define GRSECURITY_VERSION 0x2900
60875 +
60876 +enum {
60877 + GR_SHUTDOWN = 0,
60878 + GR_ENABLE = 1,
60879 + GR_SPROLE = 2,
60880 + GR_RELOAD = 3,
60881 + GR_SEGVMOD = 4,
60882 + GR_STATUS = 5,
60883 + GR_UNSPROLE = 6,
60884 + GR_PASSSET = 7,
60885 + GR_SPROLEPAM = 8,
60886 +};
60887 +
60888 +/* Password setup definitions
60889 + * kernel/grhash.c */
60890 +enum {
60891 + GR_PW_LEN = 128,
60892 + GR_SALT_LEN = 16,
60893 + GR_SHA_LEN = 32,
60894 +};
60895 +
60896 +enum {
60897 + GR_SPROLE_LEN = 64,
60898 +};
60899 +
60900 +enum {
60901 + GR_NO_GLOB = 0,
60902 + GR_REG_GLOB,
60903 + GR_CREATE_GLOB
60904 +};
60905 +
60906 +#define GR_NLIMITS 32
60907 +
60908 +/* Begin Data Structures */
60909 +
60910 +struct sprole_pw {
60911 + unsigned char *rolename;
60912 + unsigned char salt[GR_SALT_LEN];
60913 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60914 +};
60915 +
60916 +struct name_entry {
60917 + __u32 key;
60918 + ino_t inode;
60919 + dev_t device;
60920 + char *name;
60921 + __u16 len;
60922 + __u8 deleted;
60923 + struct name_entry *prev;
60924 + struct name_entry *next;
60925 +};
60926 +
60927 +struct inodev_entry {
60928 + struct name_entry *nentry;
60929 + struct inodev_entry *prev;
60930 + struct inodev_entry *next;
60931 +};
60932 +
60933 +struct acl_role_db {
60934 + struct acl_role_label **r_hash;
60935 + __u32 r_size;
60936 +};
60937 +
60938 +struct inodev_db {
60939 + struct inodev_entry **i_hash;
60940 + __u32 i_size;
60941 +};
60942 +
60943 +struct name_db {
60944 + struct name_entry **n_hash;
60945 + __u32 n_size;
60946 +};
60947 +
60948 +struct crash_uid {
60949 + uid_t uid;
60950 + unsigned long expires;
60951 +};
60952 +
60953 +struct gr_hash_struct {
60954 + void **table;
60955 + void **nametable;
60956 + void *first;
60957 + __u32 table_size;
60958 + __u32 used_size;
60959 + int type;
60960 +};
60961 +
60962 +/* Userspace Grsecurity ACL data structures */
60963 +
60964 +struct acl_subject_label {
60965 + char *filename;
60966 + ino_t inode;
60967 + dev_t device;
60968 + __u32 mode;
60969 + kernel_cap_t cap_mask;
60970 + kernel_cap_t cap_lower;
60971 + kernel_cap_t cap_invert_audit;
60972 +
60973 + struct rlimit res[GR_NLIMITS];
60974 + __u32 resmask;
60975 +
60976 + __u8 user_trans_type;
60977 + __u8 group_trans_type;
60978 + uid_t *user_transitions;
60979 + gid_t *group_transitions;
60980 + __u16 user_trans_num;
60981 + __u16 group_trans_num;
60982 +
60983 + __u32 sock_families[2];
60984 + __u32 ip_proto[8];
60985 + __u32 ip_type;
60986 + struct acl_ip_label **ips;
60987 + __u32 ip_num;
60988 + __u32 inaddr_any_override;
60989 +
60990 + __u32 crashes;
60991 + unsigned long expires;
60992 +
60993 + struct acl_subject_label *parent_subject;
60994 + struct gr_hash_struct *hash;
60995 + struct acl_subject_label *prev;
60996 + struct acl_subject_label *next;
60997 +
60998 + struct acl_object_label **obj_hash;
60999 + __u32 obj_hash_size;
61000 + __u16 pax_flags;
61001 +};
61002 +
61003 +struct role_allowed_ip {
61004 + __u32 addr;
61005 + __u32 netmask;
61006 +
61007 + struct role_allowed_ip *prev;
61008 + struct role_allowed_ip *next;
61009 +};
61010 +
61011 +struct role_transition {
61012 + char *rolename;
61013 +
61014 + struct role_transition *prev;
61015 + struct role_transition *next;
61016 +};
61017 +
61018 +struct acl_role_label {
61019 + char *rolename;
61020 + uid_t uidgid;
61021 + __u16 roletype;
61022 +
61023 + __u16 auth_attempts;
61024 + unsigned long expires;
61025 +
61026 + struct acl_subject_label *root_label;
61027 + struct gr_hash_struct *hash;
61028 +
61029 + struct acl_role_label *prev;
61030 + struct acl_role_label *next;
61031 +
61032 + struct role_transition *transitions;
61033 + struct role_allowed_ip *allowed_ips;
61034 + uid_t *domain_children;
61035 + __u16 domain_child_num;
61036 +
61037 + umode_t umask;
61038 +
61039 + struct acl_subject_label **subj_hash;
61040 + __u32 subj_hash_size;
61041 +};
61042 +
61043 +struct user_acl_role_db {
61044 + struct acl_role_label **r_table;
61045 + __u32 num_pointers; /* Number of allocations to track */
61046 + __u32 num_roles; /* Number of roles */
61047 + __u32 num_domain_children; /* Number of domain children */
61048 + __u32 num_subjects; /* Number of subjects */
61049 + __u32 num_objects; /* Number of objects */
61050 +};
61051 +
61052 +struct acl_object_label {
61053 + char *filename;
61054 + ino_t inode;
61055 + dev_t device;
61056 + __u32 mode;
61057 +
61058 + struct acl_subject_label *nested;
61059 + struct acl_object_label *globbed;
61060 +
61061 + /* next two structures not used */
61062 +
61063 + struct acl_object_label *prev;
61064 + struct acl_object_label *next;
61065 +};
61066 +
61067 +struct acl_ip_label {
61068 + char *iface;
61069 + __u32 addr;
61070 + __u32 netmask;
61071 + __u16 low, high;
61072 + __u8 mode;
61073 + __u32 type;
61074 + __u32 proto[8];
61075 +
61076 + /* next two structures not used */
61077 +
61078 + struct acl_ip_label *prev;
61079 + struct acl_ip_label *next;
61080 +};
61081 +
61082 +struct gr_arg {
61083 + struct user_acl_role_db role_db;
61084 + unsigned char pw[GR_PW_LEN];
61085 + unsigned char salt[GR_SALT_LEN];
61086 + unsigned char sum[GR_SHA_LEN];
61087 + unsigned char sp_role[GR_SPROLE_LEN];
61088 + struct sprole_pw *sprole_pws;
61089 + dev_t segv_device;
61090 + ino_t segv_inode;
61091 + uid_t segv_uid;
61092 + __u16 num_sprole_pws;
61093 + __u16 mode;
61094 +};
61095 +
61096 +struct gr_arg_wrapper {
61097 + struct gr_arg *arg;
61098 + __u32 version;
61099 + __u32 size;
61100 +};
61101 +
61102 +struct subject_map {
61103 + struct acl_subject_label *user;
61104 + struct acl_subject_label *kernel;
61105 + struct subject_map *prev;
61106 + struct subject_map *next;
61107 +};
61108 +
61109 +struct acl_subj_map_db {
61110 + struct subject_map **s_hash;
61111 + __u32 s_size;
61112 +};
61113 +
61114 +/* End Data Structures Section */
61115 +
61116 +/* Hash functions generated by empirical testing by Brad Spengler
61117 + Makes good use of the low bits of the inode. Generally 0-1 times
61118 + in loop for successful match. 0-3 for unsuccessful match.
61119 + Shift/add algorithm with modulus of table size and an XOR*/
61120 +
61121 +static __inline__ unsigned int
61122 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
61123 +{
61124 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
61125 +}
61126 +
61127 + static __inline__ unsigned int
61128 +shash(const struct acl_subject_label *userp, const unsigned int sz)
61129 +{
61130 + return ((const unsigned long)userp % sz);
61131 +}
61132 +
61133 +static __inline__ unsigned int
61134 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
61135 +{
61136 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
61137 +}
61138 +
61139 +static __inline__ unsigned int
61140 +nhash(const char *name, const __u16 len, const unsigned int sz)
61141 +{
61142 + return full_name_hash((const unsigned char *)name, len) % sz;
61143 +}
61144 +
61145 +#define FOR_EACH_ROLE_START(role) \
61146 + role = role_list; \
61147 + while (role) {
61148 +
61149 +#define FOR_EACH_ROLE_END(role) \
61150 + role = role->prev; \
61151 + }
61152 +
61153 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
61154 + subj = NULL; \
61155 + iter = 0; \
61156 + while (iter < role->subj_hash_size) { \
61157 + if (subj == NULL) \
61158 + subj = role->subj_hash[iter]; \
61159 + if (subj == NULL) { \
61160 + iter++; \
61161 + continue; \
61162 + }
61163 +
61164 +#define FOR_EACH_SUBJECT_END(subj,iter) \
61165 + subj = subj->next; \
61166 + if (subj == NULL) \
61167 + iter++; \
61168 + }
61169 +
61170 +
61171 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
61172 + subj = role->hash->first; \
61173 + while (subj != NULL) {
61174 +
61175 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
61176 + subj = subj->next; \
61177 + }
61178 +
61179 +#endif
61180 +
61181 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
61182 new file mode 100644
61183 index 0000000..323ecf2
61184 --- /dev/null
61185 +++ b/include/linux/gralloc.h
61186 @@ -0,0 +1,9 @@
61187 +#ifndef __GRALLOC_H
61188 +#define __GRALLOC_H
61189 +
61190 +void acl_free_all(void);
61191 +int acl_alloc_stack_init(unsigned long size);
61192 +void *acl_alloc(unsigned long len);
61193 +void *acl_alloc_num(unsigned long num, unsigned long len);
61194 +
61195 +#endif
61196 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
61197 new file mode 100644
61198 index 0000000..b30e9bc
61199 --- /dev/null
61200 +++ b/include/linux/grdefs.h
61201 @@ -0,0 +1,140 @@
61202 +#ifndef GRDEFS_H
61203 +#define GRDEFS_H
61204 +
61205 +/* Begin grsecurity status declarations */
61206 +
61207 +enum {
61208 + GR_READY = 0x01,
61209 + GR_STATUS_INIT = 0x00 // disabled state
61210 +};
61211 +
61212 +/* Begin ACL declarations */
61213 +
61214 +/* Role flags */
61215 +
61216 +enum {
61217 + GR_ROLE_USER = 0x0001,
61218 + GR_ROLE_GROUP = 0x0002,
61219 + GR_ROLE_DEFAULT = 0x0004,
61220 + GR_ROLE_SPECIAL = 0x0008,
61221 + GR_ROLE_AUTH = 0x0010,
61222 + GR_ROLE_NOPW = 0x0020,
61223 + GR_ROLE_GOD = 0x0040,
61224 + GR_ROLE_LEARN = 0x0080,
61225 + GR_ROLE_TPE = 0x0100,
61226 + GR_ROLE_DOMAIN = 0x0200,
61227 + GR_ROLE_PAM = 0x0400,
61228 + GR_ROLE_PERSIST = 0x0800
61229 +};
61230 +
61231 +/* ACL Subject and Object mode flags */
61232 +enum {
61233 + GR_DELETED = 0x80000000
61234 +};
61235 +
61236 +/* ACL Object-only mode flags */
61237 +enum {
61238 + GR_READ = 0x00000001,
61239 + GR_APPEND = 0x00000002,
61240 + GR_WRITE = 0x00000004,
61241 + GR_EXEC = 0x00000008,
61242 + GR_FIND = 0x00000010,
61243 + GR_INHERIT = 0x00000020,
61244 + GR_SETID = 0x00000040,
61245 + GR_CREATE = 0x00000080,
61246 + GR_DELETE = 0x00000100,
61247 + GR_LINK = 0x00000200,
61248 + GR_AUDIT_READ = 0x00000400,
61249 + GR_AUDIT_APPEND = 0x00000800,
61250 + GR_AUDIT_WRITE = 0x00001000,
61251 + GR_AUDIT_EXEC = 0x00002000,
61252 + GR_AUDIT_FIND = 0x00004000,
61253 + GR_AUDIT_INHERIT= 0x00008000,
61254 + GR_AUDIT_SETID = 0x00010000,
61255 + GR_AUDIT_CREATE = 0x00020000,
61256 + GR_AUDIT_DELETE = 0x00040000,
61257 + GR_AUDIT_LINK = 0x00080000,
61258 + GR_PTRACERD = 0x00100000,
61259 + GR_NOPTRACE = 0x00200000,
61260 + GR_SUPPRESS = 0x00400000,
61261 + GR_NOLEARN = 0x00800000,
61262 + GR_INIT_TRANSFER= 0x01000000
61263 +};
61264 +
61265 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
61266 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
61267 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
61268 +
61269 +/* ACL subject-only mode flags */
61270 +enum {
61271 + GR_KILL = 0x00000001,
61272 + GR_VIEW = 0x00000002,
61273 + GR_PROTECTED = 0x00000004,
61274 + GR_LEARN = 0x00000008,
61275 + GR_OVERRIDE = 0x00000010,
61276 + /* just a placeholder, this mode is only used in userspace */
61277 + GR_DUMMY = 0x00000020,
61278 + GR_PROTSHM = 0x00000040,
61279 + GR_KILLPROC = 0x00000080,
61280 + GR_KILLIPPROC = 0x00000100,
61281 + /* just a placeholder, this mode is only used in userspace */
61282 + GR_NOTROJAN = 0x00000200,
61283 + GR_PROTPROCFD = 0x00000400,
61284 + GR_PROCACCT = 0x00000800,
61285 + GR_RELAXPTRACE = 0x00001000,
61286 + GR_NESTED = 0x00002000,
61287 + GR_INHERITLEARN = 0x00004000,
61288 + GR_PROCFIND = 0x00008000,
61289 + GR_POVERRIDE = 0x00010000,
61290 + GR_KERNELAUTH = 0x00020000,
61291 + GR_ATSECURE = 0x00040000,
61292 + GR_SHMEXEC = 0x00080000
61293 +};
61294 +
61295 +enum {
61296 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
61297 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
61298 + GR_PAX_ENABLE_MPROTECT = 0x0004,
61299 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
61300 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
61301 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
61302 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
61303 + GR_PAX_DISABLE_MPROTECT = 0x0400,
61304 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
61305 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
61306 +};
61307 +
61308 +enum {
61309 + GR_ID_USER = 0x01,
61310 + GR_ID_GROUP = 0x02,
61311 +};
61312 +
61313 +enum {
61314 + GR_ID_ALLOW = 0x01,
61315 + GR_ID_DENY = 0x02,
61316 +};
61317 +
61318 +#define GR_CRASH_RES 31
61319 +#define GR_UIDTABLE_MAX 500
61320 +
61321 +/* begin resource learning section */
61322 +enum {
61323 + GR_RLIM_CPU_BUMP = 60,
61324 + GR_RLIM_FSIZE_BUMP = 50000,
61325 + GR_RLIM_DATA_BUMP = 10000,
61326 + GR_RLIM_STACK_BUMP = 1000,
61327 + GR_RLIM_CORE_BUMP = 10000,
61328 + GR_RLIM_RSS_BUMP = 500000,
61329 + GR_RLIM_NPROC_BUMP = 1,
61330 + GR_RLIM_NOFILE_BUMP = 5,
61331 + GR_RLIM_MEMLOCK_BUMP = 50000,
61332 + GR_RLIM_AS_BUMP = 500000,
61333 + GR_RLIM_LOCKS_BUMP = 2,
61334 + GR_RLIM_SIGPENDING_BUMP = 5,
61335 + GR_RLIM_MSGQUEUE_BUMP = 10000,
61336 + GR_RLIM_NICE_BUMP = 1,
61337 + GR_RLIM_RTPRIO_BUMP = 1,
61338 + GR_RLIM_RTTIME_BUMP = 1000000
61339 +};
61340 +
61341 +#endif
61342 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
61343 new file mode 100644
61344 index 0000000..da390f1
61345 --- /dev/null
61346 +++ b/include/linux/grinternal.h
61347 @@ -0,0 +1,221 @@
61348 +#ifndef __GRINTERNAL_H
61349 +#define __GRINTERNAL_H
61350 +
61351 +#ifdef CONFIG_GRKERNSEC
61352 +
61353 +#include <linux/fs.h>
61354 +#include <linux/mnt_namespace.h>
61355 +#include <linux/nsproxy.h>
61356 +#include <linux/gracl.h>
61357 +#include <linux/grdefs.h>
61358 +#include <linux/grmsg.h>
61359 +
61360 +void gr_add_learn_entry(const char *fmt, ...)
61361 + __attribute__ ((format (printf, 1, 2)));
61362 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
61363 + const struct vfsmount *mnt);
61364 +__u32 gr_check_create(const struct dentry *new_dentry,
61365 + const struct dentry *parent,
61366 + const struct vfsmount *mnt, const __u32 mode);
61367 +int gr_check_protected_task(const struct task_struct *task);
61368 +__u32 to_gr_audit(const __u32 reqmode);
61369 +int gr_set_acls(const int type);
61370 +int gr_apply_subject_to_task(struct task_struct *task);
61371 +int gr_acl_is_enabled(void);
61372 +char gr_roletype_to_char(void);
61373 +
61374 +void gr_handle_alertkill(struct task_struct *task);
61375 +char *gr_to_filename(const struct dentry *dentry,
61376 + const struct vfsmount *mnt);
61377 +char *gr_to_filename1(const struct dentry *dentry,
61378 + const struct vfsmount *mnt);
61379 +char *gr_to_filename2(const struct dentry *dentry,
61380 + const struct vfsmount *mnt);
61381 +char *gr_to_filename3(const struct dentry *dentry,
61382 + const struct vfsmount *mnt);
61383 +
61384 +extern int grsec_enable_ptrace_readexec;
61385 +extern int grsec_enable_harden_ptrace;
61386 +extern int grsec_enable_link;
61387 +extern int grsec_enable_fifo;
61388 +extern int grsec_enable_execve;
61389 +extern int grsec_enable_shm;
61390 +extern int grsec_enable_execlog;
61391 +extern int grsec_enable_signal;
61392 +extern int grsec_enable_audit_ptrace;
61393 +extern int grsec_enable_forkfail;
61394 +extern int grsec_enable_time;
61395 +extern int grsec_enable_rofs;
61396 +extern int grsec_enable_chroot_shmat;
61397 +extern int grsec_enable_chroot_mount;
61398 +extern int grsec_enable_chroot_double;
61399 +extern int grsec_enable_chroot_pivot;
61400 +extern int grsec_enable_chroot_chdir;
61401 +extern int grsec_enable_chroot_chmod;
61402 +extern int grsec_enable_chroot_mknod;
61403 +extern int grsec_enable_chroot_fchdir;
61404 +extern int grsec_enable_chroot_nice;
61405 +extern int grsec_enable_chroot_execlog;
61406 +extern int grsec_enable_chroot_caps;
61407 +extern int grsec_enable_chroot_sysctl;
61408 +extern int grsec_enable_chroot_unix;
61409 +extern int grsec_enable_tpe;
61410 +extern int grsec_tpe_gid;
61411 +extern int grsec_enable_tpe_all;
61412 +extern int grsec_enable_tpe_invert;
61413 +extern int grsec_enable_socket_all;
61414 +extern int grsec_socket_all_gid;
61415 +extern int grsec_enable_socket_client;
61416 +extern int grsec_socket_client_gid;
61417 +extern int grsec_enable_socket_server;
61418 +extern int grsec_socket_server_gid;
61419 +extern int grsec_audit_gid;
61420 +extern int grsec_enable_group;
61421 +extern int grsec_enable_audit_textrel;
61422 +extern int grsec_enable_log_rwxmaps;
61423 +extern int grsec_enable_mount;
61424 +extern int grsec_enable_chdir;
61425 +extern int grsec_resource_logging;
61426 +extern int grsec_enable_blackhole;
61427 +extern int grsec_lastack_retries;
61428 +extern int grsec_enable_brute;
61429 +extern int grsec_lock;
61430 +
61431 +extern spinlock_t grsec_alert_lock;
61432 +extern unsigned long grsec_alert_wtime;
61433 +extern unsigned long grsec_alert_fyet;
61434 +
61435 +extern spinlock_t grsec_audit_lock;
61436 +
61437 +extern rwlock_t grsec_exec_file_lock;
61438 +
61439 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
61440 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
61441 + (tsk)->exec_file->f_vfsmnt) : "/")
61442 +
61443 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
61444 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
61445 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61446 +
61447 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
61448 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
61449 + (tsk)->exec_file->f_vfsmnt) : "/")
61450 +
61451 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
61452 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
61453 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61454 +
61455 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
61456 +
61457 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
61458 +
61459 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
61460 + (task)->pid, (cred)->uid, \
61461 + (cred)->euid, (cred)->gid, (cred)->egid, \
61462 + gr_parent_task_fullpath(task), \
61463 + (task)->real_parent->comm, (task)->real_parent->pid, \
61464 + (pcred)->uid, (pcred)->euid, \
61465 + (pcred)->gid, (pcred)->egid
61466 +
61467 +#define GR_CHROOT_CAPS {{ \
61468 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
61469 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
61470 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
61471 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
61472 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
61473 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
61474 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
61475 +
61476 +#define security_learn(normal_msg,args...) \
61477 +({ \
61478 + read_lock(&grsec_exec_file_lock); \
61479 + gr_add_learn_entry(normal_msg "\n", ## args); \
61480 + read_unlock(&grsec_exec_file_lock); \
61481 +})
61482 +
61483 +enum {
61484 + GR_DO_AUDIT,
61485 + GR_DONT_AUDIT,
61486 + /* used for non-audit messages that we shouldn't kill the task on */
61487 + GR_DONT_AUDIT_GOOD
61488 +};
61489 +
61490 +enum {
61491 + GR_TTYSNIFF,
61492 + GR_RBAC,
61493 + GR_RBAC_STR,
61494 + GR_STR_RBAC,
61495 + GR_RBAC_MODE2,
61496 + GR_RBAC_MODE3,
61497 + GR_FILENAME,
61498 + GR_SYSCTL_HIDDEN,
61499 + GR_NOARGS,
61500 + GR_ONE_INT,
61501 + GR_ONE_INT_TWO_STR,
61502 + GR_ONE_STR,
61503 + GR_STR_INT,
61504 + GR_TWO_STR_INT,
61505 + GR_TWO_INT,
61506 + GR_TWO_U64,
61507 + GR_THREE_INT,
61508 + GR_FIVE_INT_TWO_STR,
61509 + GR_TWO_STR,
61510 + GR_THREE_STR,
61511 + GR_FOUR_STR,
61512 + GR_STR_FILENAME,
61513 + GR_FILENAME_STR,
61514 + GR_FILENAME_TWO_INT,
61515 + GR_FILENAME_TWO_INT_STR,
61516 + GR_TEXTREL,
61517 + GR_PTRACE,
61518 + GR_RESOURCE,
61519 + GR_CAP,
61520 + GR_SIG,
61521 + GR_SIG2,
61522 + GR_CRASH1,
61523 + GR_CRASH2,
61524 + GR_PSACCT,
61525 + GR_RWXMAP
61526 +};
61527 +
61528 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
61529 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
61530 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
61531 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
61532 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
61533 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
61534 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
61535 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
61536 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
61537 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
61538 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
61539 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
61540 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
61541 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
61542 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
61543 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
61544 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
61545 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
61546 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
61547 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
61548 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
61549 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
61550 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
61551 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
61552 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
61553 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
61554 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
61555 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
61556 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
61557 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
61558 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
61559 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
61560 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
61561 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
61562 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
61563 +
61564 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
61565 +
61566 +#endif
61567 +
61568 +#endif
61569 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
61570 new file mode 100644
61571 index 0000000..ae576a1
61572 --- /dev/null
61573 +++ b/include/linux/grmsg.h
61574 @@ -0,0 +1,109 @@
61575 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
61576 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
61577 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
61578 +#define GR_STOPMOD_MSG "denied modification of module state by "
61579 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
61580 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
61581 +#define GR_IOPERM_MSG "denied use of ioperm() by "
61582 +#define GR_IOPL_MSG "denied use of iopl() by "
61583 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
61584 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
61585 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
61586 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
61587 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
61588 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
61589 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
61590 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
61591 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
61592 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
61593 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
61594 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
61595 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
61596 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
61597 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
61598 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
61599 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
61600 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
61601 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
61602 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
61603 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
61604 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
61605 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
61606 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
61607 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
61608 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
61609 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
61610 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
61611 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
61612 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
61613 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
61614 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
61615 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
61616 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
61617 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
61618 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
61619 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
61620 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
61621 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
61622 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
61623 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
61624 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
61625 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
61626 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
61627 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
61628 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
61629 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
61630 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
61631 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
61632 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
61633 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
61634 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
61635 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
61636 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
61637 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
61638 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
61639 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
61640 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
61641 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
61642 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
61643 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
61644 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
61645 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
61646 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
61647 +#define GR_NICE_CHROOT_MSG "denied priority change by "
61648 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
61649 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
61650 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
61651 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
61652 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
61653 +#define GR_TIME_MSG "time set by "
61654 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
61655 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
61656 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
61657 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
61658 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
61659 +#define GR_BIND_MSG "denied bind() by "
61660 +#define GR_CONNECT_MSG "denied connect() by "
61661 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
61662 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
61663 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
61664 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
61665 +#define GR_CAP_ACL_MSG "use of %s denied for "
61666 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
61667 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
61668 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
61669 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
61670 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
61671 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
61672 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
61673 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
61674 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
61675 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
61676 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
61677 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
61678 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
61679 +#define GR_VM86_MSG "denied use of vm86 by "
61680 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
61681 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
61682 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
61683 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
61684 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
61685 new file mode 100644
61686 index 0000000..acd05db
61687 --- /dev/null
61688 +++ b/include/linux/grsecurity.h
61689 @@ -0,0 +1,232 @@
61690 +#ifndef GR_SECURITY_H
61691 +#define GR_SECURITY_H
61692 +#include <linux/fs.h>
61693 +#include <linux/fs_struct.h>
61694 +#include <linux/binfmts.h>
61695 +#include <linux/gracl.h>
61696 +
61697 +/* notify of brain-dead configs */
61698 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61699 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
61700 +#endif
61701 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
61702 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
61703 +#endif
61704 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
61705 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
61706 +#endif
61707 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
61708 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
61709 +#endif
61710 +
61711 +#include <linux/compat.h>
61712 +
61713 +struct user_arg_ptr {
61714 +#ifdef CONFIG_COMPAT
61715 + bool is_compat;
61716 +#endif
61717 + union {
61718 + const char __user *const __user *native;
61719 +#ifdef CONFIG_COMPAT
61720 + compat_uptr_t __user *compat;
61721 +#endif
61722 + } ptr;
61723 +};
61724 +
61725 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
61726 +void gr_handle_brute_check(void);
61727 +void gr_handle_kernel_exploit(void);
61728 +int gr_process_user_ban(void);
61729 +
61730 +char gr_roletype_to_char(void);
61731 +
61732 +int gr_acl_enable_at_secure(void);
61733 +
61734 +int gr_check_user_change(int real, int effective, int fs);
61735 +int gr_check_group_change(int real, int effective, int fs);
61736 +
61737 +void gr_del_task_from_ip_table(struct task_struct *p);
61738 +
61739 +int gr_pid_is_chrooted(struct task_struct *p);
61740 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
61741 +int gr_handle_chroot_nice(void);
61742 +int gr_handle_chroot_sysctl(const int op);
61743 +int gr_handle_chroot_setpriority(struct task_struct *p,
61744 + const int niceval);
61745 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
61746 +int gr_handle_chroot_chroot(const struct dentry *dentry,
61747 + const struct vfsmount *mnt);
61748 +void gr_handle_chroot_chdir(struct path *path);
61749 +int gr_handle_chroot_chmod(const struct dentry *dentry,
61750 + const struct vfsmount *mnt, const int mode);
61751 +int gr_handle_chroot_mknod(const struct dentry *dentry,
61752 + const struct vfsmount *mnt, const int mode);
61753 +int gr_handle_chroot_mount(const struct dentry *dentry,
61754 + const struct vfsmount *mnt,
61755 + const char *dev_name);
61756 +int gr_handle_chroot_pivot(void);
61757 +int gr_handle_chroot_unix(const pid_t pid);
61758 +
61759 +int gr_handle_rawio(const struct inode *inode);
61760 +
61761 +void gr_handle_ioperm(void);
61762 +void gr_handle_iopl(void);
61763 +
61764 +umode_t gr_acl_umask(void);
61765 +
61766 +int gr_tpe_allow(const struct file *file);
61767 +
61768 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
61769 +void gr_clear_chroot_entries(struct task_struct *task);
61770 +
61771 +void gr_log_forkfail(const int retval);
61772 +void gr_log_timechange(void);
61773 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
61774 +void gr_log_chdir(const struct dentry *dentry,
61775 + const struct vfsmount *mnt);
61776 +void gr_log_chroot_exec(const struct dentry *dentry,
61777 + const struct vfsmount *mnt);
61778 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
61779 +void gr_log_remount(const char *devname, const int retval);
61780 +void gr_log_unmount(const char *devname, const int retval);
61781 +void gr_log_mount(const char *from, const char *to, const int retval);
61782 +void gr_log_textrel(struct vm_area_struct *vma);
61783 +void gr_log_rwxmmap(struct file *file);
61784 +void gr_log_rwxmprotect(struct file *file);
61785 +
61786 +int gr_handle_follow_link(const struct inode *parent,
61787 + const struct inode *inode,
61788 + const struct dentry *dentry,
61789 + const struct vfsmount *mnt);
61790 +int gr_handle_fifo(const struct dentry *dentry,
61791 + const struct vfsmount *mnt,
61792 + const struct dentry *dir, const int flag,
61793 + const int acc_mode);
61794 +int gr_handle_hardlink(const struct dentry *dentry,
61795 + const struct vfsmount *mnt,
61796 + struct inode *inode,
61797 + const int mode, const char *to);
61798 +
61799 +int gr_is_capable(const int cap);
61800 +int gr_is_capable_nolog(const int cap);
61801 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
61802 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
61803 +
61804 +void gr_learn_resource(const struct task_struct *task, const int limit,
61805 + const unsigned long wanted, const int gt);
61806 +void gr_copy_label(struct task_struct *tsk);
61807 +void gr_handle_crash(struct task_struct *task, const int sig);
61808 +int gr_handle_signal(const struct task_struct *p, const int sig);
61809 +int gr_check_crash_uid(const uid_t uid);
61810 +int gr_check_protected_task(const struct task_struct *task);
61811 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
61812 +int gr_acl_handle_mmap(const struct file *file,
61813 + const unsigned long prot);
61814 +int gr_acl_handle_mprotect(const struct file *file,
61815 + const unsigned long prot);
61816 +int gr_check_hidden_task(const struct task_struct *tsk);
61817 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61818 + const struct vfsmount *mnt);
61819 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
61820 + const struct vfsmount *mnt);
61821 +__u32 gr_acl_handle_access(const struct dentry *dentry,
61822 + const struct vfsmount *mnt, const int fmode);
61823 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61824 + const struct vfsmount *mnt, umode_t *mode);
61825 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
61826 + const struct vfsmount *mnt);
61827 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61828 + const struct vfsmount *mnt);
61829 +int gr_handle_ptrace(struct task_struct *task, const long request);
61830 +int gr_handle_proc_ptrace(struct task_struct *task);
61831 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
61832 + const struct vfsmount *mnt);
61833 +int gr_check_crash_exec(const struct file *filp);
61834 +int gr_acl_is_enabled(void);
61835 +void gr_set_kernel_label(struct task_struct *task);
61836 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
61837 + const gid_t gid);
61838 +int gr_set_proc_label(const struct dentry *dentry,
61839 + const struct vfsmount *mnt,
61840 + const int unsafe_flags);
61841 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61842 + const struct vfsmount *mnt);
61843 +__u32 gr_acl_handle_open(const struct dentry *dentry,
61844 + const struct vfsmount *mnt, int acc_mode);
61845 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
61846 + const struct dentry *p_dentry,
61847 + const struct vfsmount *p_mnt,
61848 + int open_flags, int acc_mode, const int imode);
61849 +void gr_handle_create(const struct dentry *dentry,
61850 + const struct vfsmount *mnt);
61851 +void gr_handle_proc_create(const struct dentry *dentry,
61852 + const struct inode *inode);
61853 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61854 + const struct dentry *parent_dentry,
61855 + const struct vfsmount *parent_mnt,
61856 + const int mode);
61857 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61858 + const struct dentry *parent_dentry,
61859 + const struct vfsmount *parent_mnt);
61860 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61861 + const struct vfsmount *mnt);
61862 +void gr_handle_delete(const ino_t ino, const dev_t dev);
61863 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61864 + const struct vfsmount *mnt);
61865 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61866 + const struct dentry *parent_dentry,
61867 + const struct vfsmount *parent_mnt,
61868 + const char *from);
61869 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61870 + const struct dentry *parent_dentry,
61871 + const struct vfsmount *parent_mnt,
61872 + const struct dentry *old_dentry,
61873 + const struct vfsmount *old_mnt, const char *to);
61874 +int gr_acl_handle_rename(struct dentry *new_dentry,
61875 + struct dentry *parent_dentry,
61876 + const struct vfsmount *parent_mnt,
61877 + struct dentry *old_dentry,
61878 + struct inode *old_parent_inode,
61879 + struct vfsmount *old_mnt, const char *newname);
61880 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61881 + struct dentry *old_dentry,
61882 + struct dentry *new_dentry,
61883 + struct vfsmount *mnt, const __u8 replace);
61884 +__u32 gr_check_link(const struct dentry *new_dentry,
61885 + const struct dentry *parent_dentry,
61886 + const struct vfsmount *parent_mnt,
61887 + const struct dentry *old_dentry,
61888 + const struct vfsmount *old_mnt);
61889 +int gr_acl_handle_filldir(const struct file *file, const char *name,
61890 + const unsigned int namelen, const ino_t ino);
61891 +
61892 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
61893 + const struct vfsmount *mnt);
61894 +void gr_acl_handle_exit(void);
61895 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
61896 +int gr_acl_handle_procpidmem(const struct task_struct *task);
61897 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61898 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61899 +void gr_audit_ptrace(struct task_struct *task);
61900 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61901 +
61902 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
61903 +
61904 +#ifdef CONFIG_GRKERNSEC
61905 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61906 +void gr_handle_vm86(void);
61907 +void gr_handle_mem_readwrite(u64 from, u64 to);
61908 +
61909 +void gr_log_badprocpid(const char *entry);
61910 +
61911 +extern int grsec_enable_dmesg;
61912 +extern int grsec_disable_privio;
61913 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61914 +extern int grsec_enable_chroot_findtask;
61915 +#endif
61916 +#ifdef CONFIG_GRKERNSEC_SETXID
61917 +extern int grsec_enable_setxid;
61918 +#endif
61919 +#endif
61920 +
61921 +#endif
61922 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61923 new file mode 100644
61924 index 0000000..e7ffaaf
61925 --- /dev/null
61926 +++ b/include/linux/grsock.h
61927 @@ -0,0 +1,19 @@
61928 +#ifndef __GRSOCK_H
61929 +#define __GRSOCK_H
61930 +
61931 +extern void gr_attach_curr_ip(const struct sock *sk);
61932 +extern int gr_handle_sock_all(const int family, const int type,
61933 + const int protocol);
61934 +extern int gr_handle_sock_server(const struct sockaddr *sck);
61935 +extern int gr_handle_sock_server_other(const struct sock *sck);
61936 +extern int gr_handle_sock_client(const struct sockaddr *sck);
61937 +extern int gr_search_connect(struct socket * sock,
61938 + struct sockaddr_in * addr);
61939 +extern int gr_search_bind(struct socket * sock,
61940 + struct sockaddr_in * addr);
61941 +extern int gr_search_listen(struct socket * sock);
61942 +extern int gr_search_accept(struct socket * sock);
61943 +extern int gr_search_socket(const int domain, const int type,
61944 + const int protocol);
61945 +
61946 +#endif
61947 diff --git a/include/linux/hid.h b/include/linux/hid.h
61948 index 3a95da6..51986f1 100644
61949 --- a/include/linux/hid.h
61950 +++ b/include/linux/hid.h
61951 @@ -696,7 +696,7 @@ struct hid_ll_driver {
61952 unsigned int code, int value);
61953
61954 int (*parse)(struct hid_device *hdev);
61955 -};
61956 +} __no_const;
61957
61958 #define PM_HINT_FULLON 1<<5
61959 #define PM_HINT_NORMAL 1<<1
61960 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61961 index 3a93f73..b19d0b3 100644
61962 --- a/include/linux/highmem.h
61963 +++ b/include/linux/highmem.h
61964 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
61965 kunmap_atomic(kaddr, KM_USER0);
61966 }
61967
61968 +static inline void sanitize_highpage(struct page *page)
61969 +{
61970 + void *kaddr;
61971 + unsigned long flags;
61972 +
61973 + local_irq_save(flags);
61974 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
61975 + clear_page(kaddr);
61976 + kunmap_atomic(kaddr, KM_CLEARPAGE);
61977 + local_irq_restore(flags);
61978 +}
61979 +
61980 static inline void zero_user_segments(struct page *page,
61981 unsigned start1, unsigned end1,
61982 unsigned start2, unsigned end2)
61983 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61984 index 8e25a91..551b161 100644
61985 --- a/include/linux/i2c.h
61986 +++ b/include/linux/i2c.h
61987 @@ -364,6 +364,7 @@ struct i2c_algorithm {
61988 /* To determine what the adapter supports */
61989 u32 (*functionality) (struct i2c_adapter *);
61990 };
61991 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61992
61993 /*
61994 * i2c_adapter is the structure used to identify a physical i2c bus along
61995 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61996 index a6deef4..c56a7f2 100644
61997 --- a/include/linux/i2o.h
61998 +++ b/include/linux/i2o.h
61999 @@ -564,7 +564,7 @@ struct i2o_controller {
62000 struct i2o_device *exec; /* Executive */
62001 #if BITS_PER_LONG == 64
62002 spinlock_t context_list_lock; /* lock for context_list */
62003 - atomic_t context_list_counter; /* needed for unique contexts */
62004 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
62005 struct list_head context_list; /* list of context id's
62006 and pointers */
62007 #endif
62008 diff --git a/include/linux/if_team.h b/include/linux/if_team.h
62009 index 58404b0..439ed95 100644
62010 --- a/include/linux/if_team.h
62011 +++ b/include/linux/if_team.h
62012 @@ -64,6 +64,7 @@ struct team_mode_ops {
62013 void (*port_leave)(struct team *team, struct team_port *port);
62014 void (*port_change_mac)(struct team *team, struct team_port *port);
62015 };
62016 +typedef struct team_mode_ops __no_const team_mode_ops_no_const;
62017
62018 enum team_option_type {
62019 TEAM_OPTION_TYPE_U32,
62020 @@ -112,7 +113,7 @@ struct team {
62021 struct list_head option_list;
62022
62023 const struct team_mode *mode;
62024 - struct team_mode_ops ops;
62025 + team_mode_ops_no_const ops;
62026 long mode_priv[TEAM_MODE_PRIV_LONGS];
62027 };
62028
62029 diff --git a/include/linux/init.h b/include/linux/init.h
62030 index 6b95109..4aca62c 100644
62031 --- a/include/linux/init.h
62032 +++ b/include/linux/init.h
62033 @@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
62034
62035 /* Each module must use one module_init(). */
62036 #define module_init(initfn) \
62037 - static inline initcall_t __inittest(void) \
62038 + static inline __used initcall_t __inittest(void) \
62039 { return initfn; } \
62040 int init_module(void) __attribute__((alias(#initfn)));
62041
62042 /* This is only required if you want to be unloadable. */
62043 #define module_exit(exitfn) \
62044 - static inline exitcall_t __exittest(void) \
62045 + static inline __used exitcall_t __exittest(void) \
62046 { return exitfn; } \
62047 void cleanup_module(void) __attribute__((alias(#exitfn)));
62048
62049 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
62050 index 9c66b1a..a3fdded 100644
62051 --- a/include/linux/init_task.h
62052 +++ b/include/linux/init_task.h
62053 @@ -127,6 +127,12 @@ extern struct cred init_cred;
62054
62055 #define INIT_TASK_COMM "swapper"
62056
62057 +#ifdef CONFIG_X86
62058 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
62059 +#else
62060 +#define INIT_TASK_THREAD_INFO
62061 +#endif
62062 +
62063 /*
62064 * INIT_TASK is used to set up the first task table, touch at
62065 * your own risk!. Base=0, limit=0x1fffff (=2MB)
62066 @@ -165,6 +171,7 @@ extern struct cred init_cred;
62067 RCU_INIT_POINTER(.cred, &init_cred), \
62068 .comm = INIT_TASK_COMM, \
62069 .thread = INIT_THREAD, \
62070 + INIT_TASK_THREAD_INFO \
62071 .fs = &init_fs, \
62072 .files = &init_files, \
62073 .signal = &init_signals, \
62074 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
62075 index e6ca56d..8583707 100644
62076 --- a/include/linux/intel-iommu.h
62077 +++ b/include/linux/intel-iommu.h
62078 @@ -296,7 +296,7 @@ struct iommu_flush {
62079 u8 fm, u64 type);
62080 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
62081 unsigned int size_order, u64 type);
62082 -};
62083 +} __no_const;
62084
62085 enum {
62086 SR_DMAR_FECTL_REG,
62087 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
62088 index a64b00e..464d8bc 100644
62089 --- a/include/linux/interrupt.h
62090 +++ b/include/linux/interrupt.h
62091 @@ -441,7 +441,7 @@ enum
62092 /* map softirq index to softirq name. update 'softirq_to_name' in
62093 * kernel/softirq.c when adding a new softirq.
62094 */
62095 -extern char *softirq_to_name[NR_SOFTIRQS];
62096 +extern const char * const softirq_to_name[NR_SOFTIRQS];
62097
62098 /* softirq mask and active fields moved to irq_cpustat_t in
62099 * asm/hardirq.h to get better cache usage. KAO
62100 @@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
62101
62102 struct softirq_action
62103 {
62104 - void (*action)(struct softirq_action *);
62105 + void (*action)(void);
62106 };
62107
62108 asmlinkage void do_softirq(void);
62109 asmlinkage void __do_softirq(void);
62110 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
62111 +extern void open_softirq(int nr, void (*action)(void));
62112 extern void softirq_init(void);
62113 static inline void __raise_softirq_irqoff(unsigned int nr)
62114 {
62115 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
62116 index 3875719..4cd454c 100644
62117 --- a/include/linux/kallsyms.h
62118 +++ b/include/linux/kallsyms.h
62119 @@ -15,7 +15,8 @@
62120
62121 struct module;
62122
62123 -#ifdef CONFIG_KALLSYMS
62124 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
62125 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62126 /* Lookup the address for a symbol. Returns 0 if not found. */
62127 unsigned long kallsyms_lookup_name(const char *name);
62128
62129 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
62130 /* Stupid that this does nothing, but I didn't create this mess. */
62131 #define __print_symbol(fmt, addr)
62132 #endif /*CONFIG_KALLSYMS*/
62133 +#else /* when included by kallsyms.c, vsnprintf.c, or
62134 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
62135 +extern void __print_symbol(const char *fmt, unsigned long address);
62136 +extern int sprint_backtrace(char *buffer, unsigned long address);
62137 +extern int sprint_symbol(char *buffer, unsigned long address);
62138 +const char *kallsyms_lookup(unsigned long addr,
62139 + unsigned long *symbolsize,
62140 + unsigned long *offset,
62141 + char **modname, char *namebuf);
62142 +#endif
62143
62144 /* This macro allows us to keep printk typechecking */
62145 static __printf(1, 2)
62146 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
62147 index c4d2fc1..5df9c19 100644
62148 --- a/include/linux/kgdb.h
62149 +++ b/include/linux/kgdb.h
62150 @@ -53,7 +53,7 @@ extern int kgdb_connected;
62151 extern int kgdb_io_module_registered;
62152
62153 extern atomic_t kgdb_setting_breakpoint;
62154 -extern atomic_t kgdb_cpu_doing_single_step;
62155 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
62156
62157 extern struct task_struct *kgdb_usethread;
62158 extern struct task_struct *kgdb_contthread;
62159 @@ -252,7 +252,7 @@ struct kgdb_arch {
62160 void (*disable_hw_break)(struct pt_regs *regs);
62161 void (*remove_all_hw_break)(void);
62162 void (*correct_hw_break)(void);
62163 -};
62164 +} __do_const;
62165
62166 /**
62167 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
62168 @@ -277,7 +277,7 @@ struct kgdb_io {
62169 void (*pre_exception) (void);
62170 void (*post_exception) (void);
62171 int is_console;
62172 -};
62173 +} __do_const;
62174
62175 extern struct kgdb_arch arch_kgdb_ops;
62176
62177 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
62178 index 0fb48ef..1b680b2 100644
62179 --- a/include/linux/kmod.h
62180 +++ b/include/linux/kmod.h
62181 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
62182 * usually useless though. */
62183 extern __printf(2, 3)
62184 int __request_module(bool wait, const char *name, ...);
62185 +extern __printf(3, 4)
62186 +int ___request_module(bool wait, char *param_name, const char *name, ...);
62187 #define request_module(mod...) __request_module(true, mod)
62188 #define request_module_nowait(mod...) __request_module(false, mod)
62189 #define try_then_request_module(x, mod...) \
62190 diff --git a/include/linux/kref.h b/include/linux/kref.h
62191 index 9c07dce..a92fa71 100644
62192 --- a/include/linux/kref.h
62193 +++ b/include/linux/kref.h
62194 @@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
62195 static inline int kref_sub(struct kref *kref, unsigned int count,
62196 void (*release)(struct kref *kref))
62197 {
62198 - WARN_ON(release == NULL);
62199 + BUG_ON(release == NULL);
62200
62201 if (atomic_sub_and_test((int) count, &kref->refcount)) {
62202 release(kref);
62203 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
62204 index 900c763..098aefa 100644
62205 --- a/include/linux/kvm_host.h
62206 +++ b/include/linux/kvm_host.h
62207 @@ -326,7 +326,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
62208 void vcpu_load(struct kvm_vcpu *vcpu);
62209 void vcpu_put(struct kvm_vcpu *vcpu);
62210
62211 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62212 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62213 struct module *module);
62214 void kvm_exit(void);
62215
62216 @@ -416,20 +416,20 @@ void kvm_get_pfn(pfn_t pfn);
62217 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
62218 int len);
62219 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
62220 - unsigned long len);
62221 -int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
62222 + unsigned long len) __size_overflow(4);
62223 +int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) __size_overflow(2,4);
62224 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
62225 - void *data, unsigned long len);
62226 + void *data, unsigned long len) __size_overflow(4);
62227 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
62228 int offset, int len);
62229 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
62230 - unsigned long len);
62231 + unsigned long len) __size_overflow(2,4);
62232 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
62233 - void *data, unsigned long len);
62234 + void *data, unsigned long len) __size_overflow(4);
62235 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
62236 gpa_t gpa);
62237 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
62238 -int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
62239 +int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) __size_overflow(2,3);
62240 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
62241 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
62242 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
62243 @@ -485,7 +485,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
62244 struct kvm_guest_debug *dbg);
62245 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
62246
62247 -int kvm_arch_init(void *opaque);
62248 +int kvm_arch_init(const void *opaque);
62249 void kvm_arch_exit(void);
62250
62251 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
62252 @@ -593,6 +593,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
62253
62254 #ifdef CONFIG_IOMMU_API
62255 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
62256 +void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
62257 int kvm_iommu_map_guest(struct kvm *kvm);
62258 int kvm_iommu_unmap_guest(struct kvm *kvm);
62259 int kvm_assign_device(struct kvm *kvm,
62260 @@ -606,6 +607,11 @@ static inline int kvm_iommu_map_pages(struct kvm *kvm,
62261 return 0;
62262 }
62263
62264 +static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
62265 + struct kvm_memory_slot *slot)
62266 +{
62267 +}
62268 +
62269 static inline int kvm_iommu_map_guest(struct kvm *kvm)
62270 {
62271 return -ENODEV;
62272 @@ -721,7 +727,7 @@ int kvm_setup_default_irq_routing(struct kvm *kvm);
62273 int kvm_set_irq_routing(struct kvm *kvm,
62274 const struct kvm_irq_routing_entry *entries,
62275 unsigned nr,
62276 - unsigned flags);
62277 + unsigned flags) __size_overflow(3);
62278 void kvm_free_irq_routing(struct kvm *kvm);
62279
62280 #else
62281 diff --git a/include/linux/libata.h b/include/linux/libata.h
62282 index cafc09a..d7e7829 100644
62283 --- a/include/linux/libata.h
62284 +++ b/include/linux/libata.h
62285 @@ -909,7 +909,7 @@ struct ata_port_operations {
62286 * fields must be pointers.
62287 */
62288 const struct ata_port_operations *inherits;
62289 -};
62290 +} __do_const;
62291
62292 struct ata_port_info {
62293 unsigned long flags;
62294 diff --git a/include/linux/mca.h b/include/linux/mca.h
62295 index 3797270..7765ede 100644
62296 --- a/include/linux/mca.h
62297 +++ b/include/linux/mca.h
62298 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
62299 int region);
62300 void * (*mca_transform_memory)(struct mca_device *,
62301 void *memory);
62302 -};
62303 +} __no_const;
62304
62305 struct mca_bus {
62306 u64 default_dma_mask;
62307 diff --git a/include/linux/memory.h b/include/linux/memory.h
62308 index 1ac7f6e..a5794d0 100644
62309 --- a/include/linux/memory.h
62310 +++ b/include/linux/memory.h
62311 @@ -143,7 +143,7 @@ struct memory_accessor {
62312 size_t count);
62313 ssize_t (*write)(struct memory_accessor *, const char *buf,
62314 off_t offset, size_t count);
62315 -};
62316 +} __no_const;
62317
62318 /*
62319 * Kernel text modification mutex, used for code patching. Users of this lock
62320 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
62321 index 9970337..9444122 100644
62322 --- a/include/linux/mfd/abx500.h
62323 +++ b/include/linux/mfd/abx500.h
62324 @@ -188,6 +188,7 @@ struct abx500_ops {
62325 int (*event_registers_startup_state_get) (struct device *, u8 *);
62326 int (*startup_irq_enabled) (struct device *, unsigned int);
62327 };
62328 +typedef struct abx500_ops __no_const abx500_ops_no_const;
62329
62330 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
62331 void abx500_remove_ops(struct device *dev);
62332 diff --git a/include/linux/mm.h b/include/linux/mm.h
62333 index 17b27cd..467ba2f 100644
62334 --- a/include/linux/mm.h
62335 +++ b/include/linux/mm.h
62336 @@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
62337
62338 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
62339 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
62340 +
62341 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62342 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
62343 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
62344 +#else
62345 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
62346 +#endif
62347 +
62348 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
62349 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
62350
62351 @@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
62352 int set_page_dirty_lock(struct page *page);
62353 int clear_page_dirty_for_io(struct page *page);
62354
62355 -/* Is the vma a continuation of the stack vma above it? */
62356 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
62357 -{
62358 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
62359 -}
62360 -
62361 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
62362 - unsigned long addr)
62363 -{
62364 - return (vma->vm_flags & VM_GROWSDOWN) &&
62365 - (vma->vm_start == addr) &&
62366 - !vma_growsdown(vma->vm_prev, addr);
62367 -}
62368 -
62369 -/* Is the vma a continuation of the stack vma below it? */
62370 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
62371 -{
62372 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
62373 -}
62374 -
62375 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
62376 - unsigned long addr)
62377 -{
62378 - return (vma->vm_flags & VM_GROWSUP) &&
62379 - (vma->vm_end == addr) &&
62380 - !vma_growsup(vma->vm_next, addr);
62381 -}
62382 -
62383 extern unsigned long move_page_tables(struct vm_area_struct *vma,
62384 unsigned long old_addr, struct vm_area_struct *new_vma,
62385 unsigned long new_addr, unsigned long len);
62386 @@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
62387 }
62388 #endif
62389
62390 +#ifdef CONFIG_MMU
62391 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
62392 +#else
62393 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
62394 +{
62395 + return __pgprot(0);
62396 +}
62397 +#endif
62398 +
62399 int vma_wants_writenotify(struct vm_area_struct *vma);
62400
62401 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
62402 @@ -1409,6 +1397,7 @@ out:
62403 }
62404
62405 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
62406 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
62407
62408 extern unsigned long do_brk(unsigned long, unsigned long);
62409
62410 @@ -1466,6 +1455,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
62411 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
62412 struct vm_area_struct **pprev);
62413
62414 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
62415 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
62416 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
62417 +
62418 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
62419 NULL if none. Assume start_addr < end_addr. */
62420 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
62421 @@ -1494,15 +1487,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
62422 return vma;
62423 }
62424
62425 -#ifdef CONFIG_MMU
62426 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
62427 -#else
62428 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
62429 -{
62430 - return __pgprot(0);
62431 -}
62432 -#endif
62433 -
62434 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
62435 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
62436 unsigned long pfn, unsigned long size, pgprot_t);
62437 @@ -1606,7 +1590,7 @@ extern int unpoison_memory(unsigned long pfn);
62438 extern int sysctl_memory_failure_early_kill;
62439 extern int sysctl_memory_failure_recovery;
62440 extern void shake_page(struct page *p, int access);
62441 -extern atomic_long_t mce_bad_pages;
62442 +extern atomic_long_unchecked_t mce_bad_pages;
62443 extern int soft_offline_page(struct page *page, int flags);
62444
62445 extern void dump_page(struct page *page);
62446 @@ -1637,5 +1621,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
62447 static inline bool page_is_guard(struct page *page) { return false; }
62448 #endif /* CONFIG_DEBUG_PAGEALLOC */
62449
62450 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62451 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
62452 +#else
62453 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
62454 +#endif
62455 +
62456 #endif /* __KERNEL__ */
62457 #endif /* _LINUX_MM_H */
62458 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
62459 index 3cc3062..efeaeb7 100644
62460 --- a/include/linux/mm_types.h
62461 +++ b/include/linux/mm_types.h
62462 @@ -252,6 +252,8 @@ struct vm_area_struct {
62463 #ifdef CONFIG_NUMA
62464 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
62465 #endif
62466 +
62467 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
62468 };
62469
62470 struct core_thread {
62471 @@ -326,7 +328,7 @@ struct mm_struct {
62472 unsigned long def_flags;
62473 unsigned long nr_ptes; /* Page table pages */
62474 unsigned long start_code, end_code, start_data, end_data;
62475 - unsigned long start_brk, brk, start_stack;
62476 + unsigned long brk_gap, start_brk, brk, start_stack;
62477 unsigned long arg_start, arg_end, env_start, env_end;
62478
62479 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
62480 @@ -388,6 +390,24 @@ struct mm_struct {
62481 #ifdef CONFIG_CPUMASK_OFFSTACK
62482 struct cpumask cpumask_allocation;
62483 #endif
62484 +
62485 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62486 + unsigned long pax_flags;
62487 +#endif
62488 +
62489 +#ifdef CONFIG_PAX_DLRESOLVE
62490 + unsigned long call_dl_resolve;
62491 +#endif
62492 +
62493 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
62494 + unsigned long call_syscall;
62495 +#endif
62496 +
62497 +#ifdef CONFIG_PAX_ASLR
62498 + unsigned long delta_mmap; /* randomized offset */
62499 + unsigned long delta_stack; /* randomized offset */
62500 +#endif
62501 +
62502 };
62503
62504 static inline void mm_init_cpumask(struct mm_struct *mm)
62505 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
62506 index 1d1b1e1..2a13c78 100644
62507 --- a/include/linux/mmu_notifier.h
62508 +++ b/include/linux/mmu_notifier.h
62509 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
62510 */
62511 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
62512 ({ \
62513 - pte_t __pte; \
62514 + pte_t ___pte; \
62515 struct vm_area_struct *___vma = __vma; \
62516 unsigned long ___address = __address; \
62517 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
62518 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
62519 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
62520 - __pte; \
62521 + ___pte; \
62522 })
62523
62524 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
62525 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
62526 index 650ba2f..af0a58c 100644
62527 --- a/include/linux/mmzone.h
62528 +++ b/include/linux/mmzone.h
62529 @@ -379,7 +379,7 @@ struct zone {
62530 unsigned long flags; /* zone flags, see below */
62531
62532 /* Zone statistics */
62533 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62534 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62535
62536 /*
62537 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
62538 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
62539 index 83ac071..2656e0e 100644
62540 --- a/include/linux/mod_devicetable.h
62541 +++ b/include/linux/mod_devicetable.h
62542 @@ -12,7 +12,7 @@
62543 typedef unsigned long kernel_ulong_t;
62544 #endif
62545
62546 -#define PCI_ANY_ID (~0)
62547 +#define PCI_ANY_ID ((__u16)~0)
62548
62549 struct pci_device_id {
62550 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
62551 @@ -131,7 +131,7 @@ struct usb_device_id {
62552 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
62553 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
62554
62555 -#define HID_ANY_ID (~0)
62556 +#define HID_ANY_ID (~0U)
62557
62558 struct hid_device_id {
62559 __u16 bus;
62560 diff --git a/include/linux/module.h b/include/linux/module.h
62561 index 4598bf0..e069d7f 100644
62562 --- a/include/linux/module.h
62563 +++ b/include/linux/module.h
62564 @@ -17,6 +17,7 @@
62565 #include <linux/moduleparam.h>
62566 #include <linux/tracepoint.h>
62567 #include <linux/export.h>
62568 +#include <linux/fs.h>
62569
62570 #include <linux/percpu.h>
62571 #include <asm/module.h>
62572 @@ -275,19 +276,16 @@ struct module
62573 int (*init)(void);
62574
62575 /* If this is non-NULL, vfree after init() returns */
62576 - void *module_init;
62577 + void *module_init_rx, *module_init_rw;
62578
62579 /* Here is the actual code + data, vfree'd on unload. */
62580 - void *module_core;
62581 + void *module_core_rx, *module_core_rw;
62582
62583 /* Here are the sizes of the init and core sections */
62584 - unsigned int init_size, core_size;
62585 + unsigned int init_size_rw, core_size_rw;
62586
62587 /* The size of the executable code in each section. */
62588 - unsigned int init_text_size, core_text_size;
62589 -
62590 - /* Size of RO sections of the module (text+rodata) */
62591 - unsigned int init_ro_size, core_ro_size;
62592 + unsigned int init_size_rx, core_size_rx;
62593
62594 /* Arch-specific module values */
62595 struct mod_arch_specific arch;
62596 @@ -343,6 +341,10 @@ struct module
62597 #ifdef CONFIG_EVENT_TRACING
62598 struct ftrace_event_call **trace_events;
62599 unsigned int num_trace_events;
62600 + struct file_operations trace_id;
62601 + struct file_operations trace_enable;
62602 + struct file_operations trace_format;
62603 + struct file_operations trace_filter;
62604 #endif
62605 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
62606 unsigned int num_ftrace_callsites;
62607 @@ -390,16 +392,46 @@ bool is_module_address(unsigned long addr);
62608 bool is_module_percpu_address(unsigned long addr);
62609 bool is_module_text_address(unsigned long addr);
62610
62611 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
62612 +{
62613 +
62614 +#ifdef CONFIG_PAX_KERNEXEC
62615 + if (ktla_ktva(addr) >= (unsigned long)start &&
62616 + ktla_ktva(addr) < (unsigned long)start + size)
62617 + return 1;
62618 +#endif
62619 +
62620 + return ((void *)addr >= start && (void *)addr < start + size);
62621 +}
62622 +
62623 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
62624 +{
62625 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
62626 +}
62627 +
62628 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
62629 +{
62630 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
62631 +}
62632 +
62633 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
62634 +{
62635 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
62636 +}
62637 +
62638 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
62639 +{
62640 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
62641 +}
62642 +
62643 static inline int within_module_core(unsigned long addr, struct module *mod)
62644 {
62645 - return (unsigned long)mod->module_core <= addr &&
62646 - addr < (unsigned long)mod->module_core + mod->core_size;
62647 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
62648 }
62649
62650 static inline int within_module_init(unsigned long addr, struct module *mod)
62651 {
62652 - return (unsigned long)mod->module_init <= addr &&
62653 - addr < (unsigned long)mod->module_init + mod->init_size;
62654 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
62655 }
62656
62657 /* Search for module by name: must hold module_mutex. */
62658 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
62659 index b2be02e..72d2f78 100644
62660 --- a/include/linux/moduleloader.h
62661 +++ b/include/linux/moduleloader.h
62662 @@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
62663
62664 /* Allocator used for allocating struct module, core sections and init
62665 sections. Returns NULL on failure. */
62666 -void *module_alloc(unsigned long size);
62667 +void *module_alloc(unsigned long size) __size_overflow(1);
62668 +
62669 +#ifdef CONFIG_PAX_KERNEXEC
62670 +void *module_alloc_exec(unsigned long size) __size_overflow(1);
62671 +#else
62672 +#define module_alloc_exec(x) module_alloc(x)
62673 +#endif
62674
62675 /* Free memory returned from module_alloc. */
62676 void module_free(struct module *mod, void *module_region);
62677
62678 +#ifdef CONFIG_PAX_KERNEXEC
62679 +void module_free_exec(struct module *mod, void *module_region);
62680 +#else
62681 +#define module_free_exec(x, y) module_free((x), (y))
62682 +#endif
62683 +
62684 /* Apply the given relocation to the (simplified) ELF. Return -error
62685 or 0. */
62686 int apply_relocate(Elf_Shdr *sechdrs,
62687 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
62688 index c47f4d6..23f9bdb 100644
62689 --- a/include/linux/moduleparam.h
62690 +++ b/include/linux/moduleparam.h
62691 @@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
62692 * @len is usually just sizeof(string).
62693 */
62694 #define module_param_string(name, string, len, perm) \
62695 - static const struct kparam_string __param_string_##name \
62696 + static const struct kparam_string __param_string_##name __used \
62697 = { len, string }; \
62698 __module_param_call(MODULE_PARAM_PREFIX, name, \
62699 &param_ops_string, \
62700 @@ -396,7 +396,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
62701 */
62702 #define module_param_array_named(name, array, type, nump, perm) \
62703 param_check_##type(name, &(array)[0]); \
62704 - static const struct kparam_array __param_arr_##name \
62705 + static const struct kparam_array __param_arr_##name __used \
62706 = { .max = ARRAY_SIZE(array), .num = nump, \
62707 .ops = &param_ops_##type, \
62708 .elemsize = sizeof(array[0]), .elem = array }; \
62709 diff --git a/include/linux/namei.h b/include/linux/namei.h
62710 index ffc0213..2c1f2cb 100644
62711 --- a/include/linux/namei.h
62712 +++ b/include/linux/namei.h
62713 @@ -24,7 +24,7 @@ struct nameidata {
62714 unsigned seq;
62715 int last_type;
62716 unsigned depth;
62717 - char *saved_names[MAX_NESTED_LINKS + 1];
62718 + const char *saved_names[MAX_NESTED_LINKS + 1];
62719
62720 /* Intent data */
62721 union {
62722 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
62723 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
62724 extern void unlock_rename(struct dentry *, struct dentry *);
62725
62726 -static inline void nd_set_link(struct nameidata *nd, char *path)
62727 +static inline void nd_set_link(struct nameidata *nd, const char *path)
62728 {
62729 nd->saved_names[nd->depth] = path;
62730 }
62731
62732 -static inline char *nd_get_link(struct nameidata *nd)
62733 +static inline const char *nd_get_link(const struct nameidata *nd)
62734 {
62735 return nd->saved_names[nd->depth];
62736 }
62737 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
62738 index 0eac07c..a59f6a8 100644
62739 --- a/include/linux/netdevice.h
62740 +++ b/include/linux/netdevice.h
62741 @@ -1002,6 +1002,7 @@ struct net_device_ops {
62742 int (*ndo_neigh_construct)(struct neighbour *n);
62743 void (*ndo_neigh_destroy)(struct neighbour *n);
62744 };
62745 +typedef struct net_device_ops __no_const net_device_ops_no_const;
62746
62747 /*
62748 * The DEVICE structure.
62749 @@ -1063,7 +1064,7 @@ struct net_device {
62750 int iflink;
62751
62752 struct net_device_stats stats;
62753 - atomic_long_t rx_dropped; /* dropped packets by core network
62754 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
62755 * Do not use this in drivers.
62756 */
62757
62758 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
62759 new file mode 100644
62760 index 0000000..33f4af8
62761 --- /dev/null
62762 +++ b/include/linux/netfilter/xt_gradm.h
62763 @@ -0,0 +1,9 @@
62764 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
62765 +#define _LINUX_NETFILTER_XT_GRADM_H 1
62766 +
62767 +struct xt_gradm_mtinfo {
62768 + __u16 flags;
62769 + __u16 invflags;
62770 +};
62771 +
62772 +#endif
62773 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62774 index c65a18a..0c05f3a 100644
62775 --- a/include/linux/of_pdt.h
62776 +++ b/include/linux/of_pdt.h
62777 @@ -32,7 +32,7 @@ struct of_pdt_ops {
62778
62779 /* return 0 on success; fill in 'len' with number of bytes in path */
62780 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62781 -};
62782 +} __no_const;
62783
62784 extern void *prom_early_alloc(unsigned long size);
62785
62786 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62787 index a4c5624..2dabfb7 100644
62788 --- a/include/linux/oprofile.h
62789 +++ b/include/linux/oprofile.h
62790 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
62791 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62792 char const * name, ulong * val);
62793
62794 -/** Create a file for read-only access to an atomic_t. */
62795 +/** Create a file for read-only access to an atomic_unchecked_t. */
62796 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62797 - char const * name, atomic_t * val);
62798 + char const * name, atomic_unchecked_t * val);
62799
62800 /** create a directory */
62801 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
62802 @@ -163,7 +163,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
62803 * Read an ASCII string for a number from a userspace buffer and fill *val on success.
62804 * Returns 0 on success, < 0 on error.
62805 */
62806 -int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
62807 +int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count) __size_overflow(3);
62808
62809 /** lock for read/write safety */
62810 extern raw_spinlock_t oprofilefs_lock;
62811 diff --git a/include/linux/padata.h b/include/linux/padata.h
62812 index 4633b2f..988bc08 100644
62813 --- a/include/linux/padata.h
62814 +++ b/include/linux/padata.h
62815 @@ -129,7 +129,7 @@ struct parallel_data {
62816 struct padata_instance *pinst;
62817 struct padata_parallel_queue __percpu *pqueue;
62818 struct padata_serial_queue __percpu *squeue;
62819 - atomic_t seq_nr;
62820 + atomic_unchecked_t seq_nr;
62821 atomic_t reorder_objects;
62822 atomic_t refcnt;
62823 unsigned int max_seq_nr;
62824 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62825 index abb2776..d8b8e15 100644
62826 --- a/include/linux/perf_event.h
62827 +++ b/include/linux/perf_event.h
62828 @@ -750,8 +750,8 @@ struct perf_event {
62829
62830 enum perf_event_active_state state;
62831 unsigned int attach_state;
62832 - local64_t count;
62833 - atomic64_t child_count;
62834 + local64_t count; /* PaX: fix it one day */
62835 + atomic64_unchecked_t child_count;
62836
62837 /*
62838 * These are the total time in nanoseconds that the event
62839 @@ -802,8 +802,8 @@ struct perf_event {
62840 * These accumulate total time (in nanoseconds) that children
62841 * events have been enabled and running, respectively.
62842 */
62843 - atomic64_t child_total_time_enabled;
62844 - atomic64_t child_total_time_running;
62845 + atomic64_unchecked_t child_total_time_enabled;
62846 + atomic64_unchecked_t child_total_time_running;
62847
62848 /*
62849 * Protect attach/detach and child_list:
62850 diff --git a/include/linux/personality.h b/include/linux/personality.h
62851 index 8fc7dd1a..c19d89e 100644
62852 --- a/include/linux/personality.h
62853 +++ b/include/linux/personality.h
62854 @@ -44,6 +44,7 @@ enum {
62855 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
62856 ADDR_NO_RANDOMIZE | \
62857 ADDR_COMPAT_LAYOUT | \
62858 + ADDR_LIMIT_3GB | \
62859 MMAP_PAGE_ZERO)
62860
62861 /*
62862 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62863 index 77257c9..51d473a 100644
62864 --- a/include/linux/pipe_fs_i.h
62865 +++ b/include/linux/pipe_fs_i.h
62866 @@ -46,9 +46,9 @@ struct pipe_buffer {
62867 struct pipe_inode_info {
62868 wait_queue_head_t wait;
62869 unsigned int nrbufs, curbuf, buffers;
62870 - unsigned int readers;
62871 - unsigned int writers;
62872 - unsigned int waiting_writers;
62873 + atomic_t readers;
62874 + atomic_t writers;
62875 + atomic_t waiting_writers;
62876 unsigned int r_counter;
62877 unsigned int w_counter;
62878 struct page *tmp_page;
62879 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62880 index 609daae..5392427 100644
62881 --- a/include/linux/pm_runtime.h
62882 +++ b/include/linux/pm_runtime.h
62883 @@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
62884
62885 static inline void pm_runtime_mark_last_busy(struct device *dev)
62886 {
62887 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
62888 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62889 }
62890
62891 #else /* !CONFIG_PM_RUNTIME */
62892 diff --git a/include/linux/poison.h b/include/linux/poison.h
62893 index 2110a81..13a11bb 100644
62894 --- a/include/linux/poison.h
62895 +++ b/include/linux/poison.h
62896 @@ -19,8 +19,8 @@
62897 * under normal circumstances, used to verify that nobody uses
62898 * non-initialized list entries.
62899 */
62900 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62901 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62902 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62903 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
62904
62905 /********** include/linux/timer.h **********/
62906 /*
62907 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62908 index 58969b2..ead129b 100644
62909 --- a/include/linux/preempt.h
62910 +++ b/include/linux/preempt.h
62911 @@ -123,7 +123,7 @@ struct preempt_ops {
62912 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62913 void (*sched_out)(struct preempt_notifier *notifier,
62914 struct task_struct *next);
62915 -};
62916 +} __no_const;
62917
62918 /**
62919 * preempt_notifier - key for installing preemption notifiers
62920 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62921 index 85c5073..51fac8b 100644
62922 --- a/include/linux/proc_fs.h
62923 +++ b/include/linux/proc_fs.h
62924 @@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
62925 return proc_create_data(name, mode, parent, proc_fops, NULL);
62926 }
62927
62928 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
62929 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62930 +{
62931 +#ifdef CONFIG_GRKERNSEC_PROC_USER
62932 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62933 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62934 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62935 +#else
62936 + return proc_create_data(name, mode, parent, proc_fops, NULL);
62937 +#endif
62938 +}
62939 +
62940 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62941 umode_t mode, struct proc_dir_entry *base,
62942 read_proc_t *read_proc, void * data)
62943 @@ -258,7 +270,7 @@ union proc_op {
62944 int (*proc_show)(struct seq_file *m,
62945 struct pid_namespace *ns, struct pid *pid,
62946 struct task_struct *task);
62947 -};
62948 +} __no_const;
62949
62950 struct ctl_table_header;
62951 struct ctl_table;
62952 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
62953 index c2f1f6a..6fdb196 100644
62954 --- a/include/linux/ptrace.h
62955 +++ b/include/linux/ptrace.h
62956 @@ -199,9 +199,10 @@ static inline void ptrace_event(int event, unsigned long message)
62957 if (unlikely(ptrace_event_enabled(current, event))) {
62958 current->ptrace_message = message;
62959 ptrace_notify((event << 8) | SIGTRAP);
62960 - } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
62961 + } else if (event == PTRACE_EVENT_EXEC) {
62962 /* legacy EXEC report via SIGTRAP */
62963 - send_sig(SIGTRAP, current, 0);
62964 + if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
62965 + send_sig(SIGTRAP, current, 0);
62966 }
62967 }
62968
62969 diff --git a/include/linux/random.h b/include/linux/random.h
62970 index 8f74538..02a1012 100644
62971 --- a/include/linux/random.h
62972 +++ b/include/linux/random.h
62973 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
62974
62975 u32 prandom32(struct rnd_state *);
62976
62977 +static inline unsigned long pax_get_random_long(void)
62978 +{
62979 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62980 +}
62981 +
62982 /*
62983 * Handle minimum values for seeds
62984 */
62985 static inline u32 __seed(u32 x, u32 m)
62986 {
62987 - return (x < m) ? x + m : x;
62988 + return (x <= m) ? x + m + 1 : x;
62989 }
62990
62991 /**
62992 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62993 index e0879a7..a12f962 100644
62994 --- a/include/linux/reboot.h
62995 +++ b/include/linux/reboot.h
62996 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62997 * Architecture-specific implementations of sys_reboot commands.
62998 */
62999
63000 -extern void machine_restart(char *cmd);
63001 -extern void machine_halt(void);
63002 -extern void machine_power_off(void);
63003 +extern void machine_restart(char *cmd) __noreturn;
63004 +extern void machine_halt(void) __noreturn;
63005 +extern void machine_power_off(void) __noreturn;
63006
63007 extern void machine_shutdown(void);
63008 struct pt_regs;
63009 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
63010 */
63011
63012 extern void kernel_restart_prepare(char *cmd);
63013 -extern void kernel_restart(char *cmd);
63014 -extern void kernel_halt(void);
63015 -extern void kernel_power_off(void);
63016 +extern void kernel_restart(char *cmd) __noreturn;
63017 +extern void kernel_halt(void) __noreturn;
63018 +extern void kernel_power_off(void) __noreturn;
63019
63020 extern int C_A_D; /* for sysctl */
63021 void ctrl_alt_del(void);
63022 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
63023 * Emergency restart, callable from an interrupt handler.
63024 */
63025
63026 -extern void emergency_restart(void);
63027 +extern void emergency_restart(void) __noreturn;
63028 #include <asm/emergency-restart.h>
63029
63030 #endif
63031 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
63032 index 2213ddc..650212a 100644
63033 --- a/include/linux/reiserfs_fs.h
63034 +++ b/include/linux/reiserfs_fs.h
63035 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
63036 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
63037
63038 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
63039 -#define get_generation(s) atomic_read (&fs_generation(s))
63040 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
63041 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
63042 #define __fs_changed(gen,s) (gen != get_generation (s))
63043 #define fs_changed(gen,s) \
63044 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
63045 index 8c9e85c..1698e9a 100644
63046 --- a/include/linux/reiserfs_fs_sb.h
63047 +++ b/include/linux/reiserfs_fs_sb.h
63048 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
63049 /* Comment? -Hans */
63050 wait_queue_head_t s_wait;
63051 /* To be obsoleted soon by per buffer seals.. -Hans */
63052 - atomic_t s_generation_counter; // increased by one every time the
63053 + atomic_unchecked_t s_generation_counter; // increased by one every time the
63054 // tree gets re-balanced
63055 unsigned long s_properties; /* File system properties. Currently holds
63056 on-disk FS format */
63057 diff --git a/include/linux/relay.h b/include/linux/relay.h
63058 index a822fd7..62b70f6 100644
63059 --- a/include/linux/relay.h
63060 +++ b/include/linux/relay.h
63061 @@ -159,7 +159,7 @@ struct rchan_callbacks
63062 * The callback should return 0 if successful, negative if not.
63063 */
63064 int (*remove_buf_file)(struct dentry *dentry);
63065 -};
63066 +} __no_const;
63067
63068 /*
63069 * CONFIG_RELAY kernel API, kernel/relay.c
63070 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
63071 index c6c6084..5bf1212 100644
63072 --- a/include/linux/rfkill.h
63073 +++ b/include/linux/rfkill.h
63074 @@ -147,6 +147,7 @@ struct rfkill_ops {
63075 void (*query)(struct rfkill *rfkill, void *data);
63076 int (*set_block)(void *data, bool blocked);
63077 };
63078 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
63079
63080 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
63081 /**
63082 diff --git a/include/linux/rio.h b/include/linux/rio.h
63083 index 4d50611..c6858a2 100644
63084 --- a/include/linux/rio.h
63085 +++ b/include/linux/rio.h
63086 @@ -315,7 +315,7 @@ struct rio_ops {
63087 int mbox, void *buffer, size_t len);
63088 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
63089 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
63090 -};
63091 +} __no_const;
63092
63093 #define RIO_RESOURCE_MEM 0x00000100
63094 #define RIO_RESOURCE_DOORBELL 0x00000200
63095 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
63096 index 1cdd62a..e399f0d 100644
63097 --- a/include/linux/rmap.h
63098 +++ b/include/linux/rmap.h
63099 @@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
63100 void anon_vma_init(void); /* create anon_vma_cachep */
63101 int anon_vma_prepare(struct vm_area_struct *);
63102 void unlink_anon_vmas(struct vm_area_struct *);
63103 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
63104 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
63105 void anon_vma_moveto_tail(struct vm_area_struct *);
63106 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
63107 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
63108 void __anon_vma_link(struct vm_area_struct *);
63109
63110 static inline void anon_vma_merge(struct vm_area_struct *vma,
63111 diff --git a/include/linux/sched.h b/include/linux/sched.h
63112 index 0657368..765f70f 100644
63113 --- a/include/linux/sched.h
63114 +++ b/include/linux/sched.h
63115 @@ -101,6 +101,7 @@ struct bio_list;
63116 struct fs_struct;
63117 struct perf_event_context;
63118 struct blk_plug;
63119 +struct linux_binprm;
63120
63121 /*
63122 * List of flags we want to share for kernel threads,
63123 @@ -382,10 +383,13 @@ struct user_namespace;
63124 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
63125
63126 extern int sysctl_max_map_count;
63127 +extern unsigned long sysctl_heap_stack_gap;
63128
63129 #include <linux/aio.h>
63130
63131 #ifdef CONFIG_MMU
63132 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
63133 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
63134 extern void arch_pick_mmap_layout(struct mm_struct *mm);
63135 extern unsigned long
63136 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
63137 @@ -631,6 +635,17 @@ struct signal_struct {
63138 #ifdef CONFIG_TASKSTATS
63139 struct taskstats *stats;
63140 #endif
63141 +
63142 +#ifdef CONFIG_GRKERNSEC
63143 + u32 curr_ip;
63144 + u32 saved_ip;
63145 + u32 gr_saddr;
63146 + u32 gr_daddr;
63147 + u16 gr_sport;
63148 + u16 gr_dport;
63149 + u8 used_accept:1;
63150 +#endif
63151 +
63152 #ifdef CONFIG_AUDIT
63153 unsigned audit_tty;
63154 struct tty_audit_buf *tty_audit_buf;
63155 @@ -714,6 +729,11 @@ struct user_struct {
63156 struct key *session_keyring; /* UID's default session keyring */
63157 #endif
63158
63159 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63160 + unsigned int banned;
63161 + unsigned long ban_expires;
63162 +#endif
63163 +
63164 /* Hash table maintenance information */
63165 struct hlist_node uidhash_node;
63166 uid_t uid;
63167 @@ -1354,8 +1374,8 @@ struct task_struct {
63168 struct list_head thread_group;
63169
63170 struct completion *vfork_done; /* for vfork() */
63171 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
63172 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63173 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
63174 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63175
63176 cputime_t utime, stime, utimescaled, stimescaled;
63177 cputime_t gtime;
63178 @@ -1371,13 +1391,6 @@ struct task_struct {
63179 struct task_cputime cputime_expires;
63180 struct list_head cpu_timers[3];
63181
63182 -/* process credentials */
63183 - const struct cred __rcu *real_cred; /* objective and real subjective task
63184 - * credentials (COW) */
63185 - const struct cred __rcu *cred; /* effective (overridable) subjective task
63186 - * credentials (COW) */
63187 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63188 -
63189 char comm[TASK_COMM_LEN]; /* executable name excluding path
63190 - access with [gs]et_task_comm (which lock
63191 it with task_lock())
63192 @@ -1394,8 +1407,16 @@ struct task_struct {
63193 #endif
63194 /* CPU-specific state of this task */
63195 struct thread_struct thread;
63196 +/* thread_info moved to task_struct */
63197 +#ifdef CONFIG_X86
63198 + struct thread_info tinfo;
63199 +#endif
63200 /* filesystem information */
63201 struct fs_struct *fs;
63202 +
63203 + const struct cred __rcu *cred; /* effective (overridable) subjective task
63204 + * credentials (COW) */
63205 +
63206 /* open file information */
63207 struct files_struct *files;
63208 /* namespaces */
63209 @@ -1442,6 +1463,11 @@ struct task_struct {
63210 struct rt_mutex_waiter *pi_blocked_on;
63211 #endif
63212
63213 +/* process credentials */
63214 + const struct cred __rcu *real_cred; /* objective and real subjective task
63215 + * credentials (COW) */
63216 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63217 +
63218 #ifdef CONFIG_DEBUG_MUTEXES
63219 /* mutex deadlock detection */
63220 struct mutex_waiter *blocked_on;
63221 @@ -1558,6 +1584,27 @@ struct task_struct {
63222 unsigned long default_timer_slack_ns;
63223
63224 struct list_head *scm_work_list;
63225 +
63226 +#ifdef CONFIG_GRKERNSEC
63227 + /* grsecurity */
63228 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63229 + u64 exec_id;
63230 +#endif
63231 +#ifdef CONFIG_GRKERNSEC_SETXID
63232 + const struct cred *delayed_cred;
63233 +#endif
63234 + struct dentry *gr_chroot_dentry;
63235 + struct acl_subject_label *acl;
63236 + struct acl_role_label *role;
63237 + struct file *exec_file;
63238 + u16 acl_role_id;
63239 + /* is this the task that authenticated to the special role */
63240 + u8 acl_sp_role;
63241 + u8 is_writable;
63242 + u8 brute;
63243 + u8 gr_is_chrooted;
63244 +#endif
63245 +
63246 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
63247 /* Index of current stored address in ret_stack */
63248 int curr_ret_stack;
63249 @@ -1592,6 +1639,51 @@ struct task_struct {
63250 #endif
63251 };
63252
63253 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
63254 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
63255 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
63256 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
63257 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
63258 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
63259 +
63260 +#ifdef CONFIG_PAX_SOFTMODE
63261 +extern int pax_softmode;
63262 +#endif
63263 +
63264 +extern int pax_check_flags(unsigned long *);
63265 +
63266 +/* if tsk != current then task_lock must be held on it */
63267 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63268 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
63269 +{
63270 + if (likely(tsk->mm))
63271 + return tsk->mm->pax_flags;
63272 + else
63273 + return 0UL;
63274 +}
63275 +
63276 +/* if tsk != current then task_lock must be held on it */
63277 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
63278 +{
63279 + if (likely(tsk->mm)) {
63280 + tsk->mm->pax_flags = flags;
63281 + return 0;
63282 + }
63283 + return -EINVAL;
63284 +}
63285 +#endif
63286 +
63287 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63288 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
63289 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
63290 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
63291 +#endif
63292 +
63293 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
63294 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
63295 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
63296 +extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
63297 +
63298 /* Future-safe accessor for struct task_struct's cpus_allowed. */
63299 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
63300
63301 @@ -2104,7 +2196,9 @@ void yield(void);
63302 extern struct exec_domain default_exec_domain;
63303
63304 union thread_union {
63305 +#ifndef CONFIG_X86
63306 struct thread_info thread_info;
63307 +#endif
63308 unsigned long stack[THREAD_SIZE/sizeof(long)];
63309 };
63310
63311 @@ -2137,6 +2231,7 @@ extern struct pid_namespace init_pid_ns;
63312 */
63313
63314 extern struct task_struct *find_task_by_vpid(pid_t nr);
63315 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
63316 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
63317 struct pid_namespace *ns);
63318
63319 @@ -2280,7 +2375,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
63320 extern void exit_itimers(struct signal_struct *);
63321 extern void flush_itimer_signals(void);
63322
63323 -extern void do_group_exit(int);
63324 +extern __noreturn void do_group_exit(int);
63325
63326 extern void daemonize(const char *, ...);
63327 extern int allow_signal(int);
63328 @@ -2478,13 +2573,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
63329
63330 #endif
63331
63332 -static inline int object_is_on_stack(void *obj)
63333 +static inline int object_starts_on_stack(void *obj)
63334 {
63335 - void *stack = task_stack_page(current);
63336 + const void *stack = task_stack_page(current);
63337
63338 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
63339 }
63340
63341 +#ifdef CONFIG_PAX_USERCOPY
63342 +extern int object_is_on_stack(const void *obj, unsigned long len);
63343 +#endif
63344 +
63345 extern void thread_info_cache_init(void);
63346
63347 #ifdef CONFIG_DEBUG_STACK_USAGE
63348 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
63349 index 899fbb4..1cb4138 100644
63350 --- a/include/linux/screen_info.h
63351 +++ b/include/linux/screen_info.h
63352 @@ -43,7 +43,8 @@ struct screen_info {
63353 __u16 pages; /* 0x32 */
63354 __u16 vesa_attributes; /* 0x34 */
63355 __u32 capabilities; /* 0x36 */
63356 - __u8 _reserved[6]; /* 0x3a */
63357 + __u16 vesapm_size; /* 0x3a */
63358 + __u8 _reserved[4]; /* 0x3c */
63359 } __attribute__((packed));
63360
63361 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
63362 diff --git a/include/linux/security.h b/include/linux/security.h
63363 index 83c18e8..2d98860 100644
63364 --- a/include/linux/security.h
63365 +++ b/include/linux/security.h
63366 @@ -37,6 +37,7 @@
63367 #include <linux/xfrm.h>
63368 #include <linux/slab.h>
63369 #include <linux/xattr.h>
63370 +#include <linux/grsecurity.h>
63371 #include <net/flow.h>
63372
63373 /* Maximum number of letters for an LSM name string */
63374 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
63375 index 44f1514..2bbf6c1 100644
63376 --- a/include/linux/seq_file.h
63377 +++ b/include/linux/seq_file.h
63378 @@ -24,6 +24,9 @@ struct seq_file {
63379 struct mutex lock;
63380 const struct seq_operations *op;
63381 int poll_event;
63382 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63383 + u64 exec_id;
63384 +#endif
63385 void *private;
63386 };
63387
63388 @@ -33,6 +36,7 @@ struct seq_operations {
63389 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
63390 int (*show) (struct seq_file *m, void *v);
63391 };
63392 +typedef struct seq_operations __no_const seq_operations_no_const;
63393
63394 #define SEQ_SKIP 1
63395
63396 diff --git a/include/linux/shm.h b/include/linux/shm.h
63397 index 92808b8..c28cac4 100644
63398 --- a/include/linux/shm.h
63399 +++ b/include/linux/shm.h
63400 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
63401
63402 /* The task created the shm object. NULL if the task is dead. */
63403 struct task_struct *shm_creator;
63404 +#ifdef CONFIG_GRKERNSEC
63405 + time_t shm_createtime;
63406 + pid_t shm_lapid;
63407 +#endif
63408 };
63409
63410 /* shm_mode upper byte flags */
63411 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
63412 index ae86ade..2b51468 100644
63413 --- a/include/linux/skbuff.h
63414 +++ b/include/linux/skbuff.h
63415 @@ -654,7 +654,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
63416 */
63417 static inline int skb_queue_empty(const struct sk_buff_head *list)
63418 {
63419 - return list->next == (struct sk_buff *)list;
63420 + return list->next == (const struct sk_buff *)list;
63421 }
63422
63423 /**
63424 @@ -667,7 +667,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
63425 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63426 const struct sk_buff *skb)
63427 {
63428 - return skb->next == (struct sk_buff *)list;
63429 + return skb->next == (const struct sk_buff *)list;
63430 }
63431
63432 /**
63433 @@ -680,7 +680,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63434 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
63435 const struct sk_buff *skb)
63436 {
63437 - return skb->prev == (struct sk_buff *)list;
63438 + return skb->prev == (const struct sk_buff *)list;
63439 }
63440
63441 /**
63442 @@ -1545,7 +1545,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
63443 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
63444 */
63445 #ifndef NET_SKB_PAD
63446 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
63447 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
63448 #endif
63449
63450 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
63451 diff --git a/include/linux/slab.h b/include/linux/slab.h
63452 index 573c809..07e1f43 100644
63453 --- a/include/linux/slab.h
63454 +++ b/include/linux/slab.h
63455 @@ -11,12 +11,20 @@
63456
63457 #include <linux/gfp.h>
63458 #include <linux/types.h>
63459 +#include <linux/err.h>
63460
63461 /*
63462 * Flags to pass to kmem_cache_create().
63463 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
63464 */
63465 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
63466 +
63467 +#ifdef CONFIG_PAX_USERCOPY
63468 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
63469 +#else
63470 +#define SLAB_USERCOPY 0x00000000UL
63471 +#endif
63472 +
63473 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
63474 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
63475 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
63476 @@ -87,10 +95,13 @@
63477 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
63478 * Both make kfree a no-op.
63479 */
63480 -#define ZERO_SIZE_PTR ((void *)16)
63481 +#define ZERO_SIZE_PTR \
63482 +({ \
63483 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
63484 + (void *)(-MAX_ERRNO-1L); \
63485 +})
63486
63487 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
63488 - (unsigned long)ZERO_SIZE_PTR)
63489 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
63490
63491 /*
63492 * struct kmem_cache related prototypes
63493 @@ -156,11 +167,12 @@ unsigned int kmem_cache_size(struct kmem_cache *);
63494 /*
63495 * Common kmalloc functions provided by all allocators
63496 */
63497 -void * __must_check __krealloc(const void *, size_t, gfp_t);
63498 -void * __must_check krealloc(const void *, size_t, gfp_t);
63499 +void * __must_check __krealloc(const void *, size_t, gfp_t) __size_overflow(2);
63500 +void * __must_check krealloc(const void *, size_t, gfp_t) __size_overflow(2);
63501 void kfree(const void *);
63502 void kzfree(const void *);
63503 size_t ksize(const void *);
63504 +void check_object_size(const void *ptr, unsigned long n, bool to);
63505
63506 /*
63507 * Allocator specific definitions. These are mainly used to establish optimized
63508 @@ -287,7 +299,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
63509 */
63510 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63511 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63512 -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63513 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
63514 #define kmalloc_track_caller(size, flags) \
63515 __kmalloc_track_caller(size, flags, _RET_IP_)
63516 #else
63517 @@ -306,7 +318,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63518 */
63519 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63520 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63521 -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
63522 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
63523 #define kmalloc_node_track_caller(size, flags, node) \
63524 __kmalloc_node_track_caller(size, flags, node, \
63525 _RET_IP_)
63526 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
63527 index fbd1117..c0bd874 100644
63528 --- a/include/linux/slab_def.h
63529 +++ b/include/linux/slab_def.h
63530 @@ -66,10 +66,10 @@ struct kmem_cache {
63531 unsigned long node_allocs;
63532 unsigned long node_frees;
63533 unsigned long node_overflow;
63534 - atomic_t allochit;
63535 - atomic_t allocmiss;
63536 - atomic_t freehit;
63537 - atomic_t freemiss;
63538 + atomic_unchecked_t allochit;
63539 + atomic_unchecked_t allocmiss;
63540 + atomic_unchecked_t freehit;
63541 + atomic_unchecked_t freemiss;
63542
63543 /*
63544 * If debugging is enabled, then the allocator can add additional
63545 @@ -107,7 +107,7 @@ struct cache_sizes {
63546 extern struct cache_sizes malloc_sizes[];
63547
63548 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63549 -void *__kmalloc(size_t size, gfp_t flags);
63550 +void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63551
63552 #ifdef CONFIG_TRACING
63553 extern void *kmem_cache_alloc_trace(size_t size,
63554 @@ -125,6 +125,7 @@ static inline size_t slab_buffer_size(struct kmem_cache *cachep)
63555 }
63556 #endif
63557
63558 +static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63559 static __always_inline void *kmalloc(size_t size, gfp_t flags)
63560 {
63561 struct kmem_cache *cachep;
63562 @@ -160,7 +161,7 @@ found:
63563 }
63564
63565 #ifdef CONFIG_NUMA
63566 -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
63567 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63568 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
63569
63570 #ifdef CONFIG_TRACING
63571 @@ -179,6 +180,7 @@ kmem_cache_alloc_node_trace(size_t size,
63572 }
63573 #endif
63574
63575 +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63576 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63577 {
63578 struct kmem_cache *cachep;
63579 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
63580 index 0ec00b3..65e7e0e 100644
63581 --- a/include/linux/slob_def.h
63582 +++ b/include/linux/slob_def.h
63583 @@ -9,8 +9,9 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
63584 return kmem_cache_alloc_node(cachep, flags, -1);
63585 }
63586
63587 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
63588 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63589
63590 +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63591 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63592 {
63593 return __kmalloc_node(size, flags, node);
63594 @@ -24,11 +25,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63595 * kmalloc is the normal method of allocating memory
63596 * in the kernel.
63597 */
63598 +static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63599 static __always_inline void *kmalloc(size_t size, gfp_t flags)
63600 {
63601 return __kmalloc_node(size, flags, -1);
63602 }
63603
63604 +static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63605 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
63606 {
63607 return kmalloc(size, flags);
63608 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
63609 index a32bcfd..d26bd6e 100644
63610 --- a/include/linux/slub_def.h
63611 +++ b/include/linux/slub_def.h
63612 @@ -89,7 +89,7 @@ struct kmem_cache {
63613 struct kmem_cache_order_objects max;
63614 struct kmem_cache_order_objects min;
63615 gfp_t allocflags; /* gfp flags to use on each alloc */
63616 - int refcount; /* Refcount for slab cache destroy */
63617 + atomic_t refcount; /* Refcount for slab cache destroy */
63618 void (*ctor)(void *);
63619 int inuse; /* Offset to metadata */
63620 int align; /* Alignment */
63621 @@ -204,6 +204,7 @@ static __always_inline int kmalloc_index(size_t size)
63622 * This ought to end up with a global pointer to the right cache
63623 * in kmalloc_caches.
63624 */
63625 +static __always_inline struct kmem_cache *kmalloc_slab(size_t size) __size_overflow(1);
63626 static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
63627 {
63628 int index = kmalloc_index(size);
63629 @@ -215,9 +216,11 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
63630 }
63631
63632 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63633 -void *__kmalloc(size_t size, gfp_t flags);
63634 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
63635
63636 static __always_inline void *
63637 +kmalloc_order(size_t size, gfp_t flags, unsigned int order) __size_overflow(1);
63638 +static __always_inline void *
63639 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
63640 {
63641 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
63642 @@ -256,12 +259,14 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
63643 }
63644 #endif
63645
63646 +static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
63647 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
63648 {
63649 unsigned int order = get_order(size);
63650 return kmalloc_order_trace(size, flags, order);
63651 }
63652
63653 +static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63654 static __always_inline void *kmalloc(size_t size, gfp_t flags)
63655 {
63656 if (__builtin_constant_p(size)) {
63657 @@ -281,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
63658 }
63659
63660 #ifdef CONFIG_NUMA
63661 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
63662 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63663 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
63664
63665 #ifdef CONFIG_TRACING
63666 @@ -298,6 +303,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
63667 }
63668 #endif
63669
63670 +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63671 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63672 {
63673 if (__builtin_constant_p(size) &&
63674 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
63675 index de8832d..0147b46 100644
63676 --- a/include/linux/sonet.h
63677 +++ b/include/linux/sonet.h
63678 @@ -61,7 +61,7 @@ struct sonet_stats {
63679 #include <linux/atomic.h>
63680
63681 struct k_sonet_stats {
63682 -#define __HANDLE_ITEM(i) atomic_t i
63683 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
63684 __SONET_ITEMS
63685 #undef __HANDLE_ITEM
63686 };
63687 diff --git a/include/linux/stddef.h b/include/linux/stddef.h
63688 index 6a40c76..1747b67 100644
63689 --- a/include/linux/stddef.h
63690 +++ b/include/linux/stddef.h
63691 @@ -3,14 +3,10 @@
63692
63693 #include <linux/compiler.h>
63694
63695 +#ifdef __KERNEL__
63696 +
63697 #undef NULL
63698 -#if defined(__cplusplus)
63699 -#define NULL 0
63700 -#else
63701 #define NULL ((void *)0)
63702 -#endif
63703 -
63704 -#ifdef __KERNEL__
63705
63706 enum {
63707 false = 0,
63708 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
63709 index 2c5993a..b0e79f0 100644
63710 --- a/include/linux/sunrpc/clnt.h
63711 +++ b/include/linux/sunrpc/clnt.h
63712 @@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
63713 {
63714 switch (sap->sa_family) {
63715 case AF_INET:
63716 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
63717 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
63718 case AF_INET6:
63719 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
63720 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
63721 }
63722 return 0;
63723 }
63724 @@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
63725 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
63726 const struct sockaddr *src)
63727 {
63728 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
63729 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
63730 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
63731
63732 dsin->sin_family = ssin->sin_family;
63733 @@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
63734 if (sa->sa_family != AF_INET6)
63735 return 0;
63736
63737 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
63738 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
63739 }
63740
63741 #endif /* __KERNEL__ */
63742 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
63743 index e775689..9e206d9 100644
63744 --- a/include/linux/sunrpc/sched.h
63745 +++ b/include/linux/sunrpc/sched.h
63746 @@ -105,6 +105,7 @@ struct rpc_call_ops {
63747 void (*rpc_call_done)(struct rpc_task *, void *);
63748 void (*rpc_release)(void *);
63749 };
63750 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
63751
63752 struct rpc_task_setup {
63753 struct rpc_task *task;
63754 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
63755 index c14fe86..393245e 100644
63756 --- a/include/linux/sunrpc/svc_rdma.h
63757 +++ b/include/linux/sunrpc/svc_rdma.h
63758 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
63759 extern unsigned int svcrdma_max_requests;
63760 extern unsigned int svcrdma_max_req_size;
63761
63762 -extern atomic_t rdma_stat_recv;
63763 -extern atomic_t rdma_stat_read;
63764 -extern atomic_t rdma_stat_write;
63765 -extern atomic_t rdma_stat_sq_starve;
63766 -extern atomic_t rdma_stat_rq_starve;
63767 -extern atomic_t rdma_stat_rq_poll;
63768 -extern atomic_t rdma_stat_rq_prod;
63769 -extern atomic_t rdma_stat_sq_poll;
63770 -extern atomic_t rdma_stat_sq_prod;
63771 +extern atomic_unchecked_t rdma_stat_recv;
63772 +extern atomic_unchecked_t rdma_stat_read;
63773 +extern atomic_unchecked_t rdma_stat_write;
63774 +extern atomic_unchecked_t rdma_stat_sq_starve;
63775 +extern atomic_unchecked_t rdma_stat_rq_starve;
63776 +extern atomic_unchecked_t rdma_stat_rq_poll;
63777 +extern atomic_unchecked_t rdma_stat_rq_prod;
63778 +extern atomic_unchecked_t rdma_stat_sq_poll;
63779 +extern atomic_unchecked_t rdma_stat_sq_prod;
63780
63781 #define RPCRDMA_VERSION 1
63782
63783 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
63784 index bb9127d..34ab358 100644
63785 --- a/include/linux/sysctl.h
63786 +++ b/include/linux/sysctl.h
63787 @@ -155,7 +155,11 @@ enum
63788 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
63789 };
63790
63791 -
63792 +#ifdef CONFIG_PAX_SOFTMODE
63793 +enum {
63794 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
63795 +};
63796 +#endif
63797
63798 /* CTL_VM names: */
63799 enum
63800 @@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
63801
63802 extern int proc_dostring(struct ctl_table *, int,
63803 void __user *, size_t *, loff_t *);
63804 +extern int proc_dostring_modpriv(struct ctl_table *, int,
63805 + void __user *, size_t *, loff_t *);
63806 extern int proc_dointvec(struct ctl_table *, int,
63807 void __user *, size_t *, loff_t *);
63808 extern int proc_dointvec_minmax(struct ctl_table *, int,
63809 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
63810 index a71a292..51bd91d 100644
63811 --- a/include/linux/tracehook.h
63812 +++ b/include/linux/tracehook.h
63813 @@ -54,12 +54,12 @@ struct linux_binprm;
63814 /*
63815 * ptrace report for syscall entry and exit looks identical.
63816 */
63817 -static inline void ptrace_report_syscall(struct pt_regs *regs)
63818 +static inline int ptrace_report_syscall(struct pt_regs *regs)
63819 {
63820 int ptrace = current->ptrace;
63821
63822 if (!(ptrace & PT_PTRACED))
63823 - return;
63824 + return 0;
63825
63826 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
63827
63828 @@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
63829 send_sig(current->exit_code, current, 1);
63830 current->exit_code = 0;
63831 }
63832 +
63833 + return fatal_signal_pending(current);
63834 }
63835
63836 /**
63837 @@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
63838 static inline __must_check int tracehook_report_syscall_entry(
63839 struct pt_regs *regs)
63840 {
63841 - ptrace_report_syscall(regs);
63842 - return 0;
63843 + return ptrace_report_syscall(regs);
63844 }
63845
63846 /**
63847 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
63848 index ff7dc08..893e1bd 100644
63849 --- a/include/linux/tty_ldisc.h
63850 +++ b/include/linux/tty_ldisc.h
63851 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
63852
63853 struct module *owner;
63854
63855 - int refcount;
63856 + atomic_t refcount;
63857 };
63858
63859 struct tty_ldisc {
63860 diff --git a/include/linux/types.h b/include/linux/types.h
63861 index e5fa503..df6e8a4 100644
63862 --- a/include/linux/types.h
63863 +++ b/include/linux/types.h
63864 @@ -214,10 +214,26 @@ typedef struct {
63865 int counter;
63866 } atomic_t;
63867
63868 +#ifdef CONFIG_PAX_REFCOUNT
63869 +typedef struct {
63870 + int counter;
63871 +} atomic_unchecked_t;
63872 +#else
63873 +typedef atomic_t atomic_unchecked_t;
63874 +#endif
63875 +
63876 #ifdef CONFIG_64BIT
63877 typedef struct {
63878 long counter;
63879 } atomic64_t;
63880 +
63881 +#ifdef CONFIG_PAX_REFCOUNT
63882 +typedef struct {
63883 + long counter;
63884 +} atomic64_unchecked_t;
63885 +#else
63886 +typedef atomic64_t atomic64_unchecked_t;
63887 +#endif
63888 #endif
63889
63890 struct list_head {
63891 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
63892 index 5ca0951..53a2fff 100644
63893 --- a/include/linux/uaccess.h
63894 +++ b/include/linux/uaccess.h
63895 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
63896 long ret; \
63897 mm_segment_t old_fs = get_fs(); \
63898 \
63899 - set_fs(KERNEL_DS); \
63900 pagefault_disable(); \
63901 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
63902 - pagefault_enable(); \
63903 + set_fs(KERNEL_DS); \
63904 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
63905 set_fs(old_fs); \
63906 + pagefault_enable(); \
63907 ret; \
63908 })
63909
63910 @@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
63911 * Safely write to address @dst from the buffer at @src. If a kernel fault
63912 * happens, handle that and return -EFAULT.
63913 */
63914 -extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
63915 +extern long notrace probe_kernel_write(void *dst, const void *src, size_t size) __size_overflow(3);
63916 extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
63917
63918 #endif /* __LINUX_UACCESS_H__ */
63919 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63920 index 99c1b4d..bb94261 100644
63921 --- a/include/linux/unaligned/access_ok.h
63922 +++ b/include/linux/unaligned/access_ok.h
63923 @@ -6,32 +6,32 @@
63924
63925 static inline u16 get_unaligned_le16(const void *p)
63926 {
63927 - return le16_to_cpup((__le16 *)p);
63928 + return le16_to_cpup((const __le16 *)p);
63929 }
63930
63931 static inline u32 get_unaligned_le32(const void *p)
63932 {
63933 - return le32_to_cpup((__le32 *)p);
63934 + return le32_to_cpup((const __le32 *)p);
63935 }
63936
63937 static inline u64 get_unaligned_le64(const void *p)
63938 {
63939 - return le64_to_cpup((__le64 *)p);
63940 + return le64_to_cpup((const __le64 *)p);
63941 }
63942
63943 static inline u16 get_unaligned_be16(const void *p)
63944 {
63945 - return be16_to_cpup((__be16 *)p);
63946 + return be16_to_cpup((const __be16 *)p);
63947 }
63948
63949 static inline u32 get_unaligned_be32(const void *p)
63950 {
63951 - return be32_to_cpup((__be32 *)p);
63952 + return be32_to_cpup((const __be32 *)p);
63953 }
63954
63955 static inline u64 get_unaligned_be64(const void *p)
63956 {
63957 - return be64_to_cpup((__be64 *)p);
63958 + return be64_to_cpup((const __be64 *)p);
63959 }
63960
63961 static inline void put_unaligned_le16(u16 val, void *p)
63962 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
63963 index 0d3f988..000f101 100644
63964 --- a/include/linux/usb/renesas_usbhs.h
63965 +++ b/include/linux/usb/renesas_usbhs.h
63966 @@ -39,7 +39,7 @@ enum {
63967 */
63968 struct renesas_usbhs_driver_callback {
63969 int (*notify_hotplug)(struct platform_device *pdev);
63970 -};
63971 +} __no_const;
63972
63973 /*
63974 * callback functions for platform
63975 @@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
63976 * VBUS control is needed for Host
63977 */
63978 int (*set_vbus)(struct platform_device *pdev, int enable);
63979 -};
63980 +} __no_const;
63981
63982 /*
63983 * parameters for renesas usbhs
63984 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63985 index 6f8fbcf..8259001 100644
63986 --- a/include/linux/vermagic.h
63987 +++ b/include/linux/vermagic.h
63988 @@ -25,9 +25,35 @@
63989 #define MODULE_ARCH_VERMAGIC ""
63990 #endif
63991
63992 +#ifdef CONFIG_PAX_REFCOUNT
63993 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
63994 +#else
63995 +#define MODULE_PAX_REFCOUNT ""
63996 +#endif
63997 +
63998 +#ifdef CONSTIFY_PLUGIN
63999 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
64000 +#else
64001 +#define MODULE_CONSTIFY_PLUGIN ""
64002 +#endif
64003 +
64004 +#ifdef STACKLEAK_PLUGIN
64005 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
64006 +#else
64007 +#define MODULE_STACKLEAK_PLUGIN ""
64008 +#endif
64009 +
64010 +#ifdef CONFIG_GRKERNSEC
64011 +#define MODULE_GRSEC "GRSEC "
64012 +#else
64013 +#define MODULE_GRSEC ""
64014 +#endif
64015 +
64016 #define VERMAGIC_STRING \
64017 UTS_RELEASE " " \
64018 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
64019 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
64020 - MODULE_ARCH_VERMAGIC
64021 + MODULE_ARCH_VERMAGIC \
64022 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
64023 + MODULE_GRSEC
64024
64025 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
64026 index dcdfc2b..cce598d 100644
64027 --- a/include/linux/vmalloc.h
64028 +++ b/include/linux/vmalloc.h
64029 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
64030 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
64031 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
64032 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
64033 +
64034 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64035 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
64036 +#endif
64037 +
64038 /* bits [20..32] reserved for arch specific ioremap internals */
64039
64040 /*
64041 @@ -51,18 +56,18 @@ static inline void vmalloc_init(void)
64042 }
64043 #endif
64044
64045 -extern void *vmalloc(unsigned long size);
64046 -extern void *vzalloc(unsigned long size);
64047 -extern void *vmalloc_user(unsigned long size);
64048 -extern void *vmalloc_node(unsigned long size, int node);
64049 -extern void *vzalloc_node(unsigned long size, int node);
64050 -extern void *vmalloc_exec(unsigned long size);
64051 -extern void *vmalloc_32(unsigned long size);
64052 -extern void *vmalloc_32_user(unsigned long size);
64053 -extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
64054 +extern void *vmalloc(unsigned long size) __size_overflow(1);
64055 +extern void *vzalloc(unsigned long size) __size_overflow(1);
64056 +extern void *vmalloc_user(unsigned long size) __size_overflow(1);
64057 +extern void *vmalloc_node(unsigned long size, int node) __size_overflow(1);
64058 +extern void *vzalloc_node(unsigned long size, int node) __size_overflow(1);
64059 +extern void *vmalloc_exec(unsigned long size) __size_overflow(1);
64060 +extern void *vmalloc_32(unsigned long size) __size_overflow(1);
64061 +extern void *vmalloc_32_user(unsigned long size) __size_overflow(1);
64062 +extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __size_overflow(1);
64063 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
64064 unsigned long start, unsigned long end, gfp_t gfp_mask,
64065 - pgprot_t prot, int node, void *caller);
64066 + pgprot_t prot, int node, void *caller) __size_overflow(1);
64067 extern void vfree(const void *addr);
64068
64069 extern void *vmap(struct page **pages, unsigned int count,
64070 @@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
64071 extern void free_vm_area(struct vm_struct *area);
64072
64073 /* for /dev/kmem */
64074 -extern long vread(char *buf, char *addr, unsigned long count);
64075 -extern long vwrite(char *buf, char *addr, unsigned long count);
64076 +extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
64077 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
64078
64079 /*
64080 * Internals. Dont't use..
64081 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
64082 index 65efb92..137adbb 100644
64083 --- a/include/linux/vmstat.h
64084 +++ b/include/linux/vmstat.h
64085 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
64086 /*
64087 * Zone based page accounting with per cpu differentials.
64088 */
64089 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64090 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64091
64092 static inline void zone_page_state_add(long x, struct zone *zone,
64093 enum zone_stat_item item)
64094 {
64095 - atomic_long_add(x, &zone->vm_stat[item]);
64096 - atomic_long_add(x, &vm_stat[item]);
64097 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
64098 + atomic_long_add_unchecked(x, &vm_stat[item]);
64099 }
64100
64101 static inline unsigned long global_page_state(enum zone_stat_item item)
64102 {
64103 - long x = atomic_long_read(&vm_stat[item]);
64104 + long x = atomic_long_read_unchecked(&vm_stat[item]);
64105 #ifdef CONFIG_SMP
64106 if (x < 0)
64107 x = 0;
64108 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
64109 static inline unsigned long zone_page_state(struct zone *zone,
64110 enum zone_stat_item item)
64111 {
64112 - long x = atomic_long_read(&zone->vm_stat[item]);
64113 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64114 #ifdef CONFIG_SMP
64115 if (x < 0)
64116 x = 0;
64117 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
64118 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
64119 enum zone_stat_item item)
64120 {
64121 - long x = atomic_long_read(&zone->vm_stat[item]);
64122 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64123
64124 #ifdef CONFIG_SMP
64125 int cpu;
64126 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
64127
64128 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
64129 {
64130 - atomic_long_inc(&zone->vm_stat[item]);
64131 - atomic_long_inc(&vm_stat[item]);
64132 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
64133 + atomic_long_inc_unchecked(&vm_stat[item]);
64134 }
64135
64136 static inline void __inc_zone_page_state(struct page *page,
64137 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
64138
64139 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
64140 {
64141 - atomic_long_dec(&zone->vm_stat[item]);
64142 - atomic_long_dec(&vm_stat[item]);
64143 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
64144 + atomic_long_dec_unchecked(&vm_stat[item]);
64145 }
64146
64147 static inline void __dec_zone_page_state(struct page *page,
64148 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
64149 index e5d1220..ef6e406 100644
64150 --- a/include/linux/xattr.h
64151 +++ b/include/linux/xattr.h
64152 @@ -57,6 +57,11 @@
64153 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
64154 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
64155
64156 +/* User namespace */
64157 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
64158 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
64159 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
64160 +
64161 #ifdef __KERNEL__
64162
64163 #include <linux/types.h>
64164 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
64165 index 4aeff96..b378cdc 100644
64166 --- a/include/media/saa7146_vv.h
64167 +++ b/include/media/saa7146_vv.h
64168 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
64169 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
64170
64171 /* the extension can override this */
64172 - struct v4l2_ioctl_ops ops;
64173 + v4l2_ioctl_ops_no_const ops;
64174 /* pointer to the saa7146 core ops */
64175 const struct v4l2_ioctl_ops *core_ops;
64176
64177 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
64178 index c7c40f1..4f01585 100644
64179 --- a/include/media/v4l2-dev.h
64180 +++ b/include/media/v4l2-dev.h
64181 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
64182
64183
64184 struct v4l2_file_operations {
64185 - struct module *owner;
64186 + struct module * const owner;
64187 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
64188 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
64189 unsigned int (*poll) (struct file *, struct poll_table_struct *);
64190 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
64191 int (*open) (struct file *);
64192 int (*release) (struct file *);
64193 };
64194 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
64195
64196 /*
64197 * Newer version of video_device, handled by videodev2.c
64198 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
64199 index 3f5d60f..44210ed 100644
64200 --- a/include/media/v4l2-ioctl.h
64201 +++ b/include/media/v4l2-ioctl.h
64202 @@ -278,7 +278,7 @@ struct v4l2_ioctl_ops {
64203 long (*vidioc_default) (struct file *file, void *fh,
64204 bool valid_prio, int cmd, void *arg);
64205 };
64206 -
64207 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
64208
64209 /* v4l debugging and diagnostics */
64210
64211 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
64212 index 8d55251..dfe5b0a 100644
64213 --- a/include/net/caif/caif_hsi.h
64214 +++ b/include/net/caif/caif_hsi.h
64215 @@ -98,7 +98,7 @@ struct cfhsi_drv {
64216 void (*rx_done_cb) (struct cfhsi_drv *drv);
64217 void (*wake_up_cb) (struct cfhsi_drv *drv);
64218 void (*wake_down_cb) (struct cfhsi_drv *drv);
64219 -};
64220 +} __no_const;
64221
64222 /* Structure implemented by HSI device. */
64223 struct cfhsi_dev {
64224 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
64225 index 9e5425b..8136ffc 100644
64226 --- a/include/net/caif/cfctrl.h
64227 +++ b/include/net/caif/cfctrl.h
64228 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
64229 void (*radioset_rsp)(void);
64230 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
64231 struct cflayer *client_layer);
64232 -};
64233 +} __no_const;
64234
64235 /* Link Setup Parameters for CAIF-Links. */
64236 struct cfctrl_link_param {
64237 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
64238 struct cfctrl {
64239 struct cfsrvl serv;
64240 struct cfctrl_rsp res;
64241 - atomic_t req_seq_no;
64242 - atomic_t rsp_seq_no;
64243 + atomic_unchecked_t req_seq_no;
64244 + atomic_unchecked_t rsp_seq_no;
64245 struct list_head list;
64246 /* Protects from simultaneous access to first_req list */
64247 spinlock_t info_list_lock;
64248 diff --git a/include/net/flow.h b/include/net/flow.h
64249 index 6c469db..7743b8e 100644
64250 --- a/include/net/flow.h
64251 +++ b/include/net/flow.h
64252 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
64253
64254 extern void flow_cache_flush(void);
64255 extern void flow_cache_flush_deferred(void);
64256 -extern atomic_t flow_cache_genid;
64257 +extern atomic_unchecked_t flow_cache_genid;
64258
64259 #endif
64260 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
64261 index b94765e..053f68b 100644
64262 --- a/include/net/inetpeer.h
64263 +++ b/include/net/inetpeer.h
64264 @@ -48,8 +48,8 @@ struct inet_peer {
64265 */
64266 union {
64267 struct {
64268 - atomic_t rid; /* Frag reception counter */
64269 - atomic_t ip_id_count; /* IP ID for the next packet */
64270 + atomic_unchecked_t rid; /* Frag reception counter */
64271 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
64272 __u32 tcp_ts;
64273 __u32 tcp_ts_stamp;
64274 };
64275 @@ -115,11 +115,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
64276 more++;
64277 inet_peer_refcheck(p);
64278 do {
64279 - old = atomic_read(&p->ip_id_count);
64280 + old = atomic_read_unchecked(&p->ip_id_count);
64281 new = old + more;
64282 if (!new)
64283 new = 1;
64284 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
64285 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
64286 return new;
64287 }
64288
64289 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
64290 index 10422ef..662570f 100644
64291 --- a/include/net/ip_fib.h
64292 +++ b/include/net/ip_fib.h
64293 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
64294
64295 #define FIB_RES_SADDR(net, res) \
64296 ((FIB_RES_NH(res).nh_saddr_genid == \
64297 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
64298 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
64299 FIB_RES_NH(res).nh_saddr : \
64300 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
64301 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
64302 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
64303 index ebe517f..1bd286b 100644
64304 --- a/include/net/ip_vs.h
64305 +++ b/include/net/ip_vs.h
64306 @@ -509,7 +509,7 @@ struct ip_vs_conn {
64307 struct ip_vs_conn *control; /* Master control connection */
64308 atomic_t n_control; /* Number of controlled ones */
64309 struct ip_vs_dest *dest; /* real server */
64310 - atomic_t in_pkts; /* incoming packet counter */
64311 + atomic_unchecked_t in_pkts; /* incoming packet counter */
64312
64313 /* packet transmitter for different forwarding methods. If it
64314 mangles the packet, it must return NF_DROP or better NF_STOLEN,
64315 @@ -647,7 +647,7 @@ struct ip_vs_dest {
64316 __be16 port; /* port number of the server */
64317 union nf_inet_addr addr; /* IP address of the server */
64318 volatile unsigned flags; /* dest status flags */
64319 - atomic_t conn_flags; /* flags to copy to conn */
64320 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
64321 atomic_t weight; /* server weight */
64322
64323 atomic_t refcnt; /* reference counter */
64324 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
64325 index 69b610a..fe3962c 100644
64326 --- a/include/net/irda/ircomm_core.h
64327 +++ b/include/net/irda/ircomm_core.h
64328 @@ -51,7 +51,7 @@ typedef struct {
64329 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
64330 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
64331 struct ircomm_info *);
64332 -} call_t;
64333 +} __no_const call_t;
64334
64335 struct ircomm_cb {
64336 irda_queue_t queue;
64337 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
64338 index 59ba38bc..d515662 100644
64339 --- a/include/net/irda/ircomm_tty.h
64340 +++ b/include/net/irda/ircomm_tty.h
64341 @@ -35,6 +35,7 @@
64342 #include <linux/termios.h>
64343 #include <linux/timer.h>
64344 #include <linux/tty.h> /* struct tty_struct */
64345 +#include <asm/local.h>
64346
64347 #include <net/irda/irias_object.h>
64348 #include <net/irda/ircomm_core.h>
64349 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
64350 unsigned short close_delay;
64351 unsigned short closing_wait; /* time to wait before closing */
64352
64353 - int open_count;
64354 - int blocked_open; /* # of blocked opens */
64355 + local_t open_count;
64356 + local_t blocked_open; /* # of blocked opens */
64357
64358 /* Protect concurent access to :
64359 * o self->open_count
64360 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
64361 index 0954ec9..7413562 100644
64362 --- a/include/net/iucv/af_iucv.h
64363 +++ b/include/net/iucv/af_iucv.h
64364 @@ -138,7 +138,7 @@ struct iucv_sock {
64365 struct iucv_sock_list {
64366 struct hlist_head head;
64367 rwlock_t lock;
64368 - atomic_t autobind_name;
64369 + atomic_unchecked_t autobind_name;
64370 };
64371
64372 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
64373 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
64374 index 34c996f..bb3b4d4 100644
64375 --- a/include/net/neighbour.h
64376 +++ b/include/net/neighbour.h
64377 @@ -123,7 +123,7 @@ struct neigh_ops {
64378 void (*error_report)(struct neighbour *, struct sk_buff *);
64379 int (*output)(struct neighbour *, struct sk_buff *);
64380 int (*connected_output)(struct neighbour *, struct sk_buff *);
64381 -};
64382 +} __do_const;
64383
64384 struct pneigh_entry {
64385 struct pneigh_entry *next;
64386 diff --git a/include/net/netlink.h b/include/net/netlink.h
64387 index cb1f350..3279d2c 100644
64388 --- a/include/net/netlink.h
64389 +++ b/include/net/netlink.h
64390 @@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
64391 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
64392 {
64393 if (mark)
64394 - skb_trim(skb, (unsigned char *) mark - skb->data);
64395 + skb_trim(skb, (const unsigned char *) mark - skb->data);
64396 }
64397
64398 /**
64399 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
64400 index bbd023a..97c6d0d 100644
64401 --- a/include/net/netns/ipv4.h
64402 +++ b/include/net/netns/ipv4.h
64403 @@ -57,8 +57,8 @@ struct netns_ipv4 {
64404 unsigned int sysctl_ping_group_range[2];
64405 long sysctl_tcp_mem[3];
64406
64407 - atomic_t rt_genid;
64408 - atomic_t dev_addr_genid;
64409 + atomic_unchecked_t rt_genid;
64410 + atomic_unchecked_t dev_addr_genid;
64411
64412 #ifdef CONFIG_IP_MROUTE
64413 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
64414 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
64415 index d368561..96aaa17 100644
64416 --- a/include/net/sctp/sctp.h
64417 +++ b/include/net/sctp/sctp.h
64418 @@ -318,9 +318,9 @@ do { \
64419
64420 #else /* SCTP_DEBUG */
64421
64422 -#define SCTP_DEBUG_PRINTK(whatever...)
64423 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
64424 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
64425 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
64426 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
64427 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
64428 #define SCTP_ENABLE_DEBUG
64429 #define SCTP_DISABLE_DEBUG
64430 #define SCTP_ASSERT(expr, str, func)
64431 diff --git a/include/net/sock.h b/include/net/sock.h
64432 index 91c1c8b..15ae923 100644
64433 --- a/include/net/sock.h
64434 +++ b/include/net/sock.h
64435 @@ -299,7 +299,7 @@ struct sock {
64436 #ifdef CONFIG_RPS
64437 __u32 sk_rxhash;
64438 #endif
64439 - atomic_t sk_drops;
64440 + atomic_unchecked_t sk_drops;
64441 int sk_rcvbuf;
64442
64443 struct sk_filter __rcu *sk_filter;
64444 @@ -1660,7 +1660,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
64445 }
64446
64447 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
64448 - char __user *from, char *to,
64449 + char __user *from, unsigned char *to,
64450 int copy, int offset)
64451 {
64452 if (skb->ip_summed == CHECKSUM_NONE) {
64453 diff --git a/include/net/tcp.h b/include/net/tcp.h
64454 index 2d80c29..aa07caf 100644
64455 --- a/include/net/tcp.h
64456 +++ b/include/net/tcp.h
64457 @@ -1426,7 +1426,7 @@ struct tcp_seq_afinfo {
64458 char *name;
64459 sa_family_t family;
64460 const struct file_operations *seq_fops;
64461 - struct seq_operations seq_ops;
64462 + seq_operations_no_const seq_ops;
64463 };
64464
64465 struct tcp_iter_state {
64466 diff --git a/include/net/udp.h b/include/net/udp.h
64467 index e39592f..fef9680 100644
64468 --- a/include/net/udp.h
64469 +++ b/include/net/udp.h
64470 @@ -243,7 +243,7 @@ struct udp_seq_afinfo {
64471 sa_family_t family;
64472 struct udp_table *udp_table;
64473 const struct file_operations *seq_fops;
64474 - struct seq_operations seq_ops;
64475 + seq_operations_no_const seq_ops;
64476 };
64477
64478 struct udp_iter_state {
64479 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
64480 index 89174e2..1f82598 100644
64481 --- a/include/net/xfrm.h
64482 +++ b/include/net/xfrm.h
64483 @@ -505,7 +505,7 @@ struct xfrm_policy {
64484 struct timer_list timer;
64485
64486 struct flow_cache_object flo;
64487 - atomic_t genid;
64488 + atomic_unchecked_t genid;
64489 u32 priority;
64490 u32 index;
64491 struct xfrm_mark mark;
64492 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
64493 index 1a046b1..ee0bef0 100644
64494 --- a/include/rdma/iw_cm.h
64495 +++ b/include/rdma/iw_cm.h
64496 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
64497 int backlog);
64498
64499 int (*destroy_listen)(struct iw_cm_id *cm_id);
64500 -};
64501 +} __no_const;
64502
64503 /**
64504 * iw_create_cm_id - Create an IW CM identifier.
64505 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
64506 index 6a3922f..0b73022 100644
64507 --- a/include/scsi/libfc.h
64508 +++ b/include/scsi/libfc.h
64509 @@ -748,6 +748,7 @@ struct libfc_function_template {
64510 */
64511 void (*disc_stop_final) (struct fc_lport *);
64512 };
64513 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
64514
64515 /**
64516 * struct fc_disc - Discovery context
64517 @@ -851,7 +852,7 @@ struct fc_lport {
64518 struct fc_vport *vport;
64519
64520 /* Operational Information */
64521 - struct libfc_function_template tt;
64522 + libfc_function_template_no_const tt;
64523 u8 link_up;
64524 u8 qfull;
64525 enum fc_lport_state state;
64526 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
64527 index 77273f2..dd4031f 100644
64528 --- a/include/scsi/scsi_device.h
64529 +++ b/include/scsi/scsi_device.h
64530 @@ -161,9 +161,9 @@ struct scsi_device {
64531 unsigned int max_device_blocked; /* what device_blocked counts down from */
64532 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
64533
64534 - atomic_t iorequest_cnt;
64535 - atomic_t iodone_cnt;
64536 - atomic_t ioerr_cnt;
64537 + atomic_unchecked_t iorequest_cnt;
64538 + atomic_unchecked_t iodone_cnt;
64539 + atomic_unchecked_t ioerr_cnt;
64540
64541 struct device sdev_gendev,
64542 sdev_dev;
64543 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
64544 index 2a65167..91e01f8 100644
64545 --- a/include/scsi/scsi_transport_fc.h
64546 +++ b/include/scsi/scsi_transport_fc.h
64547 @@ -711,7 +711,7 @@ struct fc_function_template {
64548 unsigned long show_host_system_hostname:1;
64549
64550 unsigned long disable_target_scan:1;
64551 -};
64552 +} __do_const;
64553
64554
64555 /**
64556 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
64557 index 030b87c..98a6954 100644
64558 --- a/include/sound/ak4xxx-adda.h
64559 +++ b/include/sound/ak4xxx-adda.h
64560 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
64561 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
64562 unsigned char val);
64563 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
64564 -};
64565 +} __no_const;
64566
64567 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
64568
64569 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
64570 index 8c05e47..2b5df97 100644
64571 --- a/include/sound/hwdep.h
64572 +++ b/include/sound/hwdep.h
64573 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
64574 struct snd_hwdep_dsp_status *status);
64575 int (*dsp_load)(struct snd_hwdep *hw,
64576 struct snd_hwdep_dsp_image *image);
64577 -};
64578 +} __no_const;
64579
64580 struct snd_hwdep {
64581 struct snd_card *card;
64582 diff --git a/include/sound/info.h b/include/sound/info.h
64583 index 9ca1a49..aba1728 100644
64584 --- a/include/sound/info.h
64585 +++ b/include/sound/info.h
64586 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
64587 struct snd_info_buffer *buffer);
64588 void (*write)(struct snd_info_entry *entry,
64589 struct snd_info_buffer *buffer);
64590 -};
64591 +} __no_const;
64592
64593 struct snd_info_entry_ops {
64594 int (*open)(struct snd_info_entry *entry,
64595 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
64596 index 0cf91b2..b70cae4 100644
64597 --- a/include/sound/pcm.h
64598 +++ b/include/sound/pcm.h
64599 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
64600 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
64601 int (*ack)(struct snd_pcm_substream *substream);
64602 };
64603 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
64604
64605 /*
64606 *
64607 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
64608 index af1b49e..a5d55a5 100644
64609 --- a/include/sound/sb16_csp.h
64610 +++ b/include/sound/sb16_csp.h
64611 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
64612 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
64613 int (*csp_stop) (struct snd_sb_csp * p);
64614 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
64615 -};
64616 +} __no_const;
64617
64618 /*
64619 * CSP private data
64620 diff --git a/include/sound/soc.h b/include/sound/soc.h
64621 index 0992dff..bb366fe 100644
64622 --- a/include/sound/soc.h
64623 +++ b/include/sound/soc.h
64624 @@ -682,7 +682,7 @@ struct snd_soc_platform_driver {
64625 /* platform IO - used for platform DAPM */
64626 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
64627 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
64628 -};
64629 +} __do_const;
64630
64631 struct snd_soc_platform {
64632 const char *name;
64633 @@ -852,7 +852,7 @@ struct snd_soc_pcm_runtime {
64634 struct snd_soc_dai_link *dai_link;
64635 struct mutex pcm_mutex;
64636 enum snd_soc_pcm_subclass pcm_subclass;
64637 - struct snd_pcm_ops ops;
64638 + snd_pcm_ops_no_const ops;
64639
64640 unsigned int complete:1;
64641 unsigned int dev_registered:1;
64642 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
64643 index 444cd6b..3327cc5 100644
64644 --- a/include/sound/ymfpci.h
64645 +++ b/include/sound/ymfpci.h
64646 @@ -358,7 +358,7 @@ struct snd_ymfpci {
64647 spinlock_t reg_lock;
64648 spinlock_t voice_lock;
64649 wait_queue_head_t interrupt_sleep;
64650 - atomic_t interrupt_sleep_count;
64651 + atomic_unchecked_t interrupt_sleep_count;
64652 struct snd_info_entry *proc_entry;
64653 const struct firmware *dsp_microcode;
64654 const struct firmware *controller_microcode;
64655 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
64656 index fe73eb8..56388b1 100644
64657 --- a/include/target/target_core_base.h
64658 +++ b/include/target/target_core_base.h
64659 @@ -443,7 +443,7 @@ struct t10_reservation_ops {
64660 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
64661 int (*t10_pr_register)(struct se_cmd *);
64662 int (*t10_pr_clear)(struct se_cmd *);
64663 -};
64664 +} __no_const;
64665
64666 struct t10_reservation {
64667 /* Reservation effects all target ports */
64668 @@ -561,8 +561,8 @@ struct se_cmd {
64669 atomic_t t_se_count;
64670 atomic_t t_task_cdbs_left;
64671 atomic_t t_task_cdbs_ex_left;
64672 - atomic_t t_task_cdbs_sent;
64673 - atomic_t t_transport_aborted;
64674 + atomic_unchecked_t t_task_cdbs_sent;
64675 + atomic_unchecked_t t_transport_aborted;
64676 atomic_t t_transport_active;
64677 atomic_t t_transport_complete;
64678 atomic_t t_transport_queue_active;
64679 @@ -799,7 +799,7 @@ struct se_device {
64680 spinlock_t stats_lock;
64681 /* Active commands on this virtual SE device */
64682 atomic_t simple_cmds;
64683 - atomic_t dev_ordered_id;
64684 + atomic_unchecked_t dev_ordered_id;
64685 atomic_t execute_tasks;
64686 atomic_t dev_ordered_sync;
64687 atomic_t dev_qf_count;
64688 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
64689 index 1c09820..7f5ec79 100644
64690 --- a/include/trace/events/irq.h
64691 +++ b/include/trace/events/irq.h
64692 @@ -36,7 +36,7 @@ struct softirq_action;
64693 */
64694 TRACE_EVENT(irq_handler_entry,
64695
64696 - TP_PROTO(int irq, struct irqaction *action),
64697 + TP_PROTO(int irq, const struct irqaction *action),
64698
64699 TP_ARGS(irq, action),
64700
64701 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
64702 */
64703 TRACE_EVENT(irq_handler_exit,
64704
64705 - TP_PROTO(int irq, struct irqaction *action, int ret),
64706 + TP_PROTO(int irq, const struct irqaction *action, int ret),
64707
64708 TP_ARGS(irq, action, ret),
64709
64710 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
64711 index c41f308..6918de3 100644
64712 --- a/include/video/udlfb.h
64713 +++ b/include/video/udlfb.h
64714 @@ -52,10 +52,10 @@ struct dlfb_data {
64715 u32 pseudo_palette[256];
64716 int blank_mode; /*one of FB_BLANK_ */
64717 /* blit-only rendering path metrics, exposed through sysfs */
64718 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64719 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
64720 - atomic_t bytes_sent; /* to usb, after compression including overhead */
64721 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
64722 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64723 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
64724 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
64725 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
64726 };
64727
64728 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
64729 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
64730 index 0993a22..32ba2fe 100644
64731 --- a/include/video/uvesafb.h
64732 +++ b/include/video/uvesafb.h
64733 @@ -177,6 +177,7 @@ struct uvesafb_par {
64734 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
64735 u8 pmi_setpal; /* PMI for palette changes */
64736 u16 *pmi_base; /* protected mode interface location */
64737 + u8 *pmi_code; /* protected mode code location */
64738 void *pmi_start;
64739 void *pmi_pal;
64740 u8 *vbe_state_orig; /*
64741 diff --git a/init/Kconfig b/init/Kconfig
64742 index 3f42cd6..613f41d 100644
64743 --- a/init/Kconfig
64744 +++ b/init/Kconfig
64745 @@ -799,6 +799,7 @@ endif # CGROUPS
64746
64747 config CHECKPOINT_RESTORE
64748 bool "Checkpoint/restore support" if EXPERT
64749 + depends on !GRKERNSEC
64750 default n
64751 help
64752 Enables additional kernel features in a sake of checkpoint/restore.
64753 @@ -1249,7 +1250,7 @@ config SLUB_DEBUG
64754
64755 config COMPAT_BRK
64756 bool "Disable heap randomization"
64757 - default y
64758 + default n
64759 help
64760 Randomizing heap placement makes heap exploits harder, but it
64761 also breaks ancient binaries (including anything libc5 based).
64762 diff --git a/init/do_mounts.c b/init/do_mounts.c
64763 index 2974c8b..0b863ae 100644
64764 --- a/init/do_mounts.c
64765 +++ b/init/do_mounts.c
64766 @@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
64767 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
64768 {
64769 struct super_block *s;
64770 - int err = sys_mount(name, "/root", fs, flags, data);
64771 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
64772 if (err)
64773 return err;
64774
64775 - sys_chdir((const char __user __force *)"/root");
64776 + sys_chdir((const char __force_user *)"/root");
64777 s = current->fs->pwd.dentry->d_sb;
64778 ROOT_DEV = s->s_dev;
64779 printk(KERN_INFO
64780 @@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
64781 va_start(args, fmt);
64782 vsprintf(buf, fmt, args);
64783 va_end(args);
64784 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
64785 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
64786 if (fd >= 0) {
64787 sys_ioctl(fd, FDEJECT, 0);
64788 sys_close(fd);
64789 }
64790 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
64791 - fd = sys_open("/dev/console", O_RDWR, 0);
64792 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
64793 if (fd >= 0) {
64794 sys_ioctl(fd, TCGETS, (long)&termios);
64795 termios.c_lflag &= ~ICANON;
64796 sys_ioctl(fd, TCSETSF, (long)&termios);
64797 - sys_read(fd, &c, 1);
64798 + sys_read(fd, (char __user *)&c, 1);
64799 termios.c_lflag |= ICANON;
64800 sys_ioctl(fd, TCSETSF, (long)&termios);
64801 sys_close(fd);
64802 @@ -555,6 +555,6 @@ void __init prepare_namespace(void)
64803 mount_root();
64804 out:
64805 devtmpfs_mount("dev");
64806 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64807 - sys_chroot((const char __user __force *)".");
64808 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64809 + sys_chroot((const char __force_user *)".");
64810 }
64811 diff --git a/init/do_mounts.h b/init/do_mounts.h
64812 index f5b978a..69dbfe8 100644
64813 --- a/init/do_mounts.h
64814 +++ b/init/do_mounts.h
64815 @@ -15,15 +15,15 @@ extern int root_mountflags;
64816
64817 static inline int create_dev(char *name, dev_t dev)
64818 {
64819 - sys_unlink(name);
64820 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
64821 + sys_unlink((char __force_user *)name);
64822 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
64823 }
64824
64825 #if BITS_PER_LONG == 32
64826 static inline u32 bstat(char *name)
64827 {
64828 struct stat64 stat;
64829 - if (sys_stat64(name, &stat) != 0)
64830 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
64831 return 0;
64832 if (!S_ISBLK(stat.st_mode))
64833 return 0;
64834 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
64835 static inline u32 bstat(char *name)
64836 {
64837 struct stat stat;
64838 - if (sys_newstat(name, &stat) != 0)
64839 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
64840 return 0;
64841 if (!S_ISBLK(stat.st_mode))
64842 return 0;
64843 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
64844 index 3098a38..253064e 100644
64845 --- a/init/do_mounts_initrd.c
64846 +++ b/init/do_mounts_initrd.c
64847 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
64848 create_dev("/dev/root.old", Root_RAM0);
64849 /* mount initrd on rootfs' /root */
64850 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
64851 - sys_mkdir("/old", 0700);
64852 - root_fd = sys_open("/", 0, 0);
64853 - old_fd = sys_open("/old", 0, 0);
64854 + sys_mkdir((const char __force_user *)"/old", 0700);
64855 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
64856 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
64857 /* move initrd over / and chdir/chroot in initrd root */
64858 - sys_chdir("/root");
64859 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64860 - sys_chroot(".");
64861 + sys_chdir((const char __force_user *)"/root");
64862 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64863 + sys_chroot((const char __force_user *)".");
64864
64865 /*
64866 * In case that a resume from disk is carried out by linuxrc or one of
64867 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
64868
64869 /* move initrd to rootfs' /old */
64870 sys_fchdir(old_fd);
64871 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
64872 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
64873 /* switch root and cwd back to / of rootfs */
64874 sys_fchdir(root_fd);
64875 - sys_chroot(".");
64876 + sys_chroot((const char __force_user *)".");
64877 sys_close(old_fd);
64878 sys_close(root_fd);
64879
64880 if (new_decode_dev(real_root_dev) == Root_RAM0) {
64881 - sys_chdir("/old");
64882 + sys_chdir((const char __force_user *)"/old");
64883 return;
64884 }
64885
64886 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
64887 mount_root();
64888
64889 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64890 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
64891 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
64892 if (!error)
64893 printk("okay\n");
64894 else {
64895 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
64896 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
64897 if (error == -ENOENT)
64898 printk("/initrd does not exist. Ignored.\n");
64899 else
64900 printk("failed\n");
64901 printk(KERN_NOTICE "Unmounting old root\n");
64902 - sys_umount("/old", MNT_DETACH);
64903 + sys_umount((char __force_user *)"/old", MNT_DETACH);
64904 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64905 if (fd < 0) {
64906 error = fd;
64907 @@ -116,11 +116,11 @@ int __init initrd_load(void)
64908 * mounted in the normal path.
64909 */
64910 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64911 - sys_unlink("/initrd.image");
64912 + sys_unlink((const char __force_user *)"/initrd.image");
64913 handle_initrd();
64914 return 1;
64915 }
64916 }
64917 - sys_unlink("/initrd.image");
64918 + sys_unlink((const char __force_user *)"/initrd.image");
64919 return 0;
64920 }
64921 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64922 index 32c4799..c27ee74 100644
64923 --- a/init/do_mounts_md.c
64924 +++ b/init/do_mounts_md.c
64925 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64926 partitioned ? "_d" : "", minor,
64927 md_setup_args[ent].device_names);
64928
64929 - fd = sys_open(name, 0, 0);
64930 + fd = sys_open((char __force_user *)name, 0, 0);
64931 if (fd < 0) {
64932 printk(KERN_ERR "md: open failed - cannot start "
64933 "array %s\n", name);
64934 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64935 * array without it
64936 */
64937 sys_close(fd);
64938 - fd = sys_open(name, 0, 0);
64939 + fd = sys_open((char __force_user *)name, 0, 0);
64940 sys_ioctl(fd, BLKRRPART, 0);
64941 }
64942 sys_close(fd);
64943 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64944
64945 wait_for_device_probe();
64946
64947 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64948 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64949 if (fd >= 0) {
64950 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64951 sys_close(fd);
64952 diff --git a/init/initramfs.c b/init/initramfs.c
64953 index 8216c30..25e8e32 100644
64954 --- a/init/initramfs.c
64955 +++ b/init/initramfs.c
64956 @@ -74,7 +74,7 @@ static void __init free_hash(void)
64957 }
64958 }
64959
64960 -static long __init do_utime(char __user *filename, time_t mtime)
64961 +static long __init do_utime(__force char __user *filename, time_t mtime)
64962 {
64963 struct timespec t[2];
64964
64965 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
64966 struct dir_entry *de, *tmp;
64967 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64968 list_del(&de->list);
64969 - do_utime(de->name, de->mtime);
64970 + do_utime((char __force_user *)de->name, de->mtime);
64971 kfree(de->name);
64972 kfree(de);
64973 }
64974 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
64975 if (nlink >= 2) {
64976 char *old = find_link(major, minor, ino, mode, collected);
64977 if (old)
64978 - return (sys_link(old, collected) < 0) ? -1 : 1;
64979 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64980 }
64981 return 0;
64982 }
64983 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
64984 {
64985 struct stat st;
64986
64987 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64988 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64989 if (S_ISDIR(st.st_mode))
64990 - sys_rmdir(path);
64991 + sys_rmdir((char __force_user *)path);
64992 else
64993 - sys_unlink(path);
64994 + sys_unlink((char __force_user *)path);
64995 }
64996 }
64997
64998 @@ -305,7 +305,7 @@ static int __init do_name(void)
64999 int openflags = O_WRONLY|O_CREAT;
65000 if (ml != 1)
65001 openflags |= O_TRUNC;
65002 - wfd = sys_open(collected, openflags, mode);
65003 + wfd = sys_open((char __force_user *)collected, openflags, mode);
65004
65005 if (wfd >= 0) {
65006 sys_fchown(wfd, uid, gid);
65007 @@ -317,17 +317,17 @@ static int __init do_name(void)
65008 }
65009 }
65010 } else if (S_ISDIR(mode)) {
65011 - sys_mkdir(collected, mode);
65012 - sys_chown(collected, uid, gid);
65013 - sys_chmod(collected, mode);
65014 + sys_mkdir((char __force_user *)collected, mode);
65015 + sys_chown((char __force_user *)collected, uid, gid);
65016 + sys_chmod((char __force_user *)collected, mode);
65017 dir_add(collected, mtime);
65018 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
65019 S_ISFIFO(mode) || S_ISSOCK(mode)) {
65020 if (maybe_link() == 0) {
65021 - sys_mknod(collected, mode, rdev);
65022 - sys_chown(collected, uid, gid);
65023 - sys_chmod(collected, mode);
65024 - do_utime(collected, mtime);
65025 + sys_mknod((char __force_user *)collected, mode, rdev);
65026 + sys_chown((char __force_user *)collected, uid, gid);
65027 + sys_chmod((char __force_user *)collected, mode);
65028 + do_utime((char __force_user *)collected, mtime);
65029 }
65030 }
65031 return 0;
65032 @@ -336,15 +336,15 @@ static int __init do_name(void)
65033 static int __init do_copy(void)
65034 {
65035 if (count >= body_len) {
65036 - sys_write(wfd, victim, body_len);
65037 + sys_write(wfd, (char __force_user *)victim, body_len);
65038 sys_close(wfd);
65039 - do_utime(vcollected, mtime);
65040 + do_utime((char __force_user *)vcollected, mtime);
65041 kfree(vcollected);
65042 eat(body_len);
65043 state = SkipIt;
65044 return 0;
65045 } else {
65046 - sys_write(wfd, victim, count);
65047 + sys_write(wfd, (char __force_user *)victim, count);
65048 body_len -= count;
65049 eat(count);
65050 return 1;
65051 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
65052 {
65053 collected[N_ALIGN(name_len) + body_len] = '\0';
65054 clean_path(collected, 0);
65055 - sys_symlink(collected + N_ALIGN(name_len), collected);
65056 - sys_lchown(collected, uid, gid);
65057 - do_utime(collected, mtime);
65058 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
65059 + sys_lchown((char __force_user *)collected, uid, gid);
65060 + do_utime((char __force_user *)collected, mtime);
65061 state = SkipIt;
65062 next_state = Reset;
65063 return 0;
65064 diff --git a/init/main.c b/init/main.c
65065 index ff49a6d..5fa0429 100644
65066 --- a/init/main.c
65067 +++ b/init/main.c
65068 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
65069 extern void tc_init(void);
65070 #endif
65071
65072 +extern void grsecurity_init(void);
65073 +
65074 /*
65075 * Debug helper: via this flag we know that we are in 'early bootup code'
65076 * where only the boot processor is running with IRQ disabled. This means
65077 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
65078
65079 __setup("reset_devices", set_reset_devices);
65080
65081 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
65082 +extern char pax_enter_kernel_user[];
65083 +extern char pax_exit_kernel_user[];
65084 +extern pgdval_t clone_pgd_mask;
65085 +#endif
65086 +
65087 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
65088 +static int __init setup_pax_nouderef(char *str)
65089 +{
65090 +#ifdef CONFIG_X86_32
65091 + unsigned int cpu;
65092 + struct desc_struct *gdt;
65093 +
65094 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
65095 + gdt = get_cpu_gdt_table(cpu);
65096 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
65097 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
65098 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
65099 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
65100 + }
65101 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
65102 +#else
65103 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
65104 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
65105 + clone_pgd_mask = ~(pgdval_t)0UL;
65106 +#endif
65107 +
65108 + return 0;
65109 +}
65110 +early_param("pax_nouderef", setup_pax_nouderef);
65111 +#endif
65112 +
65113 +#ifdef CONFIG_PAX_SOFTMODE
65114 +int pax_softmode;
65115 +
65116 +static int __init setup_pax_softmode(char *str)
65117 +{
65118 + get_option(&str, &pax_softmode);
65119 + return 1;
65120 +}
65121 +__setup("pax_softmode=", setup_pax_softmode);
65122 +#endif
65123 +
65124 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
65125 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
65126 static const char *panic_later, *panic_param;
65127 @@ -675,6 +720,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
65128 {
65129 int count = preempt_count();
65130 int ret;
65131 + const char *msg1 = "", *msg2 = "";
65132
65133 if (initcall_debug)
65134 ret = do_one_initcall_debug(fn);
65135 @@ -687,15 +733,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
65136 sprintf(msgbuf, "error code %d ", ret);
65137
65138 if (preempt_count() != count) {
65139 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
65140 + msg1 = " preemption imbalance";
65141 preempt_count() = count;
65142 }
65143 if (irqs_disabled()) {
65144 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
65145 + msg2 = " disabled interrupts";
65146 local_irq_enable();
65147 }
65148 - if (msgbuf[0]) {
65149 - printk("initcall %pF returned with %s\n", fn, msgbuf);
65150 + if (msgbuf[0] || *msg1 || *msg2) {
65151 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
65152 }
65153
65154 return ret;
65155 @@ -814,7 +860,7 @@ static int __init kernel_init(void * unused)
65156 do_basic_setup();
65157
65158 /* Open the /dev/console on the rootfs, this should never fail */
65159 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
65160 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
65161 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
65162
65163 (void) sys_dup(0);
65164 @@ -827,11 +873,13 @@ static int __init kernel_init(void * unused)
65165 if (!ramdisk_execute_command)
65166 ramdisk_execute_command = "/init";
65167
65168 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
65169 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
65170 ramdisk_execute_command = NULL;
65171 prepare_namespace();
65172 }
65173
65174 + grsecurity_init();
65175 +
65176 /*
65177 * Ok, we have completed the initial bootup, and
65178 * we're essentially up and running. Get rid of the
65179 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
65180 index 86ee272..773d937 100644
65181 --- a/ipc/mqueue.c
65182 +++ b/ipc/mqueue.c
65183 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
65184 mq_bytes = (mq_msg_tblsz +
65185 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
65186
65187 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
65188 spin_lock(&mq_lock);
65189 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
65190 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
65191 diff --git a/ipc/msg.c b/ipc/msg.c
65192 index 7385de2..a8180e08 100644
65193 --- a/ipc/msg.c
65194 +++ b/ipc/msg.c
65195 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
65196 return security_msg_queue_associate(msq, msgflg);
65197 }
65198
65199 +static struct ipc_ops msg_ops = {
65200 + .getnew = newque,
65201 + .associate = msg_security,
65202 + .more_checks = NULL
65203 +};
65204 +
65205 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
65206 {
65207 struct ipc_namespace *ns;
65208 - struct ipc_ops msg_ops;
65209 struct ipc_params msg_params;
65210
65211 ns = current->nsproxy->ipc_ns;
65212
65213 - msg_ops.getnew = newque;
65214 - msg_ops.associate = msg_security;
65215 - msg_ops.more_checks = NULL;
65216 -
65217 msg_params.key = key;
65218 msg_params.flg = msgflg;
65219
65220 diff --git a/ipc/sem.c b/ipc/sem.c
65221 index 5215a81..cfc0cac 100644
65222 --- a/ipc/sem.c
65223 +++ b/ipc/sem.c
65224 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
65225 return 0;
65226 }
65227
65228 +static struct ipc_ops sem_ops = {
65229 + .getnew = newary,
65230 + .associate = sem_security,
65231 + .more_checks = sem_more_checks
65232 +};
65233 +
65234 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65235 {
65236 struct ipc_namespace *ns;
65237 - struct ipc_ops sem_ops;
65238 struct ipc_params sem_params;
65239
65240 ns = current->nsproxy->ipc_ns;
65241 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65242 if (nsems < 0 || nsems > ns->sc_semmsl)
65243 return -EINVAL;
65244
65245 - sem_ops.getnew = newary;
65246 - sem_ops.associate = sem_security;
65247 - sem_ops.more_checks = sem_more_checks;
65248 -
65249 sem_params.key = key;
65250 sem_params.flg = semflg;
65251 sem_params.u.nsems = nsems;
65252 diff --git a/ipc/shm.c b/ipc/shm.c
65253 index b76be5b..859e750 100644
65254 --- a/ipc/shm.c
65255 +++ b/ipc/shm.c
65256 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
65257 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
65258 #endif
65259
65260 +#ifdef CONFIG_GRKERNSEC
65261 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65262 + const time_t shm_createtime, const uid_t cuid,
65263 + const int shmid);
65264 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65265 + const time_t shm_createtime);
65266 +#endif
65267 +
65268 void shm_init_ns(struct ipc_namespace *ns)
65269 {
65270 ns->shm_ctlmax = SHMMAX;
65271 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
65272 shp->shm_lprid = 0;
65273 shp->shm_atim = shp->shm_dtim = 0;
65274 shp->shm_ctim = get_seconds();
65275 +#ifdef CONFIG_GRKERNSEC
65276 + {
65277 + struct timespec timeval;
65278 + do_posix_clock_monotonic_gettime(&timeval);
65279 +
65280 + shp->shm_createtime = timeval.tv_sec;
65281 + }
65282 +#endif
65283 shp->shm_segsz = size;
65284 shp->shm_nattch = 0;
65285 shp->shm_file = file;
65286 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
65287 return 0;
65288 }
65289
65290 +static struct ipc_ops shm_ops = {
65291 + .getnew = newseg,
65292 + .associate = shm_security,
65293 + .more_checks = shm_more_checks
65294 +};
65295 +
65296 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
65297 {
65298 struct ipc_namespace *ns;
65299 - struct ipc_ops shm_ops;
65300 struct ipc_params shm_params;
65301
65302 ns = current->nsproxy->ipc_ns;
65303
65304 - shm_ops.getnew = newseg;
65305 - shm_ops.associate = shm_security;
65306 - shm_ops.more_checks = shm_more_checks;
65307 -
65308 shm_params.key = key;
65309 shm_params.flg = shmflg;
65310 shm_params.u.size = size;
65311 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65312 f_mode = FMODE_READ | FMODE_WRITE;
65313 }
65314 if (shmflg & SHM_EXEC) {
65315 +
65316 +#ifdef CONFIG_PAX_MPROTECT
65317 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
65318 + goto out;
65319 +#endif
65320 +
65321 prot |= PROT_EXEC;
65322 acc_mode |= S_IXUGO;
65323 }
65324 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65325 if (err)
65326 goto out_unlock;
65327
65328 +#ifdef CONFIG_GRKERNSEC
65329 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
65330 + shp->shm_perm.cuid, shmid) ||
65331 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
65332 + err = -EACCES;
65333 + goto out_unlock;
65334 + }
65335 +#endif
65336 +
65337 path = shp->shm_file->f_path;
65338 path_get(&path);
65339 shp->shm_nattch++;
65340 +#ifdef CONFIG_GRKERNSEC
65341 + shp->shm_lapid = current->pid;
65342 +#endif
65343 size = i_size_read(path.dentry->d_inode);
65344 shm_unlock(shp);
65345
65346 diff --git a/kernel/acct.c b/kernel/acct.c
65347 index 02e6167..54824f7 100644
65348 --- a/kernel/acct.c
65349 +++ b/kernel/acct.c
65350 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
65351 */
65352 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
65353 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
65354 - file->f_op->write(file, (char *)&ac,
65355 + file->f_op->write(file, (char __force_user *)&ac,
65356 sizeof(acct_t), &file->f_pos);
65357 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
65358 set_fs(fs);
65359 diff --git a/kernel/audit.c b/kernel/audit.c
65360 index bb0eb5b..cf2a03a 100644
65361 --- a/kernel/audit.c
65362 +++ b/kernel/audit.c
65363 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
65364 3) suppressed due to audit_rate_limit
65365 4) suppressed due to audit_backlog_limit
65366 */
65367 -static atomic_t audit_lost = ATOMIC_INIT(0);
65368 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
65369
65370 /* The netlink socket. */
65371 static struct sock *audit_sock;
65372 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
65373 unsigned long now;
65374 int print;
65375
65376 - atomic_inc(&audit_lost);
65377 + atomic_inc_unchecked(&audit_lost);
65378
65379 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
65380
65381 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
65382 printk(KERN_WARNING
65383 "audit: audit_lost=%d audit_rate_limit=%d "
65384 "audit_backlog_limit=%d\n",
65385 - atomic_read(&audit_lost),
65386 + atomic_read_unchecked(&audit_lost),
65387 audit_rate_limit,
65388 audit_backlog_limit);
65389 audit_panic(message);
65390 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
65391 status_set.pid = audit_pid;
65392 status_set.rate_limit = audit_rate_limit;
65393 status_set.backlog_limit = audit_backlog_limit;
65394 - status_set.lost = atomic_read(&audit_lost);
65395 + status_set.lost = atomic_read_unchecked(&audit_lost);
65396 status_set.backlog = skb_queue_len(&audit_skb_queue);
65397 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
65398 &status_set, sizeof(status_set));
65399 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
65400 index af1de0f..06dfe57 100644
65401 --- a/kernel/auditsc.c
65402 +++ b/kernel/auditsc.c
65403 @@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
65404 }
65405
65406 /* global counter which is incremented every time something logs in */
65407 -static atomic_t session_id = ATOMIC_INIT(0);
65408 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
65409
65410 /**
65411 * audit_set_loginuid - set current task's audit_context loginuid
65412 @@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
65413 return -EPERM;
65414 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
65415
65416 - sessionid = atomic_inc_return(&session_id);
65417 + sessionid = atomic_inc_return_unchecked(&session_id);
65418 if (context && context->in_syscall) {
65419 struct audit_buffer *ab;
65420
65421 diff --git a/kernel/capability.c b/kernel/capability.c
65422 index 3f1adb6..c564db0 100644
65423 --- a/kernel/capability.c
65424 +++ b/kernel/capability.c
65425 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
65426 * before modification is attempted and the application
65427 * fails.
65428 */
65429 + if (tocopy > ARRAY_SIZE(kdata))
65430 + return -EFAULT;
65431 +
65432 if (copy_to_user(dataptr, kdata, tocopy
65433 * sizeof(struct __user_cap_data_struct))) {
65434 return -EFAULT;
65435 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
65436 int ret;
65437
65438 rcu_read_lock();
65439 - ret = security_capable(__task_cred(t), ns, cap);
65440 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
65441 + gr_task_is_capable(t, __task_cred(t), cap);
65442 rcu_read_unlock();
65443
65444 - return (ret == 0);
65445 + return ret;
65446 }
65447
65448 /**
65449 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
65450 int ret;
65451
65452 rcu_read_lock();
65453 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
65454 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
65455 rcu_read_unlock();
65456
65457 - return (ret == 0);
65458 + return ret;
65459 }
65460
65461 /**
65462 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
65463 BUG();
65464 }
65465
65466 - if (security_capable(current_cred(), ns, cap) == 0) {
65467 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
65468 current->flags |= PF_SUPERPRIV;
65469 return true;
65470 }
65471 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
65472 }
65473 EXPORT_SYMBOL(ns_capable);
65474
65475 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
65476 +{
65477 + if (unlikely(!cap_valid(cap))) {
65478 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
65479 + BUG();
65480 + }
65481 +
65482 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
65483 + current->flags |= PF_SUPERPRIV;
65484 + return true;
65485 + }
65486 + return false;
65487 +}
65488 +EXPORT_SYMBOL(ns_capable_nolog);
65489 +
65490 /**
65491 * capable - Determine if the current task has a superior capability in effect
65492 * @cap: The capability to be tested for
65493 @@ -408,6 +427,12 @@ bool capable(int cap)
65494 }
65495 EXPORT_SYMBOL(capable);
65496
65497 +bool capable_nolog(int cap)
65498 +{
65499 + return ns_capable_nolog(&init_user_ns, cap);
65500 +}
65501 +EXPORT_SYMBOL(capable_nolog);
65502 +
65503 /**
65504 * nsown_capable - Check superior capability to one's own user_ns
65505 * @cap: The capability in question
65506 diff --git a/kernel/compat.c b/kernel/compat.c
65507 index f346ced..aa2b1f4 100644
65508 --- a/kernel/compat.c
65509 +++ b/kernel/compat.c
65510 @@ -13,6 +13,7 @@
65511
65512 #include <linux/linkage.h>
65513 #include <linux/compat.h>
65514 +#include <linux/module.h>
65515 #include <linux/errno.h>
65516 #include <linux/time.h>
65517 #include <linux/signal.h>
65518 @@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
65519 mm_segment_t oldfs;
65520 long ret;
65521
65522 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
65523 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
65524 oldfs = get_fs();
65525 set_fs(KERNEL_DS);
65526 ret = hrtimer_nanosleep_restart(restart);
65527 @@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
65528 oldfs = get_fs();
65529 set_fs(KERNEL_DS);
65530 ret = hrtimer_nanosleep(&tu,
65531 - rmtp ? (struct timespec __user *)&rmt : NULL,
65532 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
65533 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
65534 set_fs(oldfs);
65535
65536 @@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
65537 mm_segment_t old_fs = get_fs();
65538
65539 set_fs(KERNEL_DS);
65540 - ret = sys_sigpending((old_sigset_t __user *) &s);
65541 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
65542 set_fs(old_fs);
65543 if (ret == 0)
65544 ret = put_user(s, set);
65545 @@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
65546 old_fs = get_fs();
65547 set_fs(KERNEL_DS);
65548 ret = sys_sigprocmask(how,
65549 - set ? (old_sigset_t __user *) &s : NULL,
65550 - oset ? (old_sigset_t __user *) &s : NULL);
65551 + set ? (old_sigset_t __force_user *) &s : NULL,
65552 + oset ? (old_sigset_t __force_user *) &s : NULL);
65553 set_fs(old_fs);
65554 if (ret == 0)
65555 if (oset)
65556 @@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
65557 mm_segment_t old_fs = get_fs();
65558
65559 set_fs(KERNEL_DS);
65560 - ret = sys_old_getrlimit(resource, &r);
65561 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
65562 set_fs(old_fs);
65563
65564 if (!ret) {
65565 @@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
65566 mm_segment_t old_fs = get_fs();
65567
65568 set_fs(KERNEL_DS);
65569 - ret = sys_getrusage(who, (struct rusage __user *) &r);
65570 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
65571 set_fs(old_fs);
65572
65573 if (ret)
65574 @@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
65575 set_fs (KERNEL_DS);
65576 ret = sys_wait4(pid,
65577 (stat_addr ?
65578 - (unsigned int __user *) &status : NULL),
65579 - options, (struct rusage __user *) &r);
65580 + (unsigned int __force_user *) &status : NULL),
65581 + options, (struct rusage __force_user *) &r);
65582 set_fs (old_fs);
65583
65584 if (ret > 0) {
65585 @@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
65586 memset(&info, 0, sizeof(info));
65587
65588 set_fs(KERNEL_DS);
65589 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
65590 - uru ? (struct rusage __user *)&ru : NULL);
65591 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
65592 + uru ? (struct rusage __force_user *)&ru : NULL);
65593 set_fs(old_fs);
65594
65595 if ((ret < 0) || (info.si_signo == 0))
65596 @@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
65597 oldfs = get_fs();
65598 set_fs(KERNEL_DS);
65599 err = sys_timer_settime(timer_id, flags,
65600 - (struct itimerspec __user *) &newts,
65601 - (struct itimerspec __user *) &oldts);
65602 + (struct itimerspec __force_user *) &newts,
65603 + (struct itimerspec __force_user *) &oldts);
65604 set_fs(oldfs);
65605 if (!err && old && put_compat_itimerspec(old, &oldts))
65606 return -EFAULT;
65607 @@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
65608 oldfs = get_fs();
65609 set_fs(KERNEL_DS);
65610 err = sys_timer_gettime(timer_id,
65611 - (struct itimerspec __user *) &ts);
65612 + (struct itimerspec __force_user *) &ts);
65613 set_fs(oldfs);
65614 if (!err && put_compat_itimerspec(setting, &ts))
65615 return -EFAULT;
65616 @@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
65617 oldfs = get_fs();
65618 set_fs(KERNEL_DS);
65619 err = sys_clock_settime(which_clock,
65620 - (struct timespec __user *) &ts);
65621 + (struct timespec __force_user *) &ts);
65622 set_fs(oldfs);
65623 return err;
65624 }
65625 @@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
65626 oldfs = get_fs();
65627 set_fs(KERNEL_DS);
65628 err = sys_clock_gettime(which_clock,
65629 - (struct timespec __user *) &ts);
65630 + (struct timespec __force_user *) &ts);
65631 set_fs(oldfs);
65632 if (!err && put_compat_timespec(&ts, tp))
65633 return -EFAULT;
65634 @@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
65635
65636 oldfs = get_fs();
65637 set_fs(KERNEL_DS);
65638 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
65639 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
65640 set_fs(oldfs);
65641
65642 err = compat_put_timex(utp, &txc);
65643 @@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
65644 oldfs = get_fs();
65645 set_fs(KERNEL_DS);
65646 err = sys_clock_getres(which_clock,
65647 - (struct timespec __user *) &ts);
65648 + (struct timespec __force_user *) &ts);
65649 set_fs(oldfs);
65650 if (!err && tp && put_compat_timespec(&ts, tp))
65651 return -EFAULT;
65652 @@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
65653 long err;
65654 mm_segment_t oldfs;
65655 struct timespec tu;
65656 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
65657 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
65658
65659 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
65660 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
65661 oldfs = get_fs();
65662 set_fs(KERNEL_DS);
65663 err = clock_nanosleep_restart(restart);
65664 @@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
65665 oldfs = get_fs();
65666 set_fs(KERNEL_DS);
65667 err = sys_clock_nanosleep(which_clock, flags,
65668 - (struct timespec __user *) &in,
65669 - (struct timespec __user *) &out);
65670 + (struct timespec __force_user *) &in,
65671 + (struct timespec __force_user *) &out);
65672 set_fs(oldfs);
65673
65674 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
65675 diff --git a/kernel/configs.c b/kernel/configs.c
65676 index 42e8fa0..9e7406b 100644
65677 --- a/kernel/configs.c
65678 +++ b/kernel/configs.c
65679 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
65680 struct proc_dir_entry *entry;
65681
65682 /* create the current config file */
65683 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
65684 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
65685 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
65686 + &ikconfig_file_ops);
65687 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65688 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
65689 + &ikconfig_file_ops);
65690 +#endif
65691 +#else
65692 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
65693 &ikconfig_file_ops);
65694 +#endif
65695 +
65696 if (!entry)
65697 return -ENOMEM;
65698
65699 diff --git a/kernel/cred.c b/kernel/cred.c
65700 index 48c6fd3..3342f00 100644
65701 --- a/kernel/cred.c
65702 +++ b/kernel/cred.c
65703 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
65704 validate_creds(cred);
65705 put_cred(cred);
65706 }
65707 +
65708 +#ifdef CONFIG_GRKERNSEC_SETXID
65709 + cred = (struct cred *) tsk->delayed_cred;
65710 + if (cred) {
65711 + tsk->delayed_cred = NULL;
65712 + validate_creds(cred);
65713 + put_cred(cred);
65714 + }
65715 +#endif
65716 }
65717
65718 /**
65719 @@ -472,7 +481,7 @@ error_put:
65720 * Always returns 0 thus allowing this function to be tail-called at the end
65721 * of, say, sys_setgid().
65722 */
65723 -int commit_creds(struct cred *new)
65724 +static int __commit_creds(struct cred *new)
65725 {
65726 struct task_struct *task = current;
65727 const struct cred *old = task->real_cred;
65728 @@ -491,6 +500,8 @@ int commit_creds(struct cred *new)
65729
65730 get_cred(new); /* we will require a ref for the subj creds too */
65731
65732 + gr_set_role_label(task, new->uid, new->gid);
65733 +
65734 /* dumpability changes */
65735 if (old->euid != new->euid ||
65736 old->egid != new->egid ||
65737 @@ -540,6 +551,92 @@ int commit_creds(struct cred *new)
65738 put_cred(old);
65739 return 0;
65740 }
65741 +#ifdef CONFIG_GRKERNSEC_SETXID
65742 +extern int set_user(struct cred *new);
65743 +
65744 +void gr_delayed_cred_worker(void)
65745 +{
65746 + const struct cred *new = current->delayed_cred;
65747 + struct cred *ncred;
65748 +
65749 + current->delayed_cred = NULL;
65750 +
65751 + if (current_uid() && new != NULL) {
65752 + // from doing get_cred on it when queueing this
65753 + put_cred(new);
65754 + return;
65755 + } else if (new == NULL)
65756 + return;
65757 +
65758 + ncred = prepare_creds();
65759 + if (!ncred)
65760 + goto die;
65761 + // uids
65762 + ncred->uid = new->uid;
65763 + ncred->euid = new->euid;
65764 + ncred->suid = new->suid;
65765 + ncred->fsuid = new->fsuid;
65766 + // gids
65767 + ncred->gid = new->gid;
65768 + ncred->egid = new->egid;
65769 + ncred->sgid = new->sgid;
65770 + ncred->fsgid = new->fsgid;
65771 + // groups
65772 + if (set_groups(ncred, new->group_info) < 0) {
65773 + abort_creds(ncred);
65774 + goto die;
65775 + }
65776 + // caps
65777 + ncred->securebits = new->securebits;
65778 + ncred->cap_inheritable = new->cap_inheritable;
65779 + ncred->cap_permitted = new->cap_permitted;
65780 + ncred->cap_effective = new->cap_effective;
65781 + ncred->cap_bset = new->cap_bset;
65782 +
65783 + if (set_user(ncred)) {
65784 + abort_creds(ncred);
65785 + goto die;
65786 + }
65787 +
65788 + // from doing get_cred on it when queueing this
65789 + put_cred(new);
65790 +
65791 + __commit_creds(ncred);
65792 + return;
65793 +die:
65794 + // from doing get_cred on it when queueing this
65795 + put_cred(new);
65796 + do_group_exit(SIGKILL);
65797 +}
65798 +#endif
65799 +
65800 +int commit_creds(struct cred *new)
65801 +{
65802 +#ifdef CONFIG_GRKERNSEC_SETXID
65803 + struct task_struct *t;
65804 +
65805 + /* we won't get called with tasklist_lock held for writing
65806 + and interrupts disabled as the cred struct in that case is
65807 + init_cred
65808 + */
65809 + if (grsec_enable_setxid && !current_is_single_threaded() &&
65810 + !current_uid() && new->uid) {
65811 + rcu_read_lock();
65812 + read_lock(&tasklist_lock);
65813 + for (t = next_thread(current); t != current;
65814 + t = next_thread(t)) {
65815 + if (t->delayed_cred == NULL) {
65816 + t->delayed_cred = get_cred(new);
65817 + set_tsk_need_resched(t);
65818 + }
65819 + }
65820 + read_unlock(&tasklist_lock);
65821 + rcu_read_unlock();
65822 + }
65823 +#endif
65824 + return __commit_creds(new);
65825 +}
65826 +
65827 EXPORT_SYMBOL(commit_creds);
65828
65829 /**
65830 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
65831 index 7fda904..59f620c 100644
65832 --- a/kernel/debug/debug_core.c
65833 +++ b/kernel/debug/debug_core.c
65834 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
65835 */
65836 static atomic_t masters_in_kgdb;
65837 static atomic_t slaves_in_kgdb;
65838 -static atomic_t kgdb_break_tasklet_var;
65839 +static atomic_unchecked_t kgdb_break_tasklet_var;
65840 atomic_t kgdb_setting_breakpoint;
65841
65842 struct task_struct *kgdb_usethread;
65843 @@ -129,7 +129,7 @@ int kgdb_single_step;
65844 static pid_t kgdb_sstep_pid;
65845
65846 /* to keep track of the CPU which is doing the single stepping*/
65847 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65848 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65849
65850 /*
65851 * If you are debugging a problem where roundup (the collection of
65852 @@ -537,7 +537,7 @@ return_normal:
65853 * kernel will only try for the value of sstep_tries before
65854 * giving up and continuing on.
65855 */
65856 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
65857 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
65858 (kgdb_info[cpu].task &&
65859 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
65860 atomic_set(&kgdb_active, -1);
65861 @@ -631,8 +631,8 @@ cpu_master_loop:
65862 }
65863
65864 kgdb_restore:
65865 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
65866 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
65867 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
65868 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
65869 if (kgdb_info[sstep_cpu].task)
65870 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
65871 else
65872 @@ -829,18 +829,18 @@ static void kgdb_unregister_callbacks(void)
65873 static void kgdb_tasklet_bpt(unsigned long ing)
65874 {
65875 kgdb_breakpoint();
65876 - atomic_set(&kgdb_break_tasklet_var, 0);
65877 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
65878 }
65879
65880 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
65881
65882 void kgdb_schedule_breakpoint(void)
65883 {
65884 - if (atomic_read(&kgdb_break_tasklet_var) ||
65885 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65886 atomic_read(&kgdb_active) != -1 ||
65887 atomic_read(&kgdb_setting_breakpoint))
65888 return;
65889 - atomic_inc(&kgdb_break_tasklet_var);
65890 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
65891 tasklet_schedule(&kgdb_tasklet_breakpoint);
65892 }
65893 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
65894 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
65895 index e2ae734..08a4c5c 100644
65896 --- a/kernel/debug/kdb/kdb_main.c
65897 +++ b/kernel/debug/kdb/kdb_main.c
65898 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
65899 list_for_each_entry(mod, kdb_modules, list) {
65900
65901 kdb_printf("%-20s%8u 0x%p ", mod->name,
65902 - mod->core_size, (void *)mod);
65903 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
65904 #ifdef CONFIG_MODULE_UNLOAD
65905 kdb_printf("%4ld ", module_refcount(mod));
65906 #endif
65907 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
65908 kdb_printf(" (Loading)");
65909 else
65910 kdb_printf(" (Live)");
65911 - kdb_printf(" 0x%p", mod->module_core);
65912 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65913
65914 #ifdef CONFIG_MODULE_UNLOAD
65915 {
65916 diff --git a/kernel/events/core.c b/kernel/events/core.c
65917 index 1b5c081..c375f83 100644
65918 --- a/kernel/events/core.c
65919 +++ b/kernel/events/core.c
65920 @@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
65921 return 0;
65922 }
65923
65924 -static atomic64_t perf_event_id;
65925 +static atomic64_unchecked_t perf_event_id;
65926
65927 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65928 enum event_type_t event_type);
65929 @@ -2581,7 +2581,7 @@ static void __perf_event_read(void *info)
65930
65931 static inline u64 perf_event_count(struct perf_event *event)
65932 {
65933 - return local64_read(&event->count) + atomic64_read(&event->child_count);
65934 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65935 }
65936
65937 static u64 perf_event_read(struct perf_event *event)
65938 @@ -2897,9 +2897,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
65939 mutex_lock(&event->child_mutex);
65940 total += perf_event_read(event);
65941 *enabled += event->total_time_enabled +
65942 - atomic64_read(&event->child_total_time_enabled);
65943 + atomic64_read_unchecked(&event->child_total_time_enabled);
65944 *running += event->total_time_running +
65945 - atomic64_read(&event->child_total_time_running);
65946 + atomic64_read_unchecked(&event->child_total_time_running);
65947
65948 list_for_each_entry(child, &event->child_list, child_list) {
65949 total += perf_event_read(child);
65950 @@ -3306,10 +3306,10 @@ void perf_event_update_userpage(struct perf_event *event)
65951 userpg->offset -= local64_read(&event->hw.prev_count);
65952
65953 userpg->time_enabled = enabled +
65954 - atomic64_read(&event->child_total_time_enabled);
65955 + atomic64_read_unchecked(&event->child_total_time_enabled);
65956
65957 userpg->time_running = running +
65958 - atomic64_read(&event->child_total_time_running);
65959 + atomic64_read_unchecked(&event->child_total_time_running);
65960
65961 barrier();
65962 ++userpg->lock;
65963 @@ -3738,11 +3738,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65964 values[n++] = perf_event_count(event);
65965 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65966 values[n++] = enabled +
65967 - atomic64_read(&event->child_total_time_enabled);
65968 + atomic64_read_unchecked(&event->child_total_time_enabled);
65969 }
65970 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65971 values[n++] = running +
65972 - atomic64_read(&event->child_total_time_running);
65973 + atomic64_read_unchecked(&event->child_total_time_running);
65974 }
65975 if (read_format & PERF_FORMAT_ID)
65976 values[n++] = primary_event_id(event);
65977 @@ -4393,12 +4393,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65978 * need to add enough zero bytes after the string to handle
65979 * the 64bit alignment we do later.
65980 */
65981 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65982 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
65983 if (!buf) {
65984 name = strncpy(tmp, "//enomem", sizeof(tmp));
65985 goto got_name;
65986 }
65987 - name = d_path(&file->f_path, buf, PATH_MAX);
65988 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65989 if (IS_ERR(name)) {
65990 name = strncpy(tmp, "//toolong", sizeof(tmp));
65991 goto got_name;
65992 @@ -5765,7 +5765,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65993 event->parent = parent_event;
65994
65995 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65996 - event->id = atomic64_inc_return(&perf_event_id);
65997 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
65998
65999 event->state = PERF_EVENT_STATE_INACTIVE;
66000
66001 @@ -6287,10 +6287,10 @@ static void sync_child_event(struct perf_event *child_event,
66002 /*
66003 * Add back the child's count to the parent's count:
66004 */
66005 - atomic64_add(child_val, &parent_event->child_count);
66006 - atomic64_add(child_event->total_time_enabled,
66007 + atomic64_add_unchecked(child_val, &parent_event->child_count);
66008 + atomic64_add_unchecked(child_event->total_time_enabled,
66009 &parent_event->child_total_time_enabled);
66010 - atomic64_add(child_event->total_time_running,
66011 + atomic64_add_unchecked(child_event->total_time_running,
66012 &parent_event->child_total_time_running);
66013
66014 /*
66015 diff --git a/kernel/exit.c b/kernel/exit.c
66016 index 4b4042f..5bdd8d5 100644
66017 --- a/kernel/exit.c
66018 +++ b/kernel/exit.c
66019 @@ -58,6 +58,10 @@
66020 #include <asm/pgtable.h>
66021 #include <asm/mmu_context.h>
66022
66023 +#ifdef CONFIG_GRKERNSEC
66024 +extern rwlock_t grsec_exec_file_lock;
66025 +#endif
66026 +
66027 static void exit_mm(struct task_struct * tsk);
66028
66029 static void __unhash_process(struct task_struct *p, bool group_dead)
66030 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p)
66031 struct task_struct *leader;
66032 int zap_leader;
66033 repeat:
66034 +#ifdef CONFIG_NET
66035 + gr_del_task_from_ip_table(p);
66036 +#endif
66037 +
66038 /* don't need to get the RCU readlock here - the process is dead and
66039 * can't be modifying its own credentials. But shut RCU-lockdep up */
66040 rcu_read_lock();
66041 @@ -381,7 +389,7 @@ int allow_signal(int sig)
66042 * know it'll be handled, so that they don't get converted to
66043 * SIGKILL or just silently dropped.
66044 */
66045 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
66046 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
66047 recalc_sigpending();
66048 spin_unlock_irq(&current->sighand->siglock);
66049 return 0;
66050 @@ -417,6 +425,17 @@ void daemonize(const char *name, ...)
66051 vsnprintf(current->comm, sizeof(current->comm), name, args);
66052 va_end(args);
66053
66054 +#ifdef CONFIG_GRKERNSEC
66055 + write_lock(&grsec_exec_file_lock);
66056 + if (current->exec_file) {
66057 + fput(current->exec_file);
66058 + current->exec_file = NULL;
66059 + }
66060 + write_unlock(&grsec_exec_file_lock);
66061 +#endif
66062 +
66063 + gr_set_kernel_label(current);
66064 +
66065 /*
66066 * If we were started as result of loading a module, close all of the
66067 * user space pages. We don't need them, and if we didn't close them
66068 @@ -892,6 +911,8 @@ void do_exit(long code)
66069 struct task_struct *tsk = current;
66070 int group_dead;
66071
66072 + set_fs(USER_DS);
66073 +
66074 profile_task_exit(tsk);
66075
66076 WARN_ON(blk_needs_flush_plug(tsk));
66077 @@ -908,7 +929,6 @@ void do_exit(long code)
66078 * mm_release()->clear_child_tid() from writing to a user-controlled
66079 * kernel address.
66080 */
66081 - set_fs(USER_DS);
66082
66083 ptrace_event(PTRACE_EVENT_EXIT, code);
66084
66085 @@ -969,6 +989,9 @@ void do_exit(long code)
66086 tsk->exit_code = code;
66087 taskstats_exit(tsk, group_dead);
66088
66089 + gr_acl_handle_psacct(tsk, code);
66090 + gr_acl_handle_exit();
66091 +
66092 exit_mm(tsk);
66093
66094 if (group_dead)
66095 @@ -1085,7 +1108,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
66096 * Take down every thread in the group. This is called by fatal signals
66097 * as well as by sys_exit_group (below).
66098 */
66099 -void
66100 +__noreturn void
66101 do_group_exit(int exit_code)
66102 {
66103 struct signal_struct *sig = current->signal;
66104 diff --git a/kernel/fork.c b/kernel/fork.c
66105 index 26a7a67..a1053f9 100644
66106 --- a/kernel/fork.c
66107 +++ b/kernel/fork.c
66108 @@ -284,7 +284,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
66109 *stackend = STACK_END_MAGIC; /* for overflow detection */
66110
66111 #ifdef CONFIG_CC_STACKPROTECTOR
66112 - tsk->stack_canary = get_random_int();
66113 + tsk->stack_canary = pax_get_random_long();
66114 #endif
66115
66116 /*
66117 @@ -308,13 +308,77 @@ out:
66118 }
66119
66120 #ifdef CONFIG_MMU
66121 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
66122 +{
66123 + struct vm_area_struct *tmp;
66124 + unsigned long charge;
66125 + struct mempolicy *pol;
66126 + struct file *file;
66127 +
66128 + charge = 0;
66129 + if (mpnt->vm_flags & VM_ACCOUNT) {
66130 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66131 + if (security_vm_enough_memory(len))
66132 + goto fail_nomem;
66133 + charge = len;
66134 + }
66135 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66136 + if (!tmp)
66137 + goto fail_nomem;
66138 + *tmp = *mpnt;
66139 + tmp->vm_mm = mm;
66140 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
66141 + pol = mpol_dup(vma_policy(mpnt));
66142 + if (IS_ERR(pol))
66143 + goto fail_nomem_policy;
66144 + vma_set_policy(tmp, pol);
66145 + if (anon_vma_fork(tmp, mpnt))
66146 + goto fail_nomem_anon_vma_fork;
66147 + tmp->vm_flags &= ~VM_LOCKED;
66148 + tmp->vm_next = tmp->vm_prev = NULL;
66149 + tmp->vm_mirror = NULL;
66150 + file = tmp->vm_file;
66151 + if (file) {
66152 + struct inode *inode = file->f_path.dentry->d_inode;
66153 + struct address_space *mapping = file->f_mapping;
66154 +
66155 + get_file(file);
66156 + if (tmp->vm_flags & VM_DENYWRITE)
66157 + atomic_dec(&inode->i_writecount);
66158 + mutex_lock(&mapping->i_mmap_mutex);
66159 + if (tmp->vm_flags & VM_SHARED)
66160 + mapping->i_mmap_writable++;
66161 + flush_dcache_mmap_lock(mapping);
66162 + /* insert tmp into the share list, just after mpnt */
66163 + vma_prio_tree_add(tmp, mpnt);
66164 + flush_dcache_mmap_unlock(mapping);
66165 + mutex_unlock(&mapping->i_mmap_mutex);
66166 + }
66167 +
66168 + /*
66169 + * Clear hugetlb-related page reserves for children. This only
66170 + * affects MAP_PRIVATE mappings. Faults generated by the child
66171 + * are not guaranteed to succeed, even if read-only
66172 + */
66173 + if (is_vm_hugetlb_page(tmp))
66174 + reset_vma_resv_huge_pages(tmp);
66175 +
66176 + return tmp;
66177 +
66178 +fail_nomem_anon_vma_fork:
66179 + mpol_put(pol);
66180 +fail_nomem_policy:
66181 + kmem_cache_free(vm_area_cachep, tmp);
66182 +fail_nomem:
66183 + vm_unacct_memory(charge);
66184 + return NULL;
66185 +}
66186 +
66187 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66188 {
66189 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
66190 struct rb_node **rb_link, *rb_parent;
66191 int retval;
66192 - unsigned long charge;
66193 - struct mempolicy *pol;
66194
66195 down_write(&oldmm->mmap_sem);
66196 flush_cache_dup_mm(oldmm);
66197 @@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66198 mm->locked_vm = 0;
66199 mm->mmap = NULL;
66200 mm->mmap_cache = NULL;
66201 - mm->free_area_cache = oldmm->mmap_base;
66202 - mm->cached_hole_size = ~0UL;
66203 + mm->free_area_cache = oldmm->free_area_cache;
66204 + mm->cached_hole_size = oldmm->cached_hole_size;
66205 mm->map_count = 0;
66206 cpumask_clear(mm_cpumask(mm));
66207 mm->mm_rb = RB_ROOT;
66208 @@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66209
66210 prev = NULL;
66211 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
66212 - struct file *file;
66213 -
66214 if (mpnt->vm_flags & VM_DONTCOPY) {
66215 long pages = vma_pages(mpnt);
66216 mm->total_vm -= pages;
66217 @@ -352,53 +414,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66218 -pages);
66219 continue;
66220 }
66221 - charge = 0;
66222 - if (mpnt->vm_flags & VM_ACCOUNT) {
66223 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66224 - if (security_vm_enough_memory(len))
66225 - goto fail_nomem;
66226 - charge = len;
66227 + tmp = dup_vma(mm, mpnt);
66228 + if (!tmp) {
66229 + retval = -ENOMEM;
66230 + goto out;
66231 }
66232 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66233 - if (!tmp)
66234 - goto fail_nomem;
66235 - *tmp = *mpnt;
66236 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
66237 - pol = mpol_dup(vma_policy(mpnt));
66238 - retval = PTR_ERR(pol);
66239 - if (IS_ERR(pol))
66240 - goto fail_nomem_policy;
66241 - vma_set_policy(tmp, pol);
66242 - tmp->vm_mm = mm;
66243 - if (anon_vma_fork(tmp, mpnt))
66244 - goto fail_nomem_anon_vma_fork;
66245 - tmp->vm_flags &= ~VM_LOCKED;
66246 - tmp->vm_next = tmp->vm_prev = NULL;
66247 - file = tmp->vm_file;
66248 - if (file) {
66249 - struct inode *inode = file->f_path.dentry->d_inode;
66250 - struct address_space *mapping = file->f_mapping;
66251 -
66252 - get_file(file);
66253 - if (tmp->vm_flags & VM_DENYWRITE)
66254 - atomic_dec(&inode->i_writecount);
66255 - mutex_lock(&mapping->i_mmap_mutex);
66256 - if (tmp->vm_flags & VM_SHARED)
66257 - mapping->i_mmap_writable++;
66258 - flush_dcache_mmap_lock(mapping);
66259 - /* insert tmp into the share list, just after mpnt */
66260 - vma_prio_tree_add(tmp, mpnt);
66261 - flush_dcache_mmap_unlock(mapping);
66262 - mutex_unlock(&mapping->i_mmap_mutex);
66263 - }
66264 -
66265 - /*
66266 - * Clear hugetlb-related page reserves for children. This only
66267 - * affects MAP_PRIVATE mappings. Faults generated by the child
66268 - * are not guaranteed to succeed, even if read-only
66269 - */
66270 - if (is_vm_hugetlb_page(tmp))
66271 - reset_vma_resv_huge_pages(tmp);
66272
66273 /*
66274 * Link in the new vma and copy the page table entries.
66275 @@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66276 if (retval)
66277 goto out;
66278 }
66279 +
66280 +#ifdef CONFIG_PAX_SEGMEXEC
66281 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
66282 + struct vm_area_struct *mpnt_m;
66283 +
66284 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
66285 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
66286 +
66287 + if (!mpnt->vm_mirror)
66288 + continue;
66289 +
66290 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
66291 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
66292 + mpnt->vm_mirror = mpnt_m;
66293 + } else {
66294 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
66295 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
66296 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
66297 + mpnt->vm_mirror->vm_mirror = mpnt;
66298 + }
66299 + }
66300 + BUG_ON(mpnt_m);
66301 + }
66302 +#endif
66303 +
66304 /* a new mm has just been created */
66305 arch_dup_mmap(oldmm, mm);
66306 retval = 0;
66307 @@ -429,14 +474,6 @@ out:
66308 flush_tlb_mm(oldmm);
66309 up_write(&oldmm->mmap_sem);
66310 return retval;
66311 -fail_nomem_anon_vma_fork:
66312 - mpol_put(pol);
66313 -fail_nomem_policy:
66314 - kmem_cache_free(vm_area_cachep, tmp);
66315 -fail_nomem:
66316 - retval = -ENOMEM;
66317 - vm_unacct_memory(charge);
66318 - goto out;
66319 }
66320
66321 static inline int mm_alloc_pgd(struct mm_struct *mm)
66322 @@ -658,8 +695,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
66323 return ERR_PTR(err);
66324
66325 mm = get_task_mm(task);
66326 - if (mm && mm != current->mm &&
66327 - !ptrace_may_access(task, mode)) {
66328 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
66329 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
66330 mmput(mm);
66331 mm = ERR_PTR(-EACCES);
66332 }
66333 @@ -881,13 +918,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
66334 spin_unlock(&fs->lock);
66335 return -EAGAIN;
66336 }
66337 - fs->users++;
66338 + atomic_inc(&fs->users);
66339 spin_unlock(&fs->lock);
66340 return 0;
66341 }
66342 tsk->fs = copy_fs_struct(fs);
66343 if (!tsk->fs)
66344 return -ENOMEM;
66345 + gr_set_chroot_entries(tsk, &tsk->fs->root);
66346 return 0;
66347 }
66348
66349 @@ -1151,6 +1189,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66350 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
66351 #endif
66352 retval = -EAGAIN;
66353 +
66354 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
66355 +
66356 if (atomic_read(&p->real_cred->user->processes) >=
66357 task_rlimit(p, RLIMIT_NPROC)) {
66358 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
66359 @@ -1306,6 +1347,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66360 if (clone_flags & CLONE_THREAD)
66361 p->tgid = current->tgid;
66362
66363 + gr_copy_label(p);
66364 +
66365 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
66366 /*
66367 * Clear TID on mm_release()?
66368 @@ -1472,6 +1515,8 @@ bad_fork_cleanup_count:
66369 bad_fork_free:
66370 free_task(p);
66371 fork_out:
66372 + gr_log_forkfail(retval);
66373 +
66374 return ERR_PTR(retval);
66375 }
66376
66377 @@ -1572,6 +1617,8 @@ long do_fork(unsigned long clone_flags,
66378 if (clone_flags & CLONE_PARENT_SETTID)
66379 put_user(nr, parent_tidptr);
66380
66381 + gr_handle_brute_check();
66382 +
66383 if (clone_flags & CLONE_VFORK) {
66384 p->vfork_done = &vfork;
66385 init_completion(&vfork);
66386 @@ -1670,7 +1717,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
66387 return 0;
66388
66389 /* don't need lock here; in the worst case we'll do useless copy */
66390 - if (fs->users == 1)
66391 + if (atomic_read(&fs->users) == 1)
66392 return 0;
66393
66394 *new_fsp = copy_fs_struct(fs);
66395 @@ -1759,7 +1806,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
66396 fs = current->fs;
66397 spin_lock(&fs->lock);
66398 current->fs = new_fs;
66399 - if (--fs->users)
66400 + gr_set_chroot_entries(current, &current->fs->root);
66401 + if (atomic_dec_return(&fs->users))
66402 new_fs = NULL;
66403 else
66404 new_fs = fs;
66405 diff --git a/kernel/futex.c b/kernel/futex.c
66406 index 866c9d5..5c5f828 100644
66407 --- a/kernel/futex.c
66408 +++ b/kernel/futex.c
66409 @@ -54,6 +54,7 @@
66410 #include <linux/mount.h>
66411 #include <linux/pagemap.h>
66412 #include <linux/syscalls.h>
66413 +#include <linux/ptrace.h>
66414 #include <linux/signal.h>
66415 #include <linux/export.h>
66416 #include <linux/magic.h>
66417 @@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
66418 struct page *page, *page_head;
66419 int err, ro = 0;
66420
66421 +#ifdef CONFIG_PAX_SEGMEXEC
66422 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
66423 + return -EFAULT;
66424 +#endif
66425 +
66426 /*
66427 * The futex address must be "naturally" aligned.
66428 */
66429 @@ -2721,6 +2727,7 @@ static int __init futex_init(void)
66430 {
66431 u32 curval;
66432 int i;
66433 + mm_segment_t oldfs;
66434
66435 /*
66436 * This will fail and we want it. Some arch implementations do
66437 @@ -2732,8 +2739,11 @@ static int __init futex_init(void)
66438 * implementation, the non-functional ones will return
66439 * -ENOSYS.
66440 */
66441 + oldfs = get_fs();
66442 + set_fs(USER_DS);
66443 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
66444 futex_cmpxchg_enabled = 1;
66445 + set_fs(oldfs);
66446
66447 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
66448 plist_head_init(&futex_queues[i].chain);
66449 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
66450 index 9b22d03..6295b62 100644
66451 --- a/kernel/gcov/base.c
66452 +++ b/kernel/gcov/base.c
66453 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
66454 }
66455
66456 #ifdef CONFIG_MODULES
66457 -static inline int within(void *addr, void *start, unsigned long size)
66458 -{
66459 - return ((addr >= start) && (addr < start + size));
66460 -}
66461 -
66462 /* Update list and generate events when modules are unloaded. */
66463 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66464 void *data)
66465 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66466 prev = NULL;
66467 /* Remove entries located in module from linked list. */
66468 for (info = gcov_info_head; info; info = info->next) {
66469 - if (within(info, mod->module_core, mod->core_size)) {
66470 + if (within_module_core_rw((unsigned long)info, mod)) {
66471 if (prev)
66472 prev->next = info->next;
66473 else
66474 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
66475 index ae34bf5..4e2f3d0 100644
66476 --- a/kernel/hrtimer.c
66477 +++ b/kernel/hrtimer.c
66478 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
66479 local_irq_restore(flags);
66480 }
66481
66482 -static void run_hrtimer_softirq(struct softirq_action *h)
66483 +static void run_hrtimer_softirq(void)
66484 {
66485 hrtimer_peek_ahead_timers();
66486 }
66487 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
66488 index 01d3b70..9e4d098 100644
66489 --- a/kernel/jump_label.c
66490 +++ b/kernel/jump_label.c
66491 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
66492
66493 size = (((unsigned long)stop - (unsigned long)start)
66494 / sizeof(struct jump_entry));
66495 + pax_open_kernel();
66496 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
66497 + pax_close_kernel();
66498 }
66499
66500 static void jump_label_update(struct jump_label_key *key, int enable);
66501 @@ -340,10 +342,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
66502 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
66503 struct jump_entry *iter;
66504
66505 + pax_open_kernel();
66506 for (iter = iter_start; iter < iter_stop; iter++) {
66507 if (within_module_init(iter->code, mod))
66508 iter->code = 0;
66509 }
66510 + pax_close_kernel();
66511 }
66512
66513 static int
66514 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
66515 index 079f1d3..a407562 100644
66516 --- a/kernel/kallsyms.c
66517 +++ b/kernel/kallsyms.c
66518 @@ -11,6 +11,9 @@
66519 * Changed the compression method from stem compression to "table lookup"
66520 * compression (see scripts/kallsyms.c for a more complete description)
66521 */
66522 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66523 +#define __INCLUDED_BY_HIDESYM 1
66524 +#endif
66525 #include <linux/kallsyms.h>
66526 #include <linux/module.h>
66527 #include <linux/init.h>
66528 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
66529
66530 static inline int is_kernel_inittext(unsigned long addr)
66531 {
66532 + if (system_state != SYSTEM_BOOTING)
66533 + return 0;
66534 +
66535 if (addr >= (unsigned long)_sinittext
66536 && addr <= (unsigned long)_einittext)
66537 return 1;
66538 return 0;
66539 }
66540
66541 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66542 +#ifdef CONFIG_MODULES
66543 +static inline int is_module_text(unsigned long addr)
66544 +{
66545 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
66546 + return 1;
66547 +
66548 + addr = ktla_ktva(addr);
66549 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
66550 +}
66551 +#else
66552 +static inline int is_module_text(unsigned long addr)
66553 +{
66554 + return 0;
66555 +}
66556 +#endif
66557 +#endif
66558 +
66559 static inline int is_kernel_text(unsigned long addr)
66560 {
66561 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
66562 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
66563
66564 static inline int is_kernel(unsigned long addr)
66565 {
66566 +
66567 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66568 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
66569 + return 1;
66570 +
66571 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
66572 +#else
66573 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
66574 +#endif
66575 +
66576 return 1;
66577 return in_gate_area_no_mm(addr);
66578 }
66579
66580 static int is_ksym_addr(unsigned long addr)
66581 {
66582 +
66583 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66584 + if (is_module_text(addr))
66585 + return 0;
66586 +#endif
66587 +
66588 if (all_var)
66589 return is_kernel(addr);
66590
66591 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
66592
66593 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
66594 {
66595 - iter->name[0] = '\0';
66596 iter->nameoff = get_symbol_offset(new_pos);
66597 iter->pos = new_pos;
66598 }
66599 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
66600 {
66601 struct kallsym_iter *iter = m->private;
66602
66603 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66604 + if (current_uid())
66605 + return 0;
66606 +#endif
66607 +
66608 /* Some debugging symbols have no name. Ignore them. */
66609 if (!iter->name[0])
66610 return 0;
66611 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
66612 struct kallsym_iter *iter;
66613 int ret;
66614
66615 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
66616 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
66617 if (!iter)
66618 return -ENOMEM;
66619 reset_iter(iter, 0);
66620 diff --git a/kernel/kexec.c b/kernel/kexec.c
66621 index 7b08867..3bac516 100644
66622 --- a/kernel/kexec.c
66623 +++ b/kernel/kexec.c
66624 @@ -1047,7 +1047,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
66625 unsigned long flags)
66626 {
66627 struct compat_kexec_segment in;
66628 - struct kexec_segment out, __user *ksegments;
66629 + struct kexec_segment out;
66630 + struct kexec_segment __user *ksegments;
66631 unsigned long i, result;
66632
66633 /* Don't allow clients that don't understand the native
66634 diff --git a/kernel/kmod.c b/kernel/kmod.c
66635 index a3a46cb..f2e42f8 100644
66636 --- a/kernel/kmod.c
66637 +++ b/kernel/kmod.c
66638 @@ -75,13 +75,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
66639 * If module auto-loading support is disabled then this function
66640 * becomes a no-operation.
66641 */
66642 -int __request_module(bool wait, const char *fmt, ...)
66643 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
66644 {
66645 - va_list args;
66646 char module_name[MODULE_NAME_LEN];
66647 unsigned int max_modprobes;
66648 int ret;
66649 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
66650 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
66651 static char *envp[] = { "HOME=/",
66652 "TERM=linux",
66653 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
66654 @@ -90,9 +89,7 @@ int __request_module(bool wait, const char *fmt, ...)
66655 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
66656 static int kmod_loop_msg;
66657
66658 - va_start(args, fmt);
66659 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
66660 - va_end(args);
66661 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
66662 if (ret >= MODULE_NAME_LEN)
66663 return -ENAMETOOLONG;
66664
66665 @@ -100,6 +97,20 @@ int __request_module(bool wait, const char *fmt, ...)
66666 if (ret)
66667 return ret;
66668
66669 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66670 + if (!current_uid()) {
66671 + /* hack to workaround consolekit/udisks stupidity */
66672 + read_lock(&tasklist_lock);
66673 + if (!strcmp(current->comm, "mount") &&
66674 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
66675 + read_unlock(&tasklist_lock);
66676 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
66677 + return -EPERM;
66678 + }
66679 + read_unlock(&tasklist_lock);
66680 + }
66681 +#endif
66682 +
66683 /* If modprobe needs a service that is in a module, we get a recursive
66684 * loop. Limit the number of running kmod threads to max_threads/2 or
66685 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
66686 @@ -135,6 +146,47 @@ int __request_module(bool wait, const char *fmt, ...)
66687 atomic_dec(&kmod_concurrent);
66688 return ret;
66689 }
66690 +
66691 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
66692 +{
66693 + va_list args;
66694 + int ret;
66695 +
66696 + va_start(args, fmt);
66697 + ret = ____request_module(wait, module_param, fmt, args);
66698 + va_end(args);
66699 +
66700 + return ret;
66701 +}
66702 +
66703 +int __request_module(bool wait, const char *fmt, ...)
66704 +{
66705 + va_list args;
66706 + int ret;
66707 +
66708 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66709 + if (current_uid()) {
66710 + char module_param[MODULE_NAME_LEN];
66711 +
66712 + memset(module_param, 0, sizeof(module_param));
66713 +
66714 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
66715 +
66716 + va_start(args, fmt);
66717 + ret = ____request_module(wait, module_param, fmt, args);
66718 + va_end(args);
66719 +
66720 + return ret;
66721 + }
66722 +#endif
66723 +
66724 + va_start(args, fmt);
66725 + ret = ____request_module(wait, NULL, fmt, args);
66726 + va_end(args);
66727 +
66728 + return ret;
66729 +}
66730 +
66731 EXPORT_SYMBOL(__request_module);
66732 #endif /* CONFIG_MODULES */
66733
66734 @@ -224,7 +276,7 @@ static int wait_for_helper(void *data)
66735 *
66736 * Thus the __user pointer cast is valid here.
66737 */
66738 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
66739 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
66740
66741 /*
66742 * If ret is 0, either ____call_usermodehelper failed and the
66743 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
66744 index c62b854..cb67968 100644
66745 --- a/kernel/kprobes.c
66746 +++ b/kernel/kprobes.c
66747 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
66748 * kernel image and loaded module images reside. This is required
66749 * so x86_64 can correctly handle the %rip-relative fixups.
66750 */
66751 - kip->insns = module_alloc(PAGE_SIZE);
66752 + kip->insns = module_alloc_exec(PAGE_SIZE);
66753 if (!kip->insns) {
66754 kfree(kip);
66755 return NULL;
66756 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
66757 */
66758 if (!list_is_singular(&kip->list)) {
66759 list_del(&kip->list);
66760 - module_free(NULL, kip->insns);
66761 + module_free_exec(NULL, kip->insns);
66762 kfree(kip);
66763 }
66764 return 1;
66765 @@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
66766 {
66767 int i, err = 0;
66768 unsigned long offset = 0, size = 0;
66769 - char *modname, namebuf[128];
66770 + char *modname, namebuf[KSYM_NAME_LEN];
66771 const char *symbol_name;
66772 void *addr;
66773 struct kprobe_blackpoint *kb;
66774 @@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
66775 const char *sym = NULL;
66776 unsigned int i = *(loff_t *) v;
66777 unsigned long offset = 0;
66778 - char *modname, namebuf[128];
66779 + char *modname, namebuf[KSYM_NAME_LEN];
66780
66781 head = &kprobe_table[i];
66782 preempt_disable();
66783 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
66784 index 4e316e1..5501eef 100644
66785 --- a/kernel/ksysfs.c
66786 +++ b/kernel/ksysfs.c
66787 @@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
66788 {
66789 if (count+1 > UEVENT_HELPER_PATH_LEN)
66790 return -ENOENT;
66791 + if (!capable(CAP_SYS_ADMIN))
66792 + return -EPERM;
66793 memcpy(uevent_helper, buf, count);
66794 uevent_helper[count] = '\0';
66795 if (count && uevent_helper[count-1] == '\n')
66796 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
66797 index 8889f7d..95319b7 100644
66798 --- a/kernel/lockdep.c
66799 +++ b/kernel/lockdep.c
66800 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
66801 end = (unsigned long) &_end,
66802 addr = (unsigned long) obj;
66803
66804 +#ifdef CONFIG_PAX_KERNEXEC
66805 + start = ktla_ktva(start);
66806 +#endif
66807 +
66808 /*
66809 * static variable?
66810 */
66811 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
66812 if (!static_obj(lock->key)) {
66813 debug_locks_off();
66814 printk("INFO: trying to register non-static key.\n");
66815 + printk("lock:%pS key:%pS.\n", lock, lock->key);
66816 printk("the code is fine but needs lockdep annotation.\n");
66817 printk("turning off the locking correctness validator.\n");
66818 dump_stack();
66819 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
66820 if (!class)
66821 return 0;
66822 }
66823 - atomic_inc((atomic_t *)&class->ops);
66824 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
66825 if (very_verbose(class)) {
66826 printk("\nacquire class [%p] %s", class->key, class->name);
66827 if (class->name_version > 1)
66828 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
66829 index 91c32a0..b2c71c5 100644
66830 --- a/kernel/lockdep_proc.c
66831 +++ b/kernel/lockdep_proc.c
66832 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
66833
66834 static void print_name(struct seq_file *m, struct lock_class *class)
66835 {
66836 - char str[128];
66837 + char str[KSYM_NAME_LEN];
66838 const char *name = class->name;
66839
66840 if (!name) {
66841 diff --git a/kernel/module.c b/kernel/module.c
66842 index 3d56b6f..2a22bd0 100644
66843 --- a/kernel/module.c
66844 +++ b/kernel/module.c
66845 @@ -58,6 +58,7 @@
66846 #include <linux/jump_label.h>
66847 #include <linux/pfn.h>
66848 #include <linux/bsearch.h>
66849 +#include <linux/grsecurity.h>
66850
66851 #define CREATE_TRACE_POINTS
66852 #include <trace/events/module.h>
66853 @@ -113,7 +114,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
66854
66855 /* Bounds of module allocation, for speeding __module_address.
66856 * Protected by module_mutex. */
66857 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66858 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66859 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66860
66861 int register_module_notifier(struct notifier_block * nb)
66862 {
66863 @@ -277,7 +279,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66864 return true;
66865
66866 list_for_each_entry_rcu(mod, &modules, list) {
66867 - struct symsearch arr[] = {
66868 + struct symsearch modarr[] = {
66869 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66870 NOT_GPL_ONLY, false },
66871 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
66872 @@ -299,7 +301,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66873 #endif
66874 };
66875
66876 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66877 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66878 return true;
66879 }
66880 return false;
66881 @@ -431,7 +433,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
66882 static int percpu_modalloc(struct module *mod,
66883 unsigned long size, unsigned long align)
66884 {
66885 - if (align > PAGE_SIZE) {
66886 + if (align-1 >= PAGE_SIZE) {
66887 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
66888 mod->name, align, PAGE_SIZE);
66889 align = PAGE_SIZE;
66890 @@ -1001,7 +1003,7 @@ struct module_attribute module_uevent =
66891 static ssize_t show_coresize(struct module_attribute *mattr,
66892 struct module_kobject *mk, char *buffer)
66893 {
66894 - return sprintf(buffer, "%u\n", mk->mod->core_size);
66895 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
66896 }
66897
66898 static struct module_attribute modinfo_coresize =
66899 @@ -1010,7 +1012,7 @@ static struct module_attribute modinfo_coresize =
66900 static ssize_t show_initsize(struct module_attribute *mattr,
66901 struct module_kobject *mk, char *buffer)
66902 {
66903 - return sprintf(buffer, "%u\n", mk->mod->init_size);
66904 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
66905 }
66906
66907 static struct module_attribute modinfo_initsize =
66908 @@ -1224,7 +1226,7 @@ resolve_symbol_wait(struct module *mod,
66909 */
66910 #ifdef CONFIG_SYSFS
66911
66912 -#ifdef CONFIG_KALLSYMS
66913 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66914 static inline bool sect_empty(const Elf_Shdr *sect)
66915 {
66916 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
66917 @@ -1690,21 +1692,21 @@ static void set_section_ro_nx(void *base,
66918
66919 static void unset_module_core_ro_nx(struct module *mod)
66920 {
66921 - set_page_attributes(mod->module_core + mod->core_text_size,
66922 - mod->module_core + mod->core_size,
66923 + set_page_attributes(mod->module_core_rw,
66924 + mod->module_core_rw + mod->core_size_rw,
66925 set_memory_x);
66926 - set_page_attributes(mod->module_core,
66927 - mod->module_core + mod->core_ro_size,
66928 + set_page_attributes(mod->module_core_rx,
66929 + mod->module_core_rx + mod->core_size_rx,
66930 set_memory_rw);
66931 }
66932
66933 static void unset_module_init_ro_nx(struct module *mod)
66934 {
66935 - set_page_attributes(mod->module_init + mod->init_text_size,
66936 - mod->module_init + mod->init_size,
66937 + set_page_attributes(mod->module_init_rw,
66938 + mod->module_init_rw + mod->init_size_rw,
66939 set_memory_x);
66940 - set_page_attributes(mod->module_init,
66941 - mod->module_init + mod->init_ro_size,
66942 + set_page_attributes(mod->module_init_rx,
66943 + mod->module_init_rx + mod->init_size_rx,
66944 set_memory_rw);
66945 }
66946
66947 @@ -1715,14 +1717,14 @@ void set_all_modules_text_rw(void)
66948
66949 mutex_lock(&module_mutex);
66950 list_for_each_entry_rcu(mod, &modules, list) {
66951 - if ((mod->module_core) && (mod->core_text_size)) {
66952 - set_page_attributes(mod->module_core,
66953 - mod->module_core + mod->core_text_size,
66954 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66955 + set_page_attributes(mod->module_core_rx,
66956 + mod->module_core_rx + mod->core_size_rx,
66957 set_memory_rw);
66958 }
66959 - if ((mod->module_init) && (mod->init_text_size)) {
66960 - set_page_attributes(mod->module_init,
66961 - mod->module_init + mod->init_text_size,
66962 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66963 + set_page_attributes(mod->module_init_rx,
66964 + mod->module_init_rx + mod->init_size_rx,
66965 set_memory_rw);
66966 }
66967 }
66968 @@ -1736,14 +1738,14 @@ void set_all_modules_text_ro(void)
66969
66970 mutex_lock(&module_mutex);
66971 list_for_each_entry_rcu(mod, &modules, list) {
66972 - if ((mod->module_core) && (mod->core_text_size)) {
66973 - set_page_attributes(mod->module_core,
66974 - mod->module_core + mod->core_text_size,
66975 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66976 + set_page_attributes(mod->module_core_rx,
66977 + mod->module_core_rx + mod->core_size_rx,
66978 set_memory_ro);
66979 }
66980 - if ((mod->module_init) && (mod->init_text_size)) {
66981 - set_page_attributes(mod->module_init,
66982 - mod->module_init + mod->init_text_size,
66983 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66984 + set_page_attributes(mod->module_init_rx,
66985 + mod->module_init_rx + mod->init_size_rx,
66986 set_memory_ro);
66987 }
66988 }
66989 @@ -1789,16 +1791,19 @@ static void free_module(struct module *mod)
66990
66991 /* This may be NULL, but that's OK */
66992 unset_module_init_ro_nx(mod);
66993 - module_free(mod, mod->module_init);
66994 + module_free(mod, mod->module_init_rw);
66995 + module_free_exec(mod, mod->module_init_rx);
66996 kfree(mod->args);
66997 percpu_modfree(mod);
66998
66999 /* Free lock-classes: */
67000 - lockdep_free_key_range(mod->module_core, mod->core_size);
67001 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
67002 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
67003
67004 /* Finally, free the core (containing the module structure) */
67005 unset_module_core_ro_nx(mod);
67006 - module_free(mod, mod->module_core);
67007 + module_free_exec(mod, mod->module_core_rx);
67008 + module_free(mod, mod->module_core_rw);
67009
67010 #ifdef CONFIG_MPU
67011 update_protections(current->mm);
67012 @@ -1867,10 +1872,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67013 unsigned int i;
67014 int ret = 0;
67015 const struct kernel_symbol *ksym;
67016 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67017 + int is_fs_load = 0;
67018 + int register_filesystem_found = 0;
67019 + char *p;
67020 +
67021 + p = strstr(mod->args, "grsec_modharden_fs");
67022 + if (p) {
67023 + char *endptr = p + strlen("grsec_modharden_fs");
67024 + /* copy \0 as well */
67025 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
67026 + is_fs_load = 1;
67027 + }
67028 +#endif
67029
67030 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
67031 const char *name = info->strtab + sym[i].st_name;
67032
67033 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67034 + /* it's a real shame this will never get ripped and copied
67035 + upstream! ;(
67036 + */
67037 + if (is_fs_load && !strcmp(name, "register_filesystem"))
67038 + register_filesystem_found = 1;
67039 +#endif
67040 +
67041 switch (sym[i].st_shndx) {
67042 case SHN_COMMON:
67043 /* We compiled with -fno-common. These are not
67044 @@ -1891,7 +1917,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67045 ksym = resolve_symbol_wait(mod, info, name);
67046 /* Ok if resolved. */
67047 if (ksym && !IS_ERR(ksym)) {
67048 + pax_open_kernel();
67049 sym[i].st_value = ksym->value;
67050 + pax_close_kernel();
67051 break;
67052 }
67053
67054 @@ -1910,11 +1938,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67055 secbase = (unsigned long)mod_percpu(mod);
67056 else
67057 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
67058 + pax_open_kernel();
67059 sym[i].st_value += secbase;
67060 + pax_close_kernel();
67061 break;
67062 }
67063 }
67064
67065 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67066 + if (is_fs_load && !register_filesystem_found) {
67067 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
67068 + ret = -EPERM;
67069 + }
67070 +#endif
67071 +
67072 return ret;
67073 }
67074
67075 @@ -2018,22 +2055,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
67076 || s->sh_entsize != ~0UL
67077 || strstarts(sname, ".init"))
67078 continue;
67079 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
67080 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67081 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
67082 + else
67083 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
67084 pr_debug("\t%s\n", sname);
67085 }
67086 - switch (m) {
67087 - case 0: /* executable */
67088 - mod->core_size = debug_align(mod->core_size);
67089 - mod->core_text_size = mod->core_size;
67090 - break;
67091 - case 1: /* RO: text and ro-data */
67092 - mod->core_size = debug_align(mod->core_size);
67093 - mod->core_ro_size = mod->core_size;
67094 - break;
67095 - case 3: /* whole core */
67096 - mod->core_size = debug_align(mod->core_size);
67097 - break;
67098 - }
67099 }
67100
67101 pr_debug("Init section allocation order:\n");
67102 @@ -2047,23 +2074,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
67103 || s->sh_entsize != ~0UL
67104 || !strstarts(sname, ".init"))
67105 continue;
67106 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
67107 - | INIT_OFFSET_MASK);
67108 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67109 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
67110 + else
67111 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
67112 + s->sh_entsize |= INIT_OFFSET_MASK;
67113 pr_debug("\t%s\n", sname);
67114 }
67115 - switch (m) {
67116 - case 0: /* executable */
67117 - mod->init_size = debug_align(mod->init_size);
67118 - mod->init_text_size = mod->init_size;
67119 - break;
67120 - case 1: /* RO: text and ro-data */
67121 - mod->init_size = debug_align(mod->init_size);
67122 - mod->init_ro_size = mod->init_size;
67123 - break;
67124 - case 3: /* whole init */
67125 - mod->init_size = debug_align(mod->init_size);
67126 - break;
67127 - }
67128 }
67129 }
67130
67131 @@ -2235,7 +2252,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67132
67133 /* Put symbol section at end of init part of module. */
67134 symsect->sh_flags |= SHF_ALLOC;
67135 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
67136 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
67137 info->index.sym) | INIT_OFFSET_MASK;
67138 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
67139
67140 @@ -2250,13 +2267,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67141 }
67142
67143 /* Append room for core symbols at end of core part. */
67144 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
67145 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
67146 - mod->core_size += strtab_size;
67147 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
67148 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
67149 + mod->core_size_rx += strtab_size;
67150
67151 /* Put string table section at end of init part of module. */
67152 strsect->sh_flags |= SHF_ALLOC;
67153 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
67154 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
67155 info->index.str) | INIT_OFFSET_MASK;
67156 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
67157 }
67158 @@ -2274,12 +2291,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67159 /* Make sure we get permanent strtab: don't use info->strtab. */
67160 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
67161
67162 + pax_open_kernel();
67163 +
67164 /* Set types up while we still have access to sections. */
67165 for (i = 0; i < mod->num_symtab; i++)
67166 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
67167
67168 - mod->core_symtab = dst = mod->module_core + info->symoffs;
67169 - mod->core_strtab = s = mod->module_core + info->stroffs;
67170 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
67171 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
67172 src = mod->symtab;
67173 *dst = *src;
67174 *s++ = 0;
67175 @@ -2292,6 +2311,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67176 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
67177 }
67178 mod->core_num_syms = ndst;
67179 +
67180 + pax_close_kernel();
67181 }
67182 #else
67183 static inline void layout_symtab(struct module *mod, struct load_info *info)
67184 @@ -2325,17 +2346,33 @@ void * __weak module_alloc(unsigned long size)
67185 return size == 0 ? NULL : vmalloc_exec(size);
67186 }
67187
67188 -static void *module_alloc_update_bounds(unsigned long size)
67189 +static void *module_alloc_update_bounds_rw(unsigned long size)
67190 {
67191 void *ret = module_alloc(size);
67192
67193 if (ret) {
67194 mutex_lock(&module_mutex);
67195 /* Update module bounds. */
67196 - if ((unsigned long)ret < module_addr_min)
67197 - module_addr_min = (unsigned long)ret;
67198 - if ((unsigned long)ret + size > module_addr_max)
67199 - module_addr_max = (unsigned long)ret + size;
67200 + if ((unsigned long)ret < module_addr_min_rw)
67201 + module_addr_min_rw = (unsigned long)ret;
67202 + if ((unsigned long)ret + size > module_addr_max_rw)
67203 + module_addr_max_rw = (unsigned long)ret + size;
67204 + mutex_unlock(&module_mutex);
67205 + }
67206 + return ret;
67207 +}
67208 +
67209 +static void *module_alloc_update_bounds_rx(unsigned long size)
67210 +{
67211 + void *ret = module_alloc_exec(size);
67212 +
67213 + if (ret) {
67214 + mutex_lock(&module_mutex);
67215 + /* Update module bounds. */
67216 + if ((unsigned long)ret < module_addr_min_rx)
67217 + module_addr_min_rx = (unsigned long)ret;
67218 + if ((unsigned long)ret + size > module_addr_max_rx)
67219 + module_addr_max_rx = (unsigned long)ret + size;
67220 mutex_unlock(&module_mutex);
67221 }
67222 return ret;
67223 @@ -2512,8 +2549,14 @@ static struct module *setup_load_info(struct load_info *info)
67224 static int check_modinfo(struct module *mod, struct load_info *info)
67225 {
67226 const char *modmagic = get_modinfo(info, "vermagic");
67227 + const char *license = get_modinfo(info, "license");
67228 int err;
67229
67230 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
67231 + if (!license || !license_is_gpl_compatible(license))
67232 + return -ENOEXEC;
67233 +#endif
67234 +
67235 /* This is allowed: modprobe --force will invalidate it. */
67236 if (!modmagic) {
67237 err = try_to_force_load(mod, "bad vermagic");
67238 @@ -2536,7 +2579,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
67239 }
67240
67241 /* Set up license info based on the info section */
67242 - set_license(mod, get_modinfo(info, "license"));
67243 + set_license(mod, license);
67244
67245 return 0;
67246 }
67247 @@ -2630,7 +2673,7 @@ static int move_module(struct module *mod, struct load_info *info)
67248 void *ptr;
67249
67250 /* Do the allocs. */
67251 - ptr = module_alloc_update_bounds(mod->core_size);
67252 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
67253 /*
67254 * The pointer to this block is stored in the module structure
67255 * which is inside the block. Just mark it as not being a
67256 @@ -2640,23 +2683,50 @@ static int move_module(struct module *mod, struct load_info *info)
67257 if (!ptr)
67258 return -ENOMEM;
67259
67260 - memset(ptr, 0, mod->core_size);
67261 - mod->module_core = ptr;
67262 + memset(ptr, 0, mod->core_size_rw);
67263 + mod->module_core_rw = ptr;
67264
67265 - ptr = module_alloc_update_bounds(mod->init_size);
67266 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
67267 /*
67268 * The pointer to this block is stored in the module structure
67269 * which is inside the block. This block doesn't need to be
67270 * scanned as it contains data and code that will be freed
67271 * after the module is initialized.
67272 */
67273 - kmemleak_ignore(ptr);
67274 - if (!ptr && mod->init_size) {
67275 - module_free(mod, mod->module_core);
67276 + kmemleak_not_leak(ptr);
67277 + if (!ptr && mod->init_size_rw) {
67278 + module_free(mod, mod->module_core_rw);
67279 return -ENOMEM;
67280 }
67281 - memset(ptr, 0, mod->init_size);
67282 - mod->module_init = ptr;
67283 + memset(ptr, 0, mod->init_size_rw);
67284 + mod->module_init_rw = ptr;
67285 +
67286 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
67287 + kmemleak_not_leak(ptr);
67288 + if (!ptr) {
67289 + module_free(mod, mod->module_init_rw);
67290 + module_free(mod, mod->module_core_rw);
67291 + return -ENOMEM;
67292 + }
67293 +
67294 + pax_open_kernel();
67295 + memset(ptr, 0, mod->core_size_rx);
67296 + pax_close_kernel();
67297 + mod->module_core_rx = ptr;
67298 +
67299 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
67300 + kmemleak_not_leak(ptr);
67301 + if (!ptr && mod->init_size_rx) {
67302 + module_free_exec(mod, mod->module_core_rx);
67303 + module_free(mod, mod->module_init_rw);
67304 + module_free(mod, mod->module_core_rw);
67305 + return -ENOMEM;
67306 + }
67307 +
67308 + pax_open_kernel();
67309 + memset(ptr, 0, mod->init_size_rx);
67310 + pax_close_kernel();
67311 + mod->module_init_rx = ptr;
67312
67313 /* Transfer each section which specifies SHF_ALLOC */
67314 pr_debug("final section addresses:\n");
67315 @@ -2667,16 +2737,45 @@ static int move_module(struct module *mod, struct load_info *info)
67316 if (!(shdr->sh_flags & SHF_ALLOC))
67317 continue;
67318
67319 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
67320 - dest = mod->module_init
67321 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67322 - else
67323 - dest = mod->module_core + shdr->sh_entsize;
67324 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
67325 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67326 + dest = mod->module_init_rw
67327 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67328 + else
67329 + dest = mod->module_init_rx
67330 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67331 + } else {
67332 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67333 + dest = mod->module_core_rw + shdr->sh_entsize;
67334 + else
67335 + dest = mod->module_core_rx + shdr->sh_entsize;
67336 + }
67337 +
67338 + if (shdr->sh_type != SHT_NOBITS) {
67339 +
67340 +#ifdef CONFIG_PAX_KERNEXEC
67341 +#ifdef CONFIG_X86_64
67342 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
67343 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
67344 +#endif
67345 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
67346 + pax_open_kernel();
67347 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67348 + pax_close_kernel();
67349 + } else
67350 +#endif
67351
67352 - if (shdr->sh_type != SHT_NOBITS)
67353 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67354 + }
67355 /* Update sh_addr to point to copy in image. */
67356 - shdr->sh_addr = (unsigned long)dest;
67357 +
67358 +#ifdef CONFIG_PAX_KERNEXEC
67359 + if (shdr->sh_flags & SHF_EXECINSTR)
67360 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
67361 + else
67362 +#endif
67363 +
67364 + shdr->sh_addr = (unsigned long)dest;
67365 pr_debug("\t0x%lx %s\n",
67366 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
67367 }
67368 @@ -2727,12 +2826,12 @@ static void flush_module_icache(const struct module *mod)
67369 * Do it before processing of module parameters, so the module
67370 * can provide parameter accessor functions of its own.
67371 */
67372 - if (mod->module_init)
67373 - flush_icache_range((unsigned long)mod->module_init,
67374 - (unsigned long)mod->module_init
67375 - + mod->init_size);
67376 - flush_icache_range((unsigned long)mod->module_core,
67377 - (unsigned long)mod->module_core + mod->core_size);
67378 + if (mod->module_init_rx)
67379 + flush_icache_range((unsigned long)mod->module_init_rx,
67380 + (unsigned long)mod->module_init_rx
67381 + + mod->init_size_rx);
67382 + flush_icache_range((unsigned long)mod->module_core_rx,
67383 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
67384
67385 set_fs(old_fs);
67386 }
67387 @@ -2802,8 +2901,10 @@ out:
67388 static void module_deallocate(struct module *mod, struct load_info *info)
67389 {
67390 percpu_modfree(mod);
67391 - module_free(mod, mod->module_init);
67392 - module_free(mod, mod->module_core);
67393 + module_free_exec(mod, mod->module_init_rx);
67394 + module_free_exec(mod, mod->module_core_rx);
67395 + module_free(mod, mod->module_init_rw);
67396 + module_free(mod, mod->module_core_rw);
67397 }
67398
67399 int __weak module_finalize(const Elf_Ehdr *hdr,
67400 @@ -2867,9 +2968,38 @@ static struct module *load_module(void __user *umod,
67401 if (err)
67402 goto free_unload;
67403
67404 + /* Now copy in args */
67405 + mod->args = strndup_user(uargs, ~0UL >> 1);
67406 + if (IS_ERR(mod->args)) {
67407 + err = PTR_ERR(mod->args);
67408 + goto free_unload;
67409 + }
67410 +
67411 /* Set up MODINFO_ATTR fields */
67412 setup_modinfo(mod, &info);
67413
67414 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67415 + {
67416 + char *p, *p2;
67417 +
67418 + if (strstr(mod->args, "grsec_modharden_netdev")) {
67419 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
67420 + err = -EPERM;
67421 + goto free_modinfo;
67422 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
67423 + p += strlen("grsec_modharden_normal");
67424 + p2 = strstr(p, "_");
67425 + if (p2) {
67426 + *p2 = '\0';
67427 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
67428 + *p2 = '_';
67429 + }
67430 + err = -EPERM;
67431 + goto free_modinfo;
67432 + }
67433 + }
67434 +#endif
67435 +
67436 /* Fix up syms, so that st_value is a pointer to location. */
67437 err = simplify_symbols(mod, &info);
67438 if (err < 0)
67439 @@ -2885,13 +3015,6 @@ static struct module *load_module(void __user *umod,
67440
67441 flush_module_icache(mod);
67442
67443 - /* Now copy in args */
67444 - mod->args = strndup_user(uargs, ~0UL >> 1);
67445 - if (IS_ERR(mod->args)) {
67446 - err = PTR_ERR(mod->args);
67447 - goto free_arch_cleanup;
67448 - }
67449 -
67450 /* Mark state as coming so strong_try_module_get() ignores us. */
67451 mod->state = MODULE_STATE_COMING;
67452
67453 @@ -2948,11 +3071,10 @@ static struct module *load_module(void __user *umod,
67454 unlock:
67455 mutex_unlock(&module_mutex);
67456 synchronize_sched();
67457 - kfree(mod->args);
67458 - free_arch_cleanup:
67459 module_arch_cleanup(mod);
67460 free_modinfo:
67461 free_modinfo(mod);
67462 + kfree(mod->args);
67463 free_unload:
67464 module_unload_free(mod);
67465 free_module:
67466 @@ -2993,16 +3115,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67467 MODULE_STATE_COMING, mod);
67468
67469 /* Set RO and NX regions for core */
67470 - set_section_ro_nx(mod->module_core,
67471 - mod->core_text_size,
67472 - mod->core_ro_size,
67473 - mod->core_size);
67474 + set_section_ro_nx(mod->module_core_rx,
67475 + mod->core_size_rx,
67476 + mod->core_size_rx,
67477 + mod->core_size_rx);
67478
67479 /* Set RO and NX regions for init */
67480 - set_section_ro_nx(mod->module_init,
67481 - mod->init_text_size,
67482 - mod->init_ro_size,
67483 - mod->init_size);
67484 + set_section_ro_nx(mod->module_init_rx,
67485 + mod->init_size_rx,
67486 + mod->init_size_rx,
67487 + mod->init_size_rx);
67488
67489 do_mod_ctors(mod);
67490 /* Start the module */
67491 @@ -3048,11 +3170,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67492 mod->strtab = mod->core_strtab;
67493 #endif
67494 unset_module_init_ro_nx(mod);
67495 - module_free(mod, mod->module_init);
67496 - mod->module_init = NULL;
67497 - mod->init_size = 0;
67498 - mod->init_ro_size = 0;
67499 - mod->init_text_size = 0;
67500 + module_free(mod, mod->module_init_rw);
67501 + module_free_exec(mod, mod->module_init_rx);
67502 + mod->module_init_rw = NULL;
67503 + mod->module_init_rx = NULL;
67504 + mod->init_size_rw = 0;
67505 + mod->init_size_rx = 0;
67506 mutex_unlock(&module_mutex);
67507
67508 return 0;
67509 @@ -3083,10 +3206,16 @@ static const char *get_ksymbol(struct module *mod,
67510 unsigned long nextval;
67511
67512 /* At worse, next value is at end of module */
67513 - if (within_module_init(addr, mod))
67514 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
67515 + if (within_module_init_rx(addr, mod))
67516 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
67517 + else if (within_module_init_rw(addr, mod))
67518 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
67519 + else if (within_module_core_rx(addr, mod))
67520 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
67521 + else if (within_module_core_rw(addr, mod))
67522 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
67523 else
67524 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
67525 + return NULL;
67526
67527 /* Scan for closest preceding symbol, and next symbol. (ELF
67528 starts real symbols at 1). */
67529 @@ -3321,7 +3450,7 @@ static int m_show(struct seq_file *m, void *p)
67530 char buf[8];
67531
67532 seq_printf(m, "%s %u",
67533 - mod->name, mod->init_size + mod->core_size);
67534 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
67535 print_unload_info(m, mod);
67536
67537 /* Informative for users. */
67538 @@ -3330,7 +3459,7 @@ static int m_show(struct seq_file *m, void *p)
67539 mod->state == MODULE_STATE_COMING ? "Loading":
67540 "Live");
67541 /* Used by oprofile and other similar tools. */
67542 - seq_printf(m, " 0x%pK", mod->module_core);
67543 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
67544
67545 /* Taints info */
67546 if (mod->taints)
67547 @@ -3366,7 +3495,17 @@ static const struct file_operations proc_modules_operations = {
67548
67549 static int __init proc_modules_init(void)
67550 {
67551 +#ifndef CONFIG_GRKERNSEC_HIDESYM
67552 +#ifdef CONFIG_GRKERNSEC_PROC_USER
67553 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67554 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67555 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
67556 +#else
67557 proc_create("modules", 0, NULL, &proc_modules_operations);
67558 +#endif
67559 +#else
67560 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67561 +#endif
67562 return 0;
67563 }
67564 module_init(proc_modules_init);
67565 @@ -3425,12 +3564,12 @@ struct module *__module_address(unsigned long addr)
67566 {
67567 struct module *mod;
67568
67569 - if (addr < module_addr_min || addr > module_addr_max)
67570 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
67571 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
67572 return NULL;
67573
67574 list_for_each_entry_rcu(mod, &modules, list)
67575 - if (within_module_core(addr, mod)
67576 - || within_module_init(addr, mod))
67577 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
67578 return mod;
67579 return NULL;
67580 }
67581 @@ -3464,11 +3603,20 @@ bool is_module_text_address(unsigned long addr)
67582 */
67583 struct module *__module_text_address(unsigned long addr)
67584 {
67585 - struct module *mod = __module_address(addr);
67586 + struct module *mod;
67587 +
67588 +#ifdef CONFIG_X86_32
67589 + addr = ktla_ktva(addr);
67590 +#endif
67591 +
67592 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
67593 + return NULL;
67594 +
67595 + mod = __module_address(addr);
67596 +
67597 if (mod) {
67598 /* Make sure it's within the text section. */
67599 - if (!within(addr, mod->module_init, mod->init_text_size)
67600 - && !within(addr, mod->module_core, mod->core_text_size))
67601 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
67602 mod = NULL;
67603 }
67604 return mod;
67605 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
67606 index 7e3443f..b2a1e6b 100644
67607 --- a/kernel/mutex-debug.c
67608 +++ b/kernel/mutex-debug.c
67609 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
67610 }
67611
67612 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67613 - struct thread_info *ti)
67614 + struct task_struct *task)
67615 {
67616 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
67617
67618 /* Mark the current thread as blocked on the lock: */
67619 - ti->task->blocked_on = waiter;
67620 + task->blocked_on = waiter;
67621 }
67622
67623 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67624 - struct thread_info *ti)
67625 + struct task_struct *task)
67626 {
67627 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
67628 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
67629 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
67630 - ti->task->blocked_on = NULL;
67631 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
67632 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
67633 + task->blocked_on = NULL;
67634
67635 list_del_init(&waiter->list);
67636 waiter->task = NULL;
67637 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
67638 index 0799fd3..d06ae3b 100644
67639 --- a/kernel/mutex-debug.h
67640 +++ b/kernel/mutex-debug.h
67641 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
67642 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
67643 extern void debug_mutex_add_waiter(struct mutex *lock,
67644 struct mutex_waiter *waiter,
67645 - struct thread_info *ti);
67646 + struct task_struct *task);
67647 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67648 - struct thread_info *ti);
67649 + struct task_struct *task);
67650 extern void debug_mutex_unlock(struct mutex *lock);
67651 extern void debug_mutex_init(struct mutex *lock, const char *name,
67652 struct lock_class_key *key);
67653 diff --git a/kernel/mutex.c b/kernel/mutex.c
67654 index 89096dd..f91ebc5 100644
67655 --- a/kernel/mutex.c
67656 +++ b/kernel/mutex.c
67657 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67658 spin_lock_mutex(&lock->wait_lock, flags);
67659
67660 debug_mutex_lock_common(lock, &waiter);
67661 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
67662 + debug_mutex_add_waiter(lock, &waiter, task);
67663
67664 /* add waiting tasks to the end of the waitqueue (FIFO): */
67665 list_add_tail(&waiter.list, &lock->wait_list);
67666 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67667 * TASK_UNINTERRUPTIBLE case.)
67668 */
67669 if (unlikely(signal_pending_state(state, task))) {
67670 - mutex_remove_waiter(lock, &waiter,
67671 - task_thread_info(task));
67672 + mutex_remove_waiter(lock, &waiter, task);
67673 mutex_release(&lock->dep_map, 1, ip);
67674 spin_unlock_mutex(&lock->wait_lock, flags);
67675
67676 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67677 done:
67678 lock_acquired(&lock->dep_map, ip);
67679 /* got the lock - rejoice! */
67680 - mutex_remove_waiter(lock, &waiter, current_thread_info());
67681 + mutex_remove_waiter(lock, &waiter, task);
67682 mutex_set_owner(lock);
67683
67684 /* set it to 0 if there are no waiters left: */
67685 diff --git a/kernel/padata.c b/kernel/padata.c
67686 index b452599..5d68f4e 100644
67687 --- a/kernel/padata.c
67688 +++ b/kernel/padata.c
67689 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
67690 padata->pd = pd;
67691 padata->cb_cpu = cb_cpu;
67692
67693 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
67694 - atomic_set(&pd->seq_nr, -1);
67695 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
67696 + atomic_set_unchecked(&pd->seq_nr, -1);
67697
67698 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
67699 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
67700
67701 target_cpu = padata_cpu_hash(padata);
67702 queue = per_cpu_ptr(pd->pqueue, target_cpu);
67703 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
67704 padata_init_pqueues(pd);
67705 padata_init_squeues(pd);
67706 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
67707 - atomic_set(&pd->seq_nr, -1);
67708 + atomic_set_unchecked(&pd->seq_nr, -1);
67709 atomic_set(&pd->reorder_objects, 0);
67710 atomic_set(&pd->refcnt, 0);
67711 pd->pinst = pinst;
67712 diff --git a/kernel/panic.c b/kernel/panic.c
67713 index 8ed89a1..e83856a 100644
67714 --- a/kernel/panic.c
67715 +++ b/kernel/panic.c
67716 @@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
67717 const char *board;
67718
67719 printk(KERN_WARNING "------------[ cut here ]------------\n");
67720 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
67721 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
67722 board = dmi_get_system_info(DMI_PRODUCT_NAME);
67723 if (board)
67724 printk(KERN_WARNING "Hardware name: %s\n", board);
67725 @@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
67726 */
67727 void __stack_chk_fail(void)
67728 {
67729 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
67730 + dump_stack();
67731 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
67732 __builtin_return_address(0));
67733 }
67734 EXPORT_SYMBOL(__stack_chk_fail);
67735 diff --git a/kernel/pid.c b/kernel/pid.c
67736 index 9f08dfa..6765c40 100644
67737 --- a/kernel/pid.c
67738 +++ b/kernel/pid.c
67739 @@ -33,6 +33,7 @@
67740 #include <linux/rculist.h>
67741 #include <linux/bootmem.h>
67742 #include <linux/hash.h>
67743 +#include <linux/security.h>
67744 #include <linux/pid_namespace.h>
67745 #include <linux/init_task.h>
67746 #include <linux/syscalls.h>
67747 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
67748
67749 int pid_max = PID_MAX_DEFAULT;
67750
67751 -#define RESERVED_PIDS 300
67752 +#define RESERVED_PIDS 500
67753
67754 int pid_max_min = RESERVED_PIDS + 1;
67755 int pid_max_max = PID_MAX_LIMIT;
67756 @@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
67757 */
67758 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
67759 {
67760 + struct task_struct *task;
67761 +
67762 rcu_lockdep_assert(rcu_read_lock_held(),
67763 "find_task_by_pid_ns() needs rcu_read_lock()"
67764 " protection");
67765 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67766 +
67767 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67768 +
67769 + if (gr_pid_is_chrooted(task))
67770 + return NULL;
67771 +
67772 + return task;
67773 }
67774
67775 struct task_struct *find_task_by_vpid(pid_t vnr)
67776 @@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
67777 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
67778 }
67779
67780 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
67781 +{
67782 + rcu_lockdep_assert(rcu_read_lock_held(),
67783 + "find_task_by_pid_ns() needs rcu_read_lock()"
67784 + " protection");
67785 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
67786 +}
67787 +
67788 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
67789 {
67790 struct pid *pid;
67791 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
67792 index 125cb67..a4d1c30 100644
67793 --- a/kernel/posix-cpu-timers.c
67794 +++ b/kernel/posix-cpu-timers.c
67795 @@ -6,6 +6,7 @@
67796 #include <linux/posix-timers.h>
67797 #include <linux/errno.h>
67798 #include <linux/math64.h>
67799 +#include <linux/security.h>
67800 #include <asm/uaccess.h>
67801 #include <linux/kernel_stat.h>
67802 #include <trace/events/timer.h>
67803 @@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
67804
67805 static __init int init_posix_cpu_timers(void)
67806 {
67807 - struct k_clock process = {
67808 + static struct k_clock process = {
67809 .clock_getres = process_cpu_clock_getres,
67810 .clock_get = process_cpu_clock_get,
67811 .timer_create = process_cpu_timer_create,
67812 .nsleep = process_cpu_nsleep,
67813 .nsleep_restart = process_cpu_nsleep_restart,
67814 };
67815 - struct k_clock thread = {
67816 + static struct k_clock thread = {
67817 .clock_getres = thread_cpu_clock_getres,
67818 .clock_get = thread_cpu_clock_get,
67819 .timer_create = thread_cpu_timer_create,
67820 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
67821 index 69185ae..cc2847a 100644
67822 --- a/kernel/posix-timers.c
67823 +++ b/kernel/posix-timers.c
67824 @@ -43,6 +43,7 @@
67825 #include <linux/idr.h>
67826 #include <linux/posix-clock.h>
67827 #include <linux/posix-timers.h>
67828 +#include <linux/grsecurity.h>
67829 #include <linux/syscalls.h>
67830 #include <linux/wait.h>
67831 #include <linux/workqueue.h>
67832 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
67833 * which we beg off on and pass to do_sys_settimeofday().
67834 */
67835
67836 -static struct k_clock posix_clocks[MAX_CLOCKS];
67837 +static struct k_clock *posix_clocks[MAX_CLOCKS];
67838
67839 /*
67840 * These ones are defined below.
67841 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
67842 */
67843 static __init int init_posix_timers(void)
67844 {
67845 - struct k_clock clock_realtime = {
67846 + static struct k_clock clock_realtime = {
67847 .clock_getres = hrtimer_get_res,
67848 .clock_get = posix_clock_realtime_get,
67849 .clock_set = posix_clock_realtime_set,
67850 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
67851 .timer_get = common_timer_get,
67852 .timer_del = common_timer_del,
67853 };
67854 - struct k_clock clock_monotonic = {
67855 + static struct k_clock clock_monotonic = {
67856 .clock_getres = hrtimer_get_res,
67857 .clock_get = posix_ktime_get_ts,
67858 .nsleep = common_nsleep,
67859 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
67860 .timer_get = common_timer_get,
67861 .timer_del = common_timer_del,
67862 };
67863 - struct k_clock clock_monotonic_raw = {
67864 + static struct k_clock clock_monotonic_raw = {
67865 .clock_getres = hrtimer_get_res,
67866 .clock_get = posix_get_monotonic_raw,
67867 };
67868 - struct k_clock clock_realtime_coarse = {
67869 + static struct k_clock clock_realtime_coarse = {
67870 .clock_getres = posix_get_coarse_res,
67871 .clock_get = posix_get_realtime_coarse,
67872 };
67873 - struct k_clock clock_monotonic_coarse = {
67874 + static struct k_clock clock_monotonic_coarse = {
67875 .clock_getres = posix_get_coarse_res,
67876 .clock_get = posix_get_monotonic_coarse,
67877 };
67878 - struct k_clock clock_boottime = {
67879 + static struct k_clock clock_boottime = {
67880 .clock_getres = hrtimer_get_res,
67881 .clock_get = posix_get_boottime,
67882 .nsleep = common_nsleep,
67883 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
67884 return;
67885 }
67886
67887 - posix_clocks[clock_id] = *new_clock;
67888 + posix_clocks[clock_id] = new_clock;
67889 }
67890 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
67891
67892 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
67893 return (id & CLOCKFD_MASK) == CLOCKFD ?
67894 &clock_posix_dynamic : &clock_posix_cpu;
67895
67896 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67897 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67898 return NULL;
67899 - return &posix_clocks[id];
67900 + return posix_clocks[id];
67901 }
67902
67903 static int common_timer_create(struct k_itimer *new_timer)
67904 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
67905 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67906 return -EFAULT;
67907
67908 + /* only the CLOCK_REALTIME clock can be set, all other clocks
67909 + have their clock_set fptr set to a nosettime dummy function
67910 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
67911 + call common_clock_set, which calls do_sys_settimeofday, which
67912 + we hook
67913 + */
67914 +
67915 return kc->clock_set(which_clock, &new_tp);
67916 }
67917
67918 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67919 index d523593..68197a4 100644
67920 --- a/kernel/power/poweroff.c
67921 +++ b/kernel/power/poweroff.c
67922 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
67923 .enable_mask = SYSRQ_ENABLE_BOOT,
67924 };
67925
67926 -static int pm_sysrq_init(void)
67927 +static int __init pm_sysrq_init(void)
67928 {
67929 register_sysrq_key('o', &sysrq_poweroff_op);
67930 return 0;
67931 diff --git a/kernel/power/process.c b/kernel/power/process.c
67932 index 7aac07a..2d3c6dc 100644
67933 --- a/kernel/power/process.c
67934 +++ b/kernel/power/process.c
67935 @@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
67936 u64 elapsed_csecs64;
67937 unsigned int elapsed_csecs;
67938 bool wakeup = false;
67939 + bool timedout = false;
67940
67941 do_gettimeofday(&start);
67942
67943 @@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
67944
67945 while (true) {
67946 todo = 0;
67947 + if (time_after(jiffies, end_time))
67948 + timedout = true;
67949 read_lock(&tasklist_lock);
67950 do_each_thread(g, p) {
67951 if (p == current || !freeze_task(p))
67952 @@ -60,9 +63,13 @@ static int try_to_freeze_tasks(bool user_only)
67953 * try_to_stop() after schedule() in ptrace/signal
67954 * stop sees TIF_FREEZE.
67955 */
67956 - if (!task_is_stopped_or_traced(p) &&
67957 - !freezer_should_skip(p))
67958 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67959 todo++;
67960 + if (timedout) {
67961 + printk(KERN_ERR "Task refusing to freeze:\n");
67962 + sched_show_task(p);
67963 + }
67964 + }
67965 } while_each_thread(g, p);
67966 read_unlock(&tasklist_lock);
67967
67968 @@ -71,7 +78,7 @@ static int try_to_freeze_tasks(bool user_only)
67969 todo += wq_busy;
67970 }
67971
67972 - if (!todo || time_after(jiffies, end_time))
67973 + if (!todo || timedout)
67974 break;
67975
67976 if (pm_wakeup_pending()) {
67977 diff --git a/kernel/printk.c b/kernel/printk.c
67978 index 32690a0..cd7c798 100644
67979 --- a/kernel/printk.c
67980 +++ b/kernel/printk.c
67981 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
67982 if (from_file && type != SYSLOG_ACTION_OPEN)
67983 return 0;
67984
67985 +#ifdef CONFIG_GRKERNSEC_DMESG
67986 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67987 + return -EPERM;
67988 +#endif
67989 +
67990 if (syslog_action_restricted(type)) {
67991 if (capable(CAP_SYSLOG))
67992 return 0;
67993 diff --git a/kernel/profile.c b/kernel/profile.c
67994 index 76b8e77..a2930e8 100644
67995 --- a/kernel/profile.c
67996 +++ b/kernel/profile.c
67997 @@ -39,7 +39,7 @@ struct profile_hit {
67998 /* Oprofile timer tick hook */
67999 static int (*timer_hook)(struct pt_regs *) __read_mostly;
68000
68001 -static atomic_t *prof_buffer;
68002 +static atomic_unchecked_t *prof_buffer;
68003 static unsigned long prof_len, prof_shift;
68004
68005 int prof_on __read_mostly;
68006 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
68007 hits[i].pc = 0;
68008 continue;
68009 }
68010 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
68011 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
68012 hits[i].hits = hits[i].pc = 0;
68013 }
68014 }
68015 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
68016 * Add the current hit(s) and flush the write-queue out
68017 * to the global buffer:
68018 */
68019 - atomic_add(nr_hits, &prof_buffer[pc]);
68020 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
68021 for (i = 0; i < NR_PROFILE_HIT; ++i) {
68022 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
68023 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
68024 hits[i].pc = hits[i].hits = 0;
68025 }
68026 out:
68027 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
68028 {
68029 unsigned long pc;
68030 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
68031 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
68032 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
68033 }
68034 #endif /* !CONFIG_SMP */
68035
68036 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
68037 return -EFAULT;
68038 buf++; p++; count--; read++;
68039 }
68040 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
68041 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
68042 if (copy_to_user(buf, (void *)pnt, count))
68043 return -EFAULT;
68044 read += count;
68045 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
68046 }
68047 #endif
68048 profile_discard_flip_buffers();
68049 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
68050 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
68051 return count;
68052 }
68053
68054 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
68055 index 00ab2ca..d237f61 100644
68056 --- a/kernel/ptrace.c
68057 +++ b/kernel/ptrace.c
68058 @@ -285,7 +285,7 @@ static int ptrace_attach(struct task_struct *task, long request,
68059 task->ptrace = PT_PTRACED;
68060 if (seize)
68061 task->ptrace |= PT_SEIZED;
68062 - if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
68063 + if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
68064 task->ptrace |= PT_PTRACE_CAP;
68065
68066 __ptrace_link(task, current);
68067 @@ -491,7 +491,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
68068 break;
68069 return -EIO;
68070 }
68071 - if (copy_to_user(dst, buf, retval))
68072 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
68073 return -EFAULT;
68074 copied += retval;
68075 src += retval;
68076 @@ -688,7 +688,7 @@ int ptrace_request(struct task_struct *child, long request,
68077 bool seized = child->ptrace & PT_SEIZED;
68078 int ret = -EIO;
68079 siginfo_t siginfo, *si;
68080 - void __user *datavp = (void __user *) data;
68081 + void __user *datavp = (__force void __user *) data;
68082 unsigned long __user *datalp = datavp;
68083 unsigned long flags;
68084
68085 @@ -890,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
68086 goto out;
68087 }
68088
68089 + if (gr_handle_ptrace(child, request)) {
68090 + ret = -EPERM;
68091 + goto out_put_task_struct;
68092 + }
68093 +
68094 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68095 ret = ptrace_attach(child, request, data);
68096 /*
68097 * Some architectures need to do book-keeping after
68098 * a ptrace attach.
68099 */
68100 - if (!ret)
68101 + if (!ret) {
68102 arch_ptrace_attach(child);
68103 + gr_audit_ptrace(child);
68104 + }
68105 goto out_put_task_struct;
68106 }
68107
68108 @@ -923,7 +930,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
68109 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
68110 if (copied != sizeof(tmp))
68111 return -EIO;
68112 - return put_user(tmp, (unsigned long __user *)data);
68113 + return put_user(tmp, (__force unsigned long __user *)data);
68114 }
68115
68116 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
68117 @@ -1033,14 +1040,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
68118 goto out;
68119 }
68120
68121 + if (gr_handle_ptrace(child, request)) {
68122 + ret = -EPERM;
68123 + goto out_put_task_struct;
68124 + }
68125 +
68126 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68127 ret = ptrace_attach(child, request, data);
68128 /*
68129 * Some architectures need to do book-keeping after
68130 * a ptrace attach.
68131 */
68132 - if (!ret)
68133 + if (!ret) {
68134 arch_ptrace_attach(child);
68135 + gr_audit_ptrace(child);
68136 + }
68137 goto out_put_task_struct;
68138 }
68139
68140 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
68141 index 977296d..c4744dc 100644
68142 --- a/kernel/rcutiny.c
68143 +++ b/kernel/rcutiny.c
68144 @@ -46,7 +46,7 @@
68145 struct rcu_ctrlblk;
68146 static void invoke_rcu_callbacks(void);
68147 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
68148 -static void rcu_process_callbacks(struct softirq_action *unused);
68149 +static void rcu_process_callbacks(void);
68150 static void __call_rcu(struct rcu_head *head,
68151 void (*func)(struct rcu_head *rcu),
68152 struct rcu_ctrlblk *rcp);
68153 @@ -297,7 +297,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
68154 rcu_is_callbacks_kthread()));
68155 }
68156
68157 -static void rcu_process_callbacks(struct softirq_action *unused)
68158 +static void rcu_process_callbacks(void)
68159 {
68160 __rcu_process_callbacks(&rcu_sched_ctrlblk);
68161 __rcu_process_callbacks(&rcu_bh_ctrlblk);
68162 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
68163 index 9cb1ae4..aac7d3e 100644
68164 --- a/kernel/rcutiny_plugin.h
68165 +++ b/kernel/rcutiny_plugin.h
68166 @@ -920,7 +920,7 @@ static int rcu_kthread(void *arg)
68167 have_rcu_kthread_work = morework;
68168 local_irq_restore(flags);
68169 if (work)
68170 - rcu_process_callbacks(NULL);
68171 + rcu_process_callbacks();
68172 schedule_timeout_interruptible(1); /* Leave CPU for others. */
68173 }
68174
68175 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
68176 index a58ac28..196a3d8 100644
68177 --- a/kernel/rcutorture.c
68178 +++ b/kernel/rcutorture.c
68179 @@ -148,12 +148,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
68180 { 0 };
68181 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
68182 { 0 };
68183 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68184 -static atomic_t n_rcu_torture_alloc;
68185 -static atomic_t n_rcu_torture_alloc_fail;
68186 -static atomic_t n_rcu_torture_free;
68187 -static atomic_t n_rcu_torture_mberror;
68188 -static atomic_t n_rcu_torture_error;
68189 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68190 +static atomic_unchecked_t n_rcu_torture_alloc;
68191 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
68192 +static atomic_unchecked_t n_rcu_torture_free;
68193 +static atomic_unchecked_t n_rcu_torture_mberror;
68194 +static atomic_unchecked_t n_rcu_torture_error;
68195 static long n_rcu_torture_boost_ktrerror;
68196 static long n_rcu_torture_boost_rterror;
68197 static long n_rcu_torture_boost_failure;
68198 @@ -243,11 +243,11 @@ rcu_torture_alloc(void)
68199
68200 spin_lock_bh(&rcu_torture_lock);
68201 if (list_empty(&rcu_torture_freelist)) {
68202 - atomic_inc(&n_rcu_torture_alloc_fail);
68203 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
68204 spin_unlock_bh(&rcu_torture_lock);
68205 return NULL;
68206 }
68207 - atomic_inc(&n_rcu_torture_alloc);
68208 + atomic_inc_unchecked(&n_rcu_torture_alloc);
68209 p = rcu_torture_freelist.next;
68210 list_del_init(p);
68211 spin_unlock_bh(&rcu_torture_lock);
68212 @@ -260,7 +260,7 @@ rcu_torture_alloc(void)
68213 static void
68214 rcu_torture_free(struct rcu_torture *p)
68215 {
68216 - atomic_inc(&n_rcu_torture_free);
68217 + atomic_inc_unchecked(&n_rcu_torture_free);
68218 spin_lock_bh(&rcu_torture_lock);
68219 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
68220 spin_unlock_bh(&rcu_torture_lock);
68221 @@ -380,7 +380,7 @@ rcu_torture_cb(struct rcu_head *p)
68222 i = rp->rtort_pipe_count;
68223 if (i > RCU_TORTURE_PIPE_LEN)
68224 i = RCU_TORTURE_PIPE_LEN;
68225 - atomic_inc(&rcu_torture_wcount[i]);
68226 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
68227 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68228 rp->rtort_mbtest = 0;
68229 rcu_torture_free(rp);
68230 @@ -427,7 +427,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
68231 i = rp->rtort_pipe_count;
68232 if (i > RCU_TORTURE_PIPE_LEN)
68233 i = RCU_TORTURE_PIPE_LEN;
68234 - atomic_inc(&rcu_torture_wcount[i]);
68235 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
68236 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68237 rp->rtort_mbtest = 0;
68238 list_del(&rp->rtort_free);
68239 @@ -916,7 +916,7 @@ rcu_torture_writer(void *arg)
68240 i = old_rp->rtort_pipe_count;
68241 if (i > RCU_TORTURE_PIPE_LEN)
68242 i = RCU_TORTURE_PIPE_LEN;
68243 - atomic_inc(&rcu_torture_wcount[i]);
68244 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
68245 old_rp->rtort_pipe_count++;
68246 cur_ops->deferred_free(old_rp);
68247 }
68248 @@ -997,7 +997,7 @@ static void rcu_torture_timer(unsigned long unused)
68249 return;
68250 }
68251 if (p->rtort_mbtest == 0)
68252 - atomic_inc(&n_rcu_torture_mberror);
68253 + atomic_inc_unchecked(&n_rcu_torture_mberror);
68254 spin_lock(&rand_lock);
68255 cur_ops->read_delay(&rand);
68256 n_rcu_torture_timers++;
68257 @@ -1061,7 +1061,7 @@ rcu_torture_reader(void *arg)
68258 continue;
68259 }
68260 if (p->rtort_mbtest == 0)
68261 - atomic_inc(&n_rcu_torture_mberror);
68262 + atomic_inc_unchecked(&n_rcu_torture_mberror);
68263 cur_ops->read_delay(&rand);
68264 preempt_disable();
68265 pipe_count = p->rtort_pipe_count;
68266 @@ -1123,10 +1123,10 @@ rcu_torture_printk(char *page)
68267 rcu_torture_current,
68268 rcu_torture_current_version,
68269 list_empty(&rcu_torture_freelist),
68270 - atomic_read(&n_rcu_torture_alloc),
68271 - atomic_read(&n_rcu_torture_alloc_fail),
68272 - atomic_read(&n_rcu_torture_free),
68273 - atomic_read(&n_rcu_torture_mberror),
68274 + atomic_read_unchecked(&n_rcu_torture_alloc),
68275 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
68276 + atomic_read_unchecked(&n_rcu_torture_free),
68277 + atomic_read_unchecked(&n_rcu_torture_mberror),
68278 n_rcu_torture_boost_ktrerror,
68279 n_rcu_torture_boost_rterror,
68280 n_rcu_torture_boost_failure,
68281 @@ -1136,7 +1136,7 @@ rcu_torture_printk(char *page)
68282 n_online_attempts,
68283 n_offline_successes,
68284 n_offline_attempts);
68285 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
68286 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
68287 n_rcu_torture_boost_ktrerror != 0 ||
68288 n_rcu_torture_boost_rterror != 0 ||
68289 n_rcu_torture_boost_failure != 0)
68290 @@ -1144,7 +1144,7 @@ rcu_torture_printk(char *page)
68291 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
68292 if (i > 1) {
68293 cnt += sprintf(&page[cnt], "!!! ");
68294 - atomic_inc(&n_rcu_torture_error);
68295 + atomic_inc_unchecked(&n_rcu_torture_error);
68296 WARN_ON_ONCE(1);
68297 }
68298 cnt += sprintf(&page[cnt], "Reader Pipe: ");
68299 @@ -1158,7 +1158,7 @@ rcu_torture_printk(char *page)
68300 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
68301 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68302 cnt += sprintf(&page[cnt], " %d",
68303 - atomic_read(&rcu_torture_wcount[i]));
68304 + atomic_read_unchecked(&rcu_torture_wcount[i]));
68305 }
68306 cnt += sprintf(&page[cnt], "\n");
68307 if (cur_ops->stats)
68308 @@ -1600,7 +1600,7 @@ rcu_torture_cleanup(void)
68309
68310 if (cur_ops->cleanup)
68311 cur_ops->cleanup();
68312 - if (atomic_read(&n_rcu_torture_error))
68313 + if (atomic_read_unchecked(&n_rcu_torture_error))
68314 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
68315 else
68316 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
68317 @@ -1664,17 +1664,17 @@ rcu_torture_init(void)
68318
68319 rcu_torture_current = NULL;
68320 rcu_torture_current_version = 0;
68321 - atomic_set(&n_rcu_torture_alloc, 0);
68322 - atomic_set(&n_rcu_torture_alloc_fail, 0);
68323 - atomic_set(&n_rcu_torture_free, 0);
68324 - atomic_set(&n_rcu_torture_mberror, 0);
68325 - atomic_set(&n_rcu_torture_error, 0);
68326 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
68327 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
68328 + atomic_set_unchecked(&n_rcu_torture_free, 0);
68329 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
68330 + atomic_set_unchecked(&n_rcu_torture_error, 0);
68331 n_rcu_torture_boost_ktrerror = 0;
68332 n_rcu_torture_boost_rterror = 0;
68333 n_rcu_torture_boost_failure = 0;
68334 n_rcu_torture_boosts = 0;
68335 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
68336 - atomic_set(&rcu_torture_wcount[i], 0);
68337 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
68338 for_each_possible_cpu(cpu) {
68339 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68340 per_cpu(rcu_torture_count, cpu)[i] = 0;
68341 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
68342 index 6c4a672..70f3202 100644
68343 --- a/kernel/rcutree.c
68344 +++ b/kernel/rcutree.c
68345 @@ -363,9 +363,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
68346 rcu_prepare_for_idle(smp_processor_id());
68347 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68348 smp_mb__before_atomic_inc(); /* See above. */
68349 - atomic_inc(&rdtp->dynticks);
68350 + atomic_inc_unchecked(&rdtp->dynticks);
68351 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
68352 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68353 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68354 }
68355
68356 /**
68357 @@ -438,10 +438,10 @@ void rcu_irq_exit(void)
68358 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
68359 {
68360 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
68361 - atomic_inc(&rdtp->dynticks);
68362 + atomic_inc_unchecked(&rdtp->dynticks);
68363 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68364 smp_mb__after_atomic_inc(); /* See above. */
68365 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68366 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68367 rcu_cleanup_after_idle(smp_processor_id());
68368 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
68369 if (!is_idle_task(current)) {
68370 @@ -531,14 +531,14 @@ void rcu_nmi_enter(void)
68371 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
68372
68373 if (rdtp->dynticks_nmi_nesting == 0 &&
68374 - (atomic_read(&rdtp->dynticks) & 0x1))
68375 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
68376 return;
68377 rdtp->dynticks_nmi_nesting++;
68378 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
68379 - atomic_inc(&rdtp->dynticks);
68380 + atomic_inc_unchecked(&rdtp->dynticks);
68381 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68382 smp_mb__after_atomic_inc(); /* See above. */
68383 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68384 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68385 }
68386
68387 /**
68388 @@ -557,9 +557,9 @@ void rcu_nmi_exit(void)
68389 return;
68390 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68391 smp_mb__before_atomic_inc(); /* See above. */
68392 - atomic_inc(&rdtp->dynticks);
68393 + atomic_inc_unchecked(&rdtp->dynticks);
68394 smp_mb__after_atomic_inc(); /* Force delay to next write. */
68395 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68396 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68397 }
68398
68399 #ifdef CONFIG_PROVE_RCU
68400 @@ -575,7 +575,7 @@ int rcu_is_cpu_idle(void)
68401 int ret;
68402
68403 preempt_disable();
68404 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68405 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68406 preempt_enable();
68407 return ret;
68408 }
68409 @@ -604,7 +604,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
68410 */
68411 static int dyntick_save_progress_counter(struct rcu_data *rdp)
68412 {
68413 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
68414 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68415 return (rdp->dynticks_snap & 0x1) == 0;
68416 }
68417
68418 @@ -619,7 +619,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
68419 unsigned int curr;
68420 unsigned int snap;
68421
68422 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
68423 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68424 snap = (unsigned int)rdp->dynticks_snap;
68425
68426 /*
68427 @@ -1667,7 +1667,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
68428 /*
68429 * Do RCU core processing for the current CPU.
68430 */
68431 -static void rcu_process_callbacks(struct softirq_action *unused)
68432 +static void rcu_process_callbacks(void)
68433 {
68434 trace_rcu_utilization("Start RCU core");
68435 __rcu_process_callbacks(&rcu_sched_state,
68436 @@ -2030,7 +2030,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
68437 rdp->qlen = 0;
68438 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
68439 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
68440 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
68441 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
68442 rdp->cpu = cpu;
68443 rdp->rsp = rsp;
68444 raw_spin_unlock_irqrestore(&rnp->lock, flags);
68445 @@ -2058,8 +2058,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
68446 rdp->n_force_qs_snap = rsp->n_force_qs;
68447 rdp->blimit = blimit;
68448 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
68449 - atomic_set(&rdp->dynticks->dynticks,
68450 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
68451 + atomic_set_unchecked(&rdp->dynticks->dynticks,
68452 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
68453 rcu_prepare_for_idle_init(cpu);
68454 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
68455
68456 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
68457 index fddff92..2c08359 100644
68458 --- a/kernel/rcutree.h
68459 +++ b/kernel/rcutree.h
68460 @@ -87,7 +87,7 @@ struct rcu_dynticks {
68461 long long dynticks_nesting; /* Track irq/process nesting level. */
68462 /* Process level is worth LLONG_MAX/2. */
68463 int dynticks_nmi_nesting; /* Track NMI nesting level. */
68464 - atomic_t dynticks; /* Even value for idle, else odd. */
68465 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
68466 };
68467
68468 /* RCU's kthread states for tracing. */
68469 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
68470 index 8bb35d7..6ea0a463 100644
68471 --- a/kernel/rcutree_plugin.h
68472 +++ b/kernel/rcutree_plugin.h
68473 @@ -850,7 +850,7 @@ void synchronize_rcu_expedited(void)
68474
68475 /* Clean up and exit. */
68476 smp_mb(); /* ensure expedited GP seen before counter increment. */
68477 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
68478 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
68479 unlock_mb_ret:
68480 mutex_unlock(&sync_rcu_preempt_exp_mutex);
68481 mb_ret:
68482 @@ -1833,8 +1833,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
68483
68484 #else /* #ifndef CONFIG_SMP */
68485
68486 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
68487 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
68488 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
68489 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
68490
68491 static int synchronize_sched_expedited_cpu_stop(void *data)
68492 {
68493 @@ -1889,7 +1889,7 @@ void synchronize_sched_expedited(void)
68494 int firstsnap, s, snap, trycount = 0;
68495
68496 /* Note that atomic_inc_return() implies full memory barrier. */
68497 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
68498 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
68499 get_online_cpus();
68500
68501 /*
68502 @@ -1910,7 +1910,7 @@ void synchronize_sched_expedited(void)
68503 }
68504
68505 /* Check to see if someone else did our work for us. */
68506 - s = atomic_read(&sync_sched_expedited_done);
68507 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68508 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
68509 smp_mb(); /* ensure test happens before caller kfree */
68510 return;
68511 @@ -1925,7 +1925,7 @@ void synchronize_sched_expedited(void)
68512 * grace period works for us.
68513 */
68514 get_online_cpus();
68515 - snap = atomic_read(&sync_sched_expedited_started);
68516 + snap = atomic_read_unchecked(&sync_sched_expedited_started);
68517 smp_mb(); /* ensure read is before try_stop_cpus(). */
68518 }
68519
68520 @@ -1936,12 +1936,12 @@ void synchronize_sched_expedited(void)
68521 * than we did beat us to the punch.
68522 */
68523 do {
68524 - s = atomic_read(&sync_sched_expedited_done);
68525 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68526 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
68527 smp_mb(); /* ensure test happens before caller kfree */
68528 break;
68529 }
68530 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
68531 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
68532
68533 put_online_cpus();
68534 }
68535 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
68536 index 654cfe6..c0b28e2 100644
68537 --- a/kernel/rcutree_trace.c
68538 +++ b/kernel/rcutree_trace.c
68539 @@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
68540 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
68541 rdp->qs_pending);
68542 seq_printf(m, " dt=%d/%llx/%d df=%lu",
68543 - atomic_read(&rdp->dynticks->dynticks),
68544 + atomic_read_unchecked(&rdp->dynticks->dynticks),
68545 rdp->dynticks->dynticks_nesting,
68546 rdp->dynticks->dynticks_nmi_nesting,
68547 rdp->dynticks_fqs);
68548 @@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
68549 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
68550 rdp->qs_pending);
68551 seq_printf(m, ",%d,%llx,%d,%lu",
68552 - atomic_read(&rdp->dynticks->dynticks),
68553 + atomic_read_unchecked(&rdp->dynticks->dynticks),
68554 rdp->dynticks->dynticks_nesting,
68555 rdp->dynticks->dynticks_nmi_nesting,
68556 rdp->dynticks_fqs);
68557 diff --git a/kernel/resource.c b/kernel/resource.c
68558 index 7640b3a..5879283 100644
68559 --- a/kernel/resource.c
68560 +++ b/kernel/resource.c
68561 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
68562
68563 static int __init ioresources_init(void)
68564 {
68565 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68566 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68567 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
68568 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
68569 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68570 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
68571 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
68572 +#endif
68573 +#else
68574 proc_create("ioports", 0, NULL, &proc_ioports_operations);
68575 proc_create("iomem", 0, NULL, &proc_iomem_operations);
68576 +#endif
68577 return 0;
68578 }
68579 __initcall(ioresources_init);
68580 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
68581 index 98ec494..4241d6d 100644
68582 --- a/kernel/rtmutex-tester.c
68583 +++ b/kernel/rtmutex-tester.c
68584 @@ -20,7 +20,7 @@
68585 #define MAX_RT_TEST_MUTEXES 8
68586
68587 static spinlock_t rttest_lock;
68588 -static atomic_t rttest_event;
68589 +static atomic_unchecked_t rttest_event;
68590
68591 struct test_thread_data {
68592 int opcode;
68593 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68594
68595 case RTTEST_LOCKCONT:
68596 td->mutexes[td->opdata] = 1;
68597 - td->event = atomic_add_return(1, &rttest_event);
68598 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68599 return 0;
68600
68601 case RTTEST_RESET:
68602 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68603 return 0;
68604
68605 case RTTEST_RESETEVENT:
68606 - atomic_set(&rttest_event, 0);
68607 + atomic_set_unchecked(&rttest_event, 0);
68608 return 0;
68609
68610 default:
68611 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68612 return ret;
68613
68614 td->mutexes[id] = 1;
68615 - td->event = atomic_add_return(1, &rttest_event);
68616 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68617 rt_mutex_lock(&mutexes[id]);
68618 - td->event = atomic_add_return(1, &rttest_event);
68619 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68620 td->mutexes[id] = 4;
68621 return 0;
68622
68623 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68624 return ret;
68625
68626 td->mutexes[id] = 1;
68627 - td->event = atomic_add_return(1, &rttest_event);
68628 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68629 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
68630 - td->event = atomic_add_return(1, &rttest_event);
68631 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68632 td->mutexes[id] = ret ? 0 : 4;
68633 return ret ? -EINTR : 0;
68634
68635 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68636 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
68637 return ret;
68638
68639 - td->event = atomic_add_return(1, &rttest_event);
68640 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68641 rt_mutex_unlock(&mutexes[id]);
68642 - td->event = atomic_add_return(1, &rttest_event);
68643 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68644 td->mutexes[id] = 0;
68645 return 0;
68646
68647 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68648 break;
68649
68650 td->mutexes[dat] = 2;
68651 - td->event = atomic_add_return(1, &rttest_event);
68652 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68653 break;
68654
68655 default:
68656 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68657 return;
68658
68659 td->mutexes[dat] = 3;
68660 - td->event = atomic_add_return(1, &rttest_event);
68661 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68662 break;
68663
68664 case RTTEST_LOCKNOWAIT:
68665 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68666 return;
68667
68668 td->mutexes[dat] = 1;
68669 - td->event = atomic_add_return(1, &rttest_event);
68670 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68671 return;
68672
68673 default:
68674 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
68675 index e8a1f83..363d17d 100644
68676 --- a/kernel/sched/auto_group.c
68677 +++ b/kernel/sched/auto_group.c
68678 @@ -11,7 +11,7 @@
68679
68680 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
68681 static struct autogroup autogroup_default;
68682 -static atomic_t autogroup_seq_nr;
68683 +static atomic_unchecked_t autogroup_seq_nr;
68684
68685 void __init autogroup_init(struct task_struct *init_task)
68686 {
68687 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
68688
68689 kref_init(&ag->kref);
68690 init_rwsem(&ag->lock);
68691 - ag->id = atomic_inc_return(&autogroup_seq_nr);
68692 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
68693 ag->tg = tg;
68694 #ifdef CONFIG_RT_GROUP_SCHED
68695 /*
68696 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
68697 index b342f57..00324a0 100644
68698 --- a/kernel/sched/core.c
68699 +++ b/kernel/sched/core.c
68700 @@ -3143,6 +3143,19 @@ pick_next_task(struct rq *rq)
68701 BUG(); /* the idle class will always have a runnable task */
68702 }
68703
68704 +#ifdef CONFIG_GRKERNSEC_SETXID
68705 +extern void gr_delayed_cred_worker(void);
68706 +static inline void gr_cred_schedule(void)
68707 +{
68708 + if (unlikely(current->delayed_cred))
68709 + gr_delayed_cred_worker();
68710 +}
68711 +#else
68712 +static inline void gr_cred_schedule(void)
68713 +{
68714 +}
68715 +#endif
68716 +
68717 /*
68718 * __schedule() is the main scheduler function.
68719 */
68720 @@ -3162,6 +3175,8 @@ need_resched:
68721
68722 schedule_debug(prev);
68723
68724 + gr_cred_schedule();
68725 +
68726 if (sched_feat(HRTICK))
68727 hrtick_clear(rq);
68728
68729 @@ -3852,6 +3867,8 @@ int can_nice(const struct task_struct *p, const int nice)
68730 /* convert nice value [19,-20] to rlimit style value [1,40] */
68731 int nice_rlim = 20 - nice;
68732
68733 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
68734 +
68735 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
68736 capable(CAP_SYS_NICE));
68737 }
68738 @@ -3885,7 +3902,8 @@ SYSCALL_DEFINE1(nice, int, increment)
68739 if (nice > 19)
68740 nice = 19;
68741
68742 - if (increment < 0 && !can_nice(current, nice))
68743 + if (increment < 0 && (!can_nice(current, nice) ||
68744 + gr_handle_chroot_nice()))
68745 return -EPERM;
68746
68747 retval = security_task_setnice(current, nice);
68748 @@ -4042,6 +4060,7 @@ recheck:
68749 unsigned long rlim_rtprio =
68750 task_rlimit(p, RLIMIT_RTPRIO);
68751
68752 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
68753 /* can't set/change the rt policy */
68754 if (policy != p->policy && !rlim_rtprio)
68755 return -EPERM;
68756 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
68757 index aca16b8..8e3acc4 100644
68758 --- a/kernel/sched/fair.c
68759 +++ b/kernel/sched/fair.c
68760 @@ -5147,7 +5147,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
68761 * run_rebalance_domains is triggered when needed from the scheduler tick.
68762 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
68763 */
68764 -static void run_rebalance_domains(struct softirq_action *h)
68765 +static void run_rebalance_domains(void)
68766 {
68767 int this_cpu = smp_processor_id();
68768 struct rq *this_rq = cpu_rq(this_cpu);
68769 diff --git a/kernel/signal.c b/kernel/signal.c
68770 index c73c428..7040057 100644
68771 --- a/kernel/signal.c
68772 +++ b/kernel/signal.c
68773 @@ -46,12 +46,12 @@ static struct kmem_cache *sigqueue_cachep;
68774
68775 int print_fatal_signals __read_mostly;
68776
68777 -static void __user *sig_handler(struct task_struct *t, int sig)
68778 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
68779 {
68780 return t->sighand->action[sig - 1].sa.sa_handler;
68781 }
68782
68783 -static int sig_handler_ignored(void __user *handler, int sig)
68784 +static int sig_handler_ignored(__sighandler_t handler, int sig)
68785 {
68786 /* Is it explicitly or implicitly ignored? */
68787 return handler == SIG_IGN ||
68788 @@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
68789 static int sig_task_ignored(struct task_struct *t, int sig,
68790 int from_ancestor_ns)
68791 {
68792 - void __user *handler;
68793 + __sighandler_t handler;
68794
68795 handler = sig_handler(t, sig);
68796
68797 @@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
68798 atomic_inc(&user->sigpending);
68799 rcu_read_unlock();
68800
68801 + if (!override_rlimit)
68802 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
68803 +
68804 if (override_rlimit ||
68805 atomic_read(&user->sigpending) <=
68806 task_rlimit(t, RLIMIT_SIGPENDING)) {
68807 @@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
68808
68809 int unhandled_signal(struct task_struct *tsk, int sig)
68810 {
68811 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
68812 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
68813 if (is_global_init(tsk))
68814 return 1;
68815 if (handler != SIG_IGN && handler != SIG_DFL)
68816 @@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
68817 }
68818 }
68819
68820 + /* allow glibc communication via tgkill to other threads in our
68821 + thread group */
68822 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
68823 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
68824 + && gr_handle_signal(t, sig))
68825 + return -EPERM;
68826 +
68827 return security_task_kill(t, info, sig, 0);
68828 }
68829
68830 @@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68831 return send_signal(sig, info, p, 1);
68832 }
68833
68834 -static int
68835 +int
68836 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68837 {
68838 return send_signal(sig, info, t, 0);
68839 @@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68840 unsigned long int flags;
68841 int ret, blocked, ignored;
68842 struct k_sigaction *action;
68843 + int is_unhandled = 0;
68844
68845 spin_lock_irqsave(&t->sighand->siglock, flags);
68846 action = &t->sighand->action[sig-1];
68847 @@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68848 }
68849 if (action->sa.sa_handler == SIG_DFL)
68850 t->signal->flags &= ~SIGNAL_UNKILLABLE;
68851 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
68852 + is_unhandled = 1;
68853 ret = specific_send_sig_info(sig, info, t);
68854 spin_unlock_irqrestore(&t->sighand->siglock, flags);
68855
68856 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
68857 + normal operation */
68858 + if (is_unhandled) {
68859 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
68860 + gr_handle_crash(t, sig);
68861 + }
68862 +
68863 return ret;
68864 }
68865
68866 @@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68867 ret = check_kill_permission(sig, info, p);
68868 rcu_read_unlock();
68869
68870 - if (!ret && sig)
68871 + if (!ret && sig) {
68872 ret = do_send_sig_info(sig, info, p, true);
68873 + if (!ret)
68874 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
68875 + }
68876
68877 return ret;
68878 }
68879 @@ -2820,7 +2843,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
68880 int error = -ESRCH;
68881
68882 rcu_read_lock();
68883 - p = find_task_by_vpid(pid);
68884 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68885 + /* allow glibc communication via tgkill to other threads in our
68886 + thread group */
68887 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68888 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
68889 + p = find_task_by_vpid_unrestricted(pid);
68890 + else
68891 +#endif
68892 + p = find_task_by_vpid(pid);
68893 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68894 error = check_kill_permission(sig, info, p);
68895 /*
68896 diff --git a/kernel/smp.c b/kernel/smp.c
68897 index db197d6..17aef0b 100644
68898 --- a/kernel/smp.c
68899 +++ b/kernel/smp.c
68900 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
68901 }
68902 EXPORT_SYMBOL(smp_call_function);
68903
68904 -void ipi_call_lock(void)
68905 +void ipi_call_lock(void) __acquires(call_function.lock)
68906 {
68907 raw_spin_lock(&call_function.lock);
68908 }
68909
68910 -void ipi_call_unlock(void)
68911 +void ipi_call_unlock(void) __releases(call_function.lock)
68912 {
68913 raw_spin_unlock(&call_function.lock);
68914 }
68915
68916 -void ipi_call_lock_irq(void)
68917 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
68918 {
68919 raw_spin_lock_irq(&call_function.lock);
68920 }
68921
68922 -void ipi_call_unlock_irq(void)
68923 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
68924 {
68925 raw_spin_unlock_irq(&call_function.lock);
68926 }
68927 diff --git a/kernel/softirq.c b/kernel/softirq.c
68928 index 4eb3a0f..6f1fa81 100644
68929 --- a/kernel/softirq.c
68930 +++ b/kernel/softirq.c
68931 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
68932
68933 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
68934
68935 -char *softirq_to_name[NR_SOFTIRQS] = {
68936 +const char * const softirq_to_name[NR_SOFTIRQS] = {
68937 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
68938 "TASKLET", "SCHED", "HRTIMER", "RCU"
68939 };
68940 @@ -235,7 +235,7 @@ restart:
68941 kstat_incr_softirqs_this_cpu(vec_nr);
68942
68943 trace_softirq_entry(vec_nr);
68944 - h->action(h);
68945 + h->action();
68946 trace_softirq_exit(vec_nr);
68947 if (unlikely(prev_count != preempt_count())) {
68948 printk(KERN_ERR "huh, entered softirq %u %s %p"
68949 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
68950 local_irq_restore(flags);
68951 }
68952
68953 -void open_softirq(int nr, void (*action)(struct softirq_action *))
68954 +void open_softirq(int nr, void (*action)(void))
68955 {
68956 - softirq_vec[nr].action = action;
68957 + pax_open_kernel();
68958 + *(void **)&softirq_vec[nr].action = action;
68959 + pax_close_kernel();
68960 }
68961
68962 /*
68963 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68964
68965 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68966
68967 -static void tasklet_action(struct softirq_action *a)
68968 +static void tasklet_action(void)
68969 {
68970 struct tasklet_struct *list;
68971
68972 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
68973 }
68974 }
68975
68976 -static void tasklet_hi_action(struct softirq_action *a)
68977 +static void tasklet_hi_action(void)
68978 {
68979 struct tasklet_struct *list;
68980
68981 diff --git a/kernel/sys.c b/kernel/sys.c
68982 index 888d227..f04b318 100644
68983 --- a/kernel/sys.c
68984 +++ b/kernel/sys.c
68985 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68986 error = -EACCES;
68987 goto out;
68988 }
68989 +
68990 + if (gr_handle_chroot_setpriority(p, niceval)) {
68991 + error = -EACCES;
68992 + goto out;
68993 + }
68994 +
68995 no_nice = security_task_setnice(p, niceval);
68996 if (no_nice) {
68997 error = no_nice;
68998 @@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68999 goto error;
69000 }
69001
69002 + if (gr_check_group_change(new->gid, new->egid, -1))
69003 + goto error;
69004 +
69005 if (rgid != (gid_t) -1 ||
69006 (egid != (gid_t) -1 && egid != old->gid))
69007 new->sgid = new->egid;
69008 @@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
69009 old = current_cred();
69010
69011 retval = -EPERM;
69012 +
69013 + if (gr_check_group_change(gid, gid, gid))
69014 + goto error;
69015 +
69016 if (nsown_capable(CAP_SETGID))
69017 new->gid = new->egid = new->sgid = new->fsgid = gid;
69018 else if (gid == old->gid || gid == old->sgid)
69019 @@ -618,7 +631,7 @@ error:
69020 /*
69021 * change the user struct in a credentials set to match the new UID
69022 */
69023 -static int set_user(struct cred *new)
69024 +int set_user(struct cred *new)
69025 {
69026 struct user_struct *new_user;
69027
69028 @@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
69029 goto error;
69030 }
69031
69032 + if (gr_check_user_change(new->uid, new->euid, -1))
69033 + goto error;
69034 +
69035 if (new->uid != old->uid) {
69036 retval = set_user(new);
69037 if (retval < 0)
69038 @@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
69039 old = current_cred();
69040
69041 retval = -EPERM;
69042 +
69043 + if (gr_check_crash_uid(uid))
69044 + goto error;
69045 + if (gr_check_user_change(uid, uid, uid))
69046 + goto error;
69047 +
69048 if (nsown_capable(CAP_SETUID)) {
69049 new->suid = new->uid = uid;
69050 if (uid != old->uid) {
69051 @@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
69052 goto error;
69053 }
69054
69055 + if (gr_check_user_change(ruid, euid, -1))
69056 + goto error;
69057 +
69058 if (ruid != (uid_t) -1) {
69059 new->uid = ruid;
69060 if (ruid != old->uid) {
69061 @@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
69062 goto error;
69063 }
69064
69065 + if (gr_check_group_change(rgid, egid, -1))
69066 + goto error;
69067 +
69068 if (rgid != (gid_t) -1)
69069 new->gid = rgid;
69070 if (egid != (gid_t) -1)
69071 @@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
69072 old = current_cred();
69073 old_fsuid = old->fsuid;
69074
69075 + if (gr_check_user_change(-1, -1, uid))
69076 + goto error;
69077 +
69078 if (uid == old->uid || uid == old->euid ||
69079 uid == old->suid || uid == old->fsuid ||
69080 nsown_capable(CAP_SETUID)) {
69081 @@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
69082 }
69083 }
69084
69085 +error:
69086 abort_creds(new);
69087 return old_fsuid;
69088
69089 @@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
69090 if (gid == old->gid || gid == old->egid ||
69091 gid == old->sgid || gid == old->fsgid ||
69092 nsown_capable(CAP_SETGID)) {
69093 + if (gr_check_group_change(-1, -1, gid))
69094 + goto error;
69095 +
69096 if (gid != old_fsgid) {
69097 new->fsgid = gid;
69098 goto change_okay;
69099 }
69100 }
69101
69102 +error:
69103 abort_creds(new);
69104 return old_fsgid;
69105
69106 @@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
69107 }
69108 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
69109 snprintf(buf, len, "2.6.%u%s", v, rest);
69110 - ret = copy_to_user(release, buf, len);
69111 + if (len > sizeof(buf))
69112 + ret = -EFAULT;
69113 + else
69114 + ret = copy_to_user(release, buf, len);
69115 }
69116 return ret;
69117 }
69118 @@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
69119 return -EFAULT;
69120
69121 down_read(&uts_sem);
69122 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
69123 + error = __copy_to_user(name->sysname, &utsname()->sysname,
69124 __OLD_UTS_LEN);
69125 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
69126 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
69127 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
69128 __OLD_UTS_LEN);
69129 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
69130 - error |= __copy_to_user(&name->release, &utsname()->release,
69131 + error |= __copy_to_user(name->release, &utsname()->release,
69132 __OLD_UTS_LEN);
69133 error |= __put_user(0, name->release + __OLD_UTS_LEN);
69134 - error |= __copy_to_user(&name->version, &utsname()->version,
69135 + error |= __copy_to_user(name->version, &utsname()->version,
69136 __OLD_UTS_LEN);
69137 error |= __put_user(0, name->version + __OLD_UTS_LEN);
69138 - error |= __copy_to_user(&name->machine, &utsname()->machine,
69139 + error |= __copy_to_user(name->machine, &utsname()->machine,
69140 __OLD_UTS_LEN);
69141 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
69142 up_read(&uts_sem);
69143 @@ -1838,7 +1877,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
69144 error = get_dumpable(me->mm);
69145 break;
69146 case PR_SET_DUMPABLE:
69147 - if (arg2 < 0 || arg2 > 1) {
69148 + if (arg2 > 1) {
69149 error = -EINVAL;
69150 break;
69151 }
69152 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
69153 index f03a6ef..5fcc8af 100644
69154 --- a/kernel/sysctl.c
69155 +++ b/kernel/sysctl.c
69156 @@ -86,6 +86,13 @@
69157
69158
69159 #if defined(CONFIG_SYSCTL)
69160 +#include <linux/grsecurity.h>
69161 +#include <linux/grinternal.h>
69162 +
69163 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
69164 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
69165 + const int op);
69166 +extern int gr_handle_chroot_sysctl(const int op);
69167
69168 /* External variables not in a header file. */
69169 extern int sysctl_overcommit_memory;
69170 @@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
69171 }
69172
69173 #endif
69174 +extern struct ctl_table grsecurity_table[];
69175
69176 static struct ctl_table root_table[];
69177 static struct ctl_table_root sysctl_table_root;
69178 @@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
69179 int sysctl_legacy_va_layout;
69180 #endif
69181
69182 +#ifdef CONFIG_PAX_SOFTMODE
69183 +static ctl_table pax_table[] = {
69184 + {
69185 + .procname = "softmode",
69186 + .data = &pax_softmode,
69187 + .maxlen = sizeof(unsigned int),
69188 + .mode = 0600,
69189 + .proc_handler = &proc_dointvec,
69190 + },
69191 +
69192 + { }
69193 +};
69194 +#endif
69195 +
69196 /* The default sysctl tables: */
69197
69198 static struct ctl_table root_table[] = {
69199 @@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
69200 #endif
69201
69202 static struct ctl_table kern_table[] = {
69203 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
69204 + {
69205 + .procname = "grsecurity",
69206 + .mode = 0500,
69207 + .child = grsecurity_table,
69208 + },
69209 +#endif
69210 +
69211 +#ifdef CONFIG_PAX_SOFTMODE
69212 + {
69213 + .procname = "pax",
69214 + .mode = 0500,
69215 + .child = pax_table,
69216 + },
69217 +#endif
69218 +
69219 {
69220 .procname = "sched_child_runs_first",
69221 .data = &sysctl_sched_child_runs_first,
69222 @@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
69223 .data = &modprobe_path,
69224 .maxlen = KMOD_PATH_LEN,
69225 .mode = 0644,
69226 - .proc_handler = proc_dostring,
69227 + .proc_handler = proc_dostring_modpriv,
69228 },
69229 {
69230 .procname = "modules_disabled",
69231 @@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
69232 .extra1 = &zero,
69233 .extra2 = &one,
69234 },
69235 +#endif
69236 {
69237 .procname = "kptr_restrict",
69238 .data = &kptr_restrict,
69239 .maxlen = sizeof(int),
69240 .mode = 0644,
69241 .proc_handler = proc_dointvec_minmax_sysadmin,
69242 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69243 + .extra1 = &two,
69244 +#else
69245 .extra1 = &zero,
69246 +#endif
69247 .extra2 = &two,
69248 },
69249 -#endif
69250 {
69251 .procname = "ngroups_max",
69252 .data = &ngroups_max,
69253 @@ -1225,6 +1267,13 @@ static struct ctl_table vm_table[] = {
69254 .proc_handler = proc_dointvec_minmax,
69255 .extra1 = &zero,
69256 },
69257 + {
69258 + .procname = "heap_stack_gap",
69259 + .data = &sysctl_heap_stack_gap,
69260 + .maxlen = sizeof(sysctl_heap_stack_gap),
69261 + .mode = 0644,
69262 + .proc_handler = proc_doulongvec_minmax,
69263 + },
69264 #else
69265 {
69266 .procname = "nr_trim_pages",
69267 @@ -1729,6 +1778,17 @@ static int test_perm(int mode, int op)
69268 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
69269 {
69270 int mode;
69271 + int error;
69272 +
69273 + if (table->parent != NULL && table->parent->procname != NULL &&
69274 + table->procname != NULL &&
69275 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
69276 + return -EACCES;
69277 + if (gr_handle_chroot_sysctl(op))
69278 + return -EACCES;
69279 + error = gr_handle_sysctl(table, op);
69280 + if (error)
69281 + return error;
69282
69283 if (root->permissions)
69284 mode = root->permissions(root, current->nsproxy, table);
69285 @@ -2133,6 +2193,16 @@ int proc_dostring(struct ctl_table *table, int write,
69286 buffer, lenp, ppos);
69287 }
69288
69289 +int proc_dostring_modpriv(struct ctl_table *table, int write,
69290 + void __user *buffer, size_t *lenp, loff_t *ppos)
69291 +{
69292 + if (write && !capable(CAP_SYS_MODULE))
69293 + return -EPERM;
69294 +
69295 + return _proc_do_string(table->data, table->maxlen, write,
69296 + buffer, lenp, ppos);
69297 +}
69298 +
69299 static size_t proc_skip_spaces(char **buf)
69300 {
69301 size_t ret;
69302 @@ -2238,6 +2308,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
69303 len = strlen(tmp);
69304 if (len > *size)
69305 len = *size;
69306 + if (len > sizeof(tmp))
69307 + len = sizeof(tmp);
69308 if (copy_to_user(*buf, tmp, len))
69309 return -EFAULT;
69310 *size -= len;
69311 @@ -2554,8 +2626,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
69312 *i = val;
69313 } else {
69314 val = convdiv * (*i) / convmul;
69315 - if (!first)
69316 + if (!first) {
69317 err = proc_put_char(&buffer, &left, '\t');
69318 + if (err)
69319 + break;
69320 + }
69321 err = proc_put_long(&buffer, &left, val, false);
69322 if (err)
69323 break;
69324 @@ -2950,6 +3025,12 @@ int proc_dostring(struct ctl_table *table, int write,
69325 return -ENOSYS;
69326 }
69327
69328 +int proc_dostring_modpriv(struct ctl_table *table, int write,
69329 + void __user *buffer, size_t *lenp, loff_t *ppos)
69330 +{
69331 + return -ENOSYS;
69332 +}
69333 +
69334 int proc_dointvec(struct ctl_table *table, int write,
69335 void __user *buffer, size_t *lenp, loff_t *ppos)
69336 {
69337 @@ -3006,6 +3087,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
69338 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
69339 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
69340 EXPORT_SYMBOL(proc_dostring);
69341 +EXPORT_SYMBOL(proc_dostring_modpriv);
69342 EXPORT_SYMBOL(proc_doulongvec_minmax);
69343 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
69344 EXPORT_SYMBOL(register_sysctl_table);
69345 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
69346 index a650694..aaeeb20 100644
69347 --- a/kernel/sysctl_binary.c
69348 +++ b/kernel/sysctl_binary.c
69349 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
69350 int i;
69351
69352 set_fs(KERNEL_DS);
69353 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69354 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69355 set_fs(old_fs);
69356 if (result < 0)
69357 goto out_kfree;
69358 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
69359 }
69360
69361 set_fs(KERNEL_DS);
69362 - result = vfs_write(file, buffer, str - buffer, &pos);
69363 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69364 set_fs(old_fs);
69365 if (result < 0)
69366 goto out_kfree;
69367 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
69368 int i;
69369
69370 set_fs(KERNEL_DS);
69371 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69372 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69373 set_fs(old_fs);
69374 if (result < 0)
69375 goto out_kfree;
69376 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
69377 }
69378
69379 set_fs(KERNEL_DS);
69380 - result = vfs_write(file, buffer, str - buffer, &pos);
69381 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69382 set_fs(old_fs);
69383 if (result < 0)
69384 goto out_kfree;
69385 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
69386 int i;
69387
69388 set_fs(KERNEL_DS);
69389 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69390 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69391 set_fs(old_fs);
69392 if (result < 0)
69393 goto out;
69394 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69395 __le16 dnaddr;
69396
69397 set_fs(KERNEL_DS);
69398 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69399 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69400 set_fs(old_fs);
69401 if (result < 0)
69402 goto out;
69403 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69404 le16_to_cpu(dnaddr) & 0x3ff);
69405
69406 set_fs(KERNEL_DS);
69407 - result = vfs_write(file, buf, len, &pos);
69408 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
69409 set_fs(old_fs);
69410 if (result < 0)
69411 goto out;
69412 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
69413 index 362da65..ab8ef8c 100644
69414 --- a/kernel/sysctl_check.c
69415 +++ b/kernel/sysctl_check.c
69416 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
69417 set_fail(&fail, table, "Directory with extra2");
69418 } else {
69419 if ((table->proc_handler == proc_dostring) ||
69420 + (table->proc_handler == proc_dostring_modpriv) ||
69421 (table->proc_handler == proc_dointvec) ||
69422 (table->proc_handler == proc_dointvec_minmax) ||
69423 (table->proc_handler == proc_dointvec_jiffies) ||
69424 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
69425 index e660464..c8b9e67 100644
69426 --- a/kernel/taskstats.c
69427 +++ b/kernel/taskstats.c
69428 @@ -27,9 +27,12 @@
69429 #include <linux/cgroup.h>
69430 #include <linux/fs.h>
69431 #include <linux/file.h>
69432 +#include <linux/grsecurity.h>
69433 #include <net/genetlink.h>
69434 #include <linux/atomic.h>
69435
69436 +extern int gr_is_taskstats_denied(int pid);
69437 +
69438 /*
69439 * Maximum length of a cpumask that can be specified in
69440 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
69441 @@ -556,6 +559,9 @@ err:
69442
69443 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
69444 {
69445 + if (gr_is_taskstats_denied(current->pid))
69446 + return -EACCES;
69447 +
69448 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
69449 return cmd_attr_register_cpumask(info);
69450 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
69451 diff --git a/kernel/time.c b/kernel/time.c
69452 index 73e416d..cfc6f69 100644
69453 --- a/kernel/time.c
69454 +++ b/kernel/time.c
69455 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
69456 return error;
69457
69458 if (tz) {
69459 + /* we log in do_settimeofday called below, so don't log twice
69460 + */
69461 + if (!tv)
69462 + gr_log_timechange();
69463 +
69464 /* SMP safe, global irq locking makes it work. */
69465 sys_tz = *tz;
69466 update_vsyscall_tz();
69467 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
69468 index 8a46f5d..bbe6f9c 100644
69469 --- a/kernel/time/alarmtimer.c
69470 +++ b/kernel/time/alarmtimer.c
69471 @@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
69472 struct platform_device *pdev;
69473 int error = 0;
69474 int i;
69475 - struct k_clock alarm_clock = {
69476 + static struct k_clock alarm_clock = {
69477 .clock_getres = alarm_clock_getres,
69478 .clock_get = alarm_clock_get,
69479 .timer_create = alarm_timer_create,
69480 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
69481 index fd4a7b1..fae5c2a 100644
69482 --- a/kernel/time/tick-broadcast.c
69483 +++ b/kernel/time/tick-broadcast.c
69484 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
69485 * then clear the broadcast bit.
69486 */
69487 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
69488 - int cpu = smp_processor_id();
69489 + cpu = smp_processor_id();
69490
69491 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
69492 tick_broadcast_clear_oneshot(cpu);
69493 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
69494 index 0c63581..e25dcb6 100644
69495 --- a/kernel/time/timekeeping.c
69496 +++ b/kernel/time/timekeeping.c
69497 @@ -14,6 +14,7 @@
69498 #include <linux/init.h>
69499 #include <linux/mm.h>
69500 #include <linux/sched.h>
69501 +#include <linux/grsecurity.h>
69502 #include <linux/syscore_ops.h>
69503 #include <linux/clocksource.h>
69504 #include <linux/jiffies.h>
69505 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
69506 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
69507 return -EINVAL;
69508
69509 + gr_log_timechange();
69510 +
69511 write_seqlock_irqsave(&xtime_lock, flags);
69512
69513 timekeeping_forward_now();
69514 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
69515 index 3258455..f35227d 100644
69516 --- a/kernel/time/timer_list.c
69517 +++ b/kernel/time/timer_list.c
69518 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
69519
69520 static void print_name_offset(struct seq_file *m, void *sym)
69521 {
69522 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69523 + SEQ_printf(m, "<%p>", NULL);
69524 +#else
69525 char symname[KSYM_NAME_LEN];
69526
69527 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
69528 SEQ_printf(m, "<%pK>", sym);
69529 else
69530 SEQ_printf(m, "%s", symname);
69531 +#endif
69532 }
69533
69534 static void
69535 @@ -112,7 +116,11 @@ next_one:
69536 static void
69537 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
69538 {
69539 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69540 + SEQ_printf(m, " .base: %p\n", NULL);
69541 +#else
69542 SEQ_printf(m, " .base: %pK\n", base);
69543 +#endif
69544 SEQ_printf(m, " .index: %d\n",
69545 base->index);
69546 SEQ_printf(m, " .resolution: %Lu nsecs\n",
69547 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
69548 {
69549 struct proc_dir_entry *pe;
69550
69551 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69552 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
69553 +#else
69554 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
69555 +#endif
69556 if (!pe)
69557 return -ENOMEM;
69558 return 0;
69559 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
69560 index 0b537f2..9e71eca 100644
69561 --- a/kernel/time/timer_stats.c
69562 +++ b/kernel/time/timer_stats.c
69563 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
69564 static unsigned long nr_entries;
69565 static struct entry entries[MAX_ENTRIES];
69566
69567 -static atomic_t overflow_count;
69568 +static atomic_unchecked_t overflow_count;
69569
69570 /*
69571 * The entries are in a hash-table, for fast lookup:
69572 @@ -140,7 +140,7 @@ static void reset_entries(void)
69573 nr_entries = 0;
69574 memset(entries, 0, sizeof(entries));
69575 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
69576 - atomic_set(&overflow_count, 0);
69577 + atomic_set_unchecked(&overflow_count, 0);
69578 }
69579
69580 static struct entry *alloc_entry(void)
69581 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69582 if (likely(entry))
69583 entry->count++;
69584 else
69585 - atomic_inc(&overflow_count);
69586 + atomic_inc_unchecked(&overflow_count);
69587
69588 out_unlock:
69589 raw_spin_unlock_irqrestore(lock, flags);
69590 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69591
69592 static void print_name_offset(struct seq_file *m, unsigned long addr)
69593 {
69594 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69595 + seq_printf(m, "<%p>", NULL);
69596 +#else
69597 char symname[KSYM_NAME_LEN];
69598
69599 if (lookup_symbol_name(addr, symname) < 0)
69600 seq_printf(m, "<%p>", (void *)addr);
69601 else
69602 seq_printf(m, "%s", symname);
69603 +#endif
69604 }
69605
69606 static int tstats_show(struct seq_file *m, void *v)
69607 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
69608
69609 seq_puts(m, "Timer Stats Version: v0.2\n");
69610 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
69611 - if (atomic_read(&overflow_count))
69612 + if (atomic_read_unchecked(&overflow_count))
69613 seq_printf(m, "Overflow: %d entries\n",
69614 - atomic_read(&overflow_count));
69615 + atomic_read_unchecked(&overflow_count));
69616
69617 for (i = 0; i < nr_entries; i++) {
69618 entry = entries + i;
69619 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
69620 {
69621 struct proc_dir_entry *pe;
69622
69623 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69624 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
69625 +#else
69626 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
69627 +#endif
69628 if (!pe)
69629 return -ENOMEM;
69630 return 0;
69631 diff --git a/kernel/timer.c b/kernel/timer.c
69632 index a297ffc..5e16b0b 100644
69633 --- a/kernel/timer.c
69634 +++ b/kernel/timer.c
69635 @@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
69636 /*
69637 * This function runs timers and the timer-tq in bottom half context.
69638 */
69639 -static void run_timer_softirq(struct softirq_action *h)
69640 +static void run_timer_softirq(void)
69641 {
69642 struct tvec_base *base = __this_cpu_read(tvec_bases);
69643
69644 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
69645 index cdea7b5..9b820d4 100644
69646 --- a/kernel/trace/blktrace.c
69647 +++ b/kernel/trace/blktrace.c
69648 @@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
69649 struct blk_trace *bt = filp->private_data;
69650 char buf[16];
69651
69652 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
69653 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
69654
69655 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
69656 }
69657 @@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
69658 return 1;
69659
69660 bt = buf->chan->private_data;
69661 - atomic_inc(&bt->dropped);
69662 + atomic_inc_unchecked(&bt->dropped);
69663 return 0;
69664 }
69665
69666 @@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
69667
69668 bt->dir = dir;
69669 bt->dev = dev;
69670 - atomic_set(&bt->dropped, 0);
69671 + atomic_set_unchecked(&bt->dropped, 0);
69672
69673 ret = -EIO;
69674 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
69675 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
69676 index 683d559..d70d914 100644
69677 --- a/kernel/trace/ftrace.c
69678 +++ b/kernel/trace/ftrace.c
69679 @@ -1726,12 +1726,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
69680 if (unlikely(ftrace_disabled))
69681 return 0;
69682
69683 + ret = ftrace_arch_code_modify_prepare();
69684 + FTRACE_WARN_ON(ret);
69685 + if (ret)
69686 + return 0;
69687 +
69688 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
69689 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
69690 if (ret) {
69691 ftrace_bug(ret, ip);
69692 - return 0;
69693 }
69694 - return 1;
69695 + return ret ? 0 : 1;
69696 }
69697
69698 /*
69699 @@ -2843,7 +2848,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
69700
69701 int
69702 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
69703 - void *data)
69704 + void *data)
69705 {
69706 struct ftrace_func_probe *entry;
69707 struct ftrace_page *pg;
69708 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
69709 index c4579f1..6a439da 100644
69710 --- a/kernel/trace/trace.c
69711 +++ b/kernel/trace/trace.c
69712 @@ -4258,10 +4258,9 @@ static const struct file_operations tracing_dyn_info_fops = {
69713 };
69714 #endif
69715
69716 -static struct dentry *d_tracer;
69717 -
69718 struct dentry *tracing_init_dentry(void)
69719 {
69720 + static struct dentry *d_tracer;
69721 static int once;
69722
69723 if (d_tracer)
69724 @@ -4281,10 +4280,9 @@ struct dentry *tracing_init_dentry(void)
69725 return d_tracer;
69726 }
69727
69728 -static struct dentry *d_percpu;
69729 -
69730 struct dentry *tracing_dentry_percpu(void)
69731 {
69732 + static struct dentry *d_percpu;
69733 static int once;
69734 struct dentry *d_tracer;
69735
69736 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
69737 index c212a7f..7b02394 100644
69738 --- a/kernel/trace/trace_events.c
69739 +++ b/kernel/trace/trace_events.c
69740 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
69741 struct ftrace_module_file_ops {
69742 struct list_head list;
69743 struct module *mod;
69744 - struct file_operations id;
69745 - struct file_operations enable;
69746 - struct file_operations format;
69747 - struct file_operations filter;
69748 };
69749
69750 static struct ftrace_module_file_ops *
69751 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
69752
69753 file_ops->mod = mod;
69754
69755 - file_ops->id = ftrace_event_id_fops;
69756 - file_ops->id.owner = mod;
69757 -
69758 - file_ops->enable = ftrace_enable_fops;
69759 - file_ops->enable.owner = mod;
69760 -
69761 - file_ops->filter = ftrace_event_filter_fops;
69762 - file_ops->filter.owner = mod;
69763 -
69764 - file_ops->format = ftrace_event_format_fops;
69765 - file_ops->format.owner = mod;
69766 + pax_open_kernel();
69767 + *(void **)&mod->trace_id.owner = mod;
69768 + *(void **)&mod->trace_enable.owner = mod;
69769 + *(void **)&mod->trace_filter.owner = mod;
69770 + *(void **)&mod->trace_format.owner = mod;
69771 + pax_close_kernel();
69772
69773 list_add(&file_ops->list, &ftrace_module_file_list);
69774
69775 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
69776
69777 for_each_event(call, start, end) {
69778 __trace_add_event_call(*call, mod,
69779 - &file_ops->id, &file_ops->enable,
69780 - &file_ops->filter, &file_ops->format);
69781 + &mod->trace_id, &mod->trace_enable,
69782 + &mod->trace_filter, &mod->trace_format);
69783 }
69784 }
69785
69786 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
69787 index 00d527c..7c5b1a3 100644
69788 --- a/kernel/trace/trace_kprobe.c
69789 +++ b/kernel/trace/trace_kprobe.c
69790 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69791 long ret;
69792 int maxlen = get_rloc_len(*(u32 *)dest);
69793 u8 *dst = get_rloc_data(dest);
69794 - u8 *src = addr;
69795 + const u8 __user *src = (const u8 __force_user *)addr;
69796 mm_segment_t old_fs = get_fs();
69797 if (!maxlen)
69798 return;
69799 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69800 pagefault_disable();
69801 do
69802 ret = __copy_from_user_inatomic(dst++, src++, 1);
69803 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
69804 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
69805 dst[-1] = '\0';
69806 pagefault_enable();
69807 set_fs(old_fs);
69808 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69809 ((u8 *)get_rloc_data(dest))[0] = '\0';
69810 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
69811 } else
69812 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
69813 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
69814 get_rloc_offs(*(u32 *)dest));
69815 }
69816 /* Return the length of string -- including null terminal byte */
69817 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
69818 set_fs(KERNEL_DS);
69819 pagefault_disable();
69820 do {
69821 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
69822 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
69823 len++;
69824 } while (c && ret == 0 && len < MAX_STRING_SIZE);
69825 pagefault_enable();
69826 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
69827 index fd3c8aa..5f324a6 100644
69828 --- a/kernel/trace/trace_mmiotrace.c
69829 +++ b/kernel/trace/trace_mmiotrace.c
69830 @@ -24,7 +24,7 @@ struct header_iter {
69831 static struct trace_array *mmio_trace_array;
69832 static bool overrun_detected;
69833 static unsigned long prev_overruns;
69834 -static atomic_t dropped_count;
69835 +static atomic_unchecked_t dropped_count;
69836
69837 static void mmio_reset_data(struct trace_array *tr)
69838 {
69839 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
69840
69841 static unsigned long count_overruns(struct trace_iterator *iter)
69842 {
69843 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
69844 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
69845 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
69846
69847 if (over > prev_overruns)
69848 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
69849 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
69850 sizeof(*entry), 0, pc);
69851 if (!event) {
69852 - atomic_inc(&dropped_count);
69853 + atomic_inc_unchecked(&dropped_count);
69854 return;
69855 }
69856 entry = ring_buffer_event_data(event);
69857 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
69858 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
69859 sizeof(*entry), 0, pc);
69860 if (!event) {
69861 - atomic_inc(&dropped_count);
69862 + atomic_inc_unchecked(&dropped_count);
69863 return;
69864 }
69865 entry = ring_buffer_event_data(event);
69866 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
69867 index 0d6ff35..67e0ed7 100644
69868 --- a/kernel/trace/trace_output.c
69869 +++ b/kernel/trace/trace_output.c
69870 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
69871
69872 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
69873 if (!IS_ERR(p)) {
69874 - p = mangle_path(s->buffer + s->len, p, "\n");
69875 + p = mangle_path(s->buffer + s->len, p, "\n\\");
69876 if (p) {
69877 s->len = p - s->buffer;
69878 return 1;
69879 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
69880 index d4545f4..a9010a1 100644
69881 --- a/kernel/trace/trace_stack.c
69882 +++ b/kernel/trace/trace_stack.c
69883 @@ -53,7 +53,7 @@ static inline void check_stack(void)
69884 return;
69885
69886 /* we do not handle interrupt stacks yet */
69887 - if (!object_is_on_stack(&this_size))
69888 + if (!object_starts_on_stack(&this_size))
69889 return;
69890
69891 local_irq_save(flags);
69892 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69893 index 209b379..7f76423 100644
69894 --- a/kernel/trace/trace_workqueue.c
69895 +++ b/kernel/trace/trace_workqueue.c
69896 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69897 int cpu;
69898 pid_t pid;
69899 /* Can be inserted from interrupt or user context, need to be atomic */
69900 - atomic_t inserted;
69901 + atomic_unchecked_t inserted;
69902 /*
69903 * Don't need to be atomic, works are serialized in a single workqueue thread
69904 * on a single CPU.
69905 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69906 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69907 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69908 if (node->pid == wq_thread->pid) {
69909 - atomic_inc(&node->inserted);
69910 + atomic_inc_unchecked(&node->inserted);
69911 goto found;
69912 }
69913 }
69914 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
69915 tsk = get_pid_task(pid, PIDTYPE_PID);
69916 if (tsk) {
69917 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69918 - atomic_read(&cws->inserted), cws->executed,
69919 + atomic_read_unchecked(&cws->inserted), cws->executed,
69920 tsk->comm);
69921 put_task_struct(tsk);
69922 }
69923 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69924 index 8745ac7..d144e37 100644
69925 --- a/lib/Kconfig.debug
69926 +++ b/lib/Kconfig.debug
69927 @@ -1103,6 +1103,7 @@ config LATENCYTOP
69928 depends on DEBUG_KERNEL
69929 depends on STACKTRACE_SUPPORT
69930 depends on PROC_FS
69931 + depends on !GRKERNSEC_HIDESYM
69932 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
69933 select KALLSYMS
69934 select KALLSYMS_ALL
69935 diff --git a/lib/bitmap.c b/lib/bitmap.c
69936 index 0d4a127..33a06c7 100644
69937 --- a/lib/bitmap.c
69938 +++ b/lib/bitmap.c
69939 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
69940 {
69941 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69942 u32 chunk;
69943 - const char __user __force *ubuf = (const char __user __force *)buf;
69944 + const char __user *ubuf = (const char __force_user *)buf;
69945
69946 bitmap_zero(maskp, nmaskbits);
69947
69948 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
69949 {
69950 if (!access_ok(VERIFY_READ, ubuf, ulen))
69951 return -EFAULT;
69952 - return __bitmap_parse((const char __force *)ubuf,
69953 + return __bitmap_parse((const char __force_kernel *)ubuf,
69954 ulen, 1, maskp, nmaskbits);
69955
69956 }
69957 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
69958 {
69959 unsigned a, b;
69960 int c, old_c, totaldigits;
69961 - const char __user __force *ubuf = (const char __user __force *)buf;
69962 + const char __user *ubuf = (const char __force_user *)buf;
69963 int exp_digit, in_range;
69964
69965 totaldigits = c = 0;
69966 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69967 {
69968 if (!access_ok(VERIFY_READ, ubuf, ulen))
69969 return -EFAULT;
69970 - return __bitmap_parselist((const char __force *)ubuf,
69971 + return __bitmap_parselist((const char __force_kernel *)ubuf,
69972 ulen, 1, maskp, nmaskbits);
69973 }
69974 EXPORT_SYMBOL(bitmap_parselist_user);
69975 diff --git a/lib/bug.c b/lib/bug.c
69976 index a28c141..2bd3d95 100644
69977 --- a/lib/bug.c
69978 +++ b/lib/bug.c
69979 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69980 return BUG_TRAP_TYPE_NONE;
69981
69982 bug = find_bug(bugaddr);
69983 + if (!bug)
69984 + return BUG_TRAP_TYPE_NONE;
69985
69986 file = NULL;
69987 line = 0;
69988 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69989 index 0ab9ae8..f01ceca 100644
69990 --- a/lib/debugobjects.c
69991 +++ b/lib/debugobjects.c
69992 @@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69993 if (limit > 4)
69994 return;
69995
69996 - is_on_stack = object_is_on_stack(addr);
69997 + is_on_stack = object_starts_on_stack(addr);
69998 if (is_on_stack == onstack)
69999 return;
70000
70001 diff --git a/lib/devres.c b/lib/devres.c
70002 index 9676617..5149e15 100644
70003 --- a/lib/devres.c
70004 +++ b/lib/devres.c
70005 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
70006 void devm_iounmap(struct device *dev, void __iomem *addr)
70007 {
70008 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
70009 - (void *)addr));
70010 + (void __force *)addr));
70011 iounmap(addr);
70012 }
70013 EXPORT_SYMBOL(devm_iounmap);
70014 @@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
70015 {
70016 ioport_unmap(addr);
70017 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
70018 - devm_ioport_map_match, (void *)addr));
70019 + devm_ioport_map_match, (void __force *)addr));
70020 }
70021 EXPORT_SYMBOL(devm_ioport_unmap);
70022
70023 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
70024 index fea790a..ebb0e82 100644
70025 --- a/lib/dma-debug.c
70026 +++ b/lib/dma-debug.c
70027 @@ -925,7 +925,7 @@ out:
70028
70029 static void check_for_stack(struct device *dev, void *addr)
70030 {
70031 - if (object_is_on_stack(addr))
70032 + if (object_starts_on_stack(addr))
70033 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
70034 "stack [addr=%p]\n", addr);
70035 }
70036 diff --git a/lib/extable.c b/lib/extable.c
70037 index 4cac81e..63e9b8f 100644
70038 --- a/lib/extable.c
70039 +++ b/lib/extable.c
70040 @@ -13,6 +13,7 @@
70041 #include <linux/init.h>
70042 #include <linux/sort.h>
70043 #include <asm/uaccess.h>
70044 +#include <asm/pgtable.h>
70045
70046 #ifndef ARCH_HAS_SORT_EXTABLE
70047 /*
70048 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
70049 void sort_extable(struct exception_table_entry *start,
70050 struct exception_table_entry *finish)
70051 {
70052 + pax_open_kernel();
70053 sort(start, finish - start, sizeof(struct exception_table_entry),
70054 cmp_ex, NULL);
70055 + pax_close_kernel();
70056 }
70057
70058 #ifdef CONFIG_MODULES
70059 diff --git a/lib/inflate.c b/lib/inflate.c
70060 index 013a761..c28f3fc 100644
70061 --- a/lib/inflate.c
70062 +++ b/lib/inflate.c
70063 @@ -269,7 +269,7 @@ static void free(void *where)
70064 malloc_ptr = free_mem_ptr;
70065 }
70066 #else
70067 -#define malloc(a) kmalloc(a, GFP_KERNEL)
70068 +#define malloc(a) kmalloc((a), GFP_KERNEL)
70069 #define free(a) kfree(a)
70070 #endif
70071
70072 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
70073 index bd2bea9..6b3c95e 100644
70074 --- a/lib/is_single_threaded.c
70075 +++ b/lib/is_single_threaded.c
70076 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
70077 struct task_struct *p, *t;
70078 bool ret;
70079
70080 + if (!mm)
70081 + return true;
70082 +
70083 if (atomic_read(&task->signal->live) != 1)
70084 return false;
70085
70086 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
70087 index dc63d08..95ae14a 100644
70088 --- a/lib/radix-tree.c
70089 +++ b/lib/radix-tree.c
70090 @@ -78,7 +78,7 @@ struct radix_tree_preload {
70091 int nr;
70092 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
70093 };
70094 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
70095 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
70096
70097 static inline void *ptr_to_indirect(void *ptr)
70098 {
70099 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
70100 index 38e612e..4fb99a8 100644
70101 --- a/lib/vsprintf.c
70102 +++ b/lib/vsprintf.c
70103 @@ -16,6 +16,9 @@
70104 * - scnprintf and vscnprintf
70105 */
70106
70107 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70108 +#define __INCLUDED_BY_HIDESYM 1
70109 +#endif
70110 #include <stdarg.h>
70111 #include <linux/module.h>
70112 #include <linux/types.h>
70113 @@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
70114 char sym[KSYM_SYMBOL_LEN];
70115 if (ext == 'B')
70116 sprint_backtrace(sym, value);
70117 - else if (ext != 'f' && ext != 's')
70118 + else if (ext != 'f' && ext != 's' && ext != 'a')
70119 sprint_symbol(sym, value);
70120 else
70121 kallsyms_lookup(value, NULL, NULL, NULL, sym);
70122 @@ -789,7 +792,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
70123 return number(buf, end, *(const netdev_features_t *)addr, spec);
70124 }
70125
70126 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70127 +int kptr_restrict __read_mostly = 2;
70128 +#else
70129 int kptr_restrict __read_mostly;
70130 +#endif
70131
70132 /*
70133 * Show a '%p' thing. A kernel extension is that the '%p' is followed
70134 @@ -803,6 +810,8 @@ int kptr_restrict __read_mostly;
70135 * - 'S' For symbolic direct pointers with offset
70136 * - 's' For symbolic direct pointers without offset
70137 * - 'B' For backtraced symbolic direct pointers with offset
70138 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
70139 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
70140 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
70141 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
70142 * - 'M' For a 6-byte MAC address, it prints the address in the
70143 @@ -848,12 +857,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70144 {
70145 if (!ptr && *fmt != 'K') {
70146 /*
70147 - * Print (null) with the same width as a pointer so it makes
70148 + * Print (nil) with the same width as a pointer so it makes
70149 * tabular output look nice.
70150 */
70151 if (spec.field_width == -1)
70152 spec.field_width = 2 * sizeof(void *);
70153 - return string(buf, end, "(null)", spec);
70154 + return string(buf, end, "(nil)", spec);
70155 }
70156
70157 switch (*fmt) {
70158 @@ -863,6 +872,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70159 /* Fallthrough */
70160 case 'S':
70161 case 's':
70162 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70163 + break;
70164 +#else
70165 + return symbol_string(buf, end, ptr, spec, *fmt);
70166 +#endif
70167 + case 'A':
70168 + case 'a':
70169 case 'B':
70170 return symbol_string(buf, end, ptr, spec, *fmt);
70171 case 'R':
70172 @@ -1633,11 +1649,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70173 typeof(type) value; \
70174 if (sizeof(type) == 8) { \
70175 args = PTR_ALIGN(args, sizeof(u32)); \
70176 - *(u32 *)&value = *(u32 *)args; \
70177 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
70178 + *(u32 *)&value = *(const u32 *)args; \
70179 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
70180 } else { \
70181 args = PTR_ALIGN(args, sizeof(type)); \
70182 - value = *(typeof(type) *)args; \
70183 + value = *(const typeof(type) *)args; \
70184 } \
70185 args += sizeof(type); \
70186 value; \
70187 @@ -1700,7 +1716,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70188 case FORMAT_TYPE_STR: {
70189 const char *str_arg = args;
70190 args += strlen(str_arg) + 1;
70191 - str = string(str, end, (char *)str_arg, spec);
70192 + str = string(str, end, str_arg, spec);
70193 break;
70194 }
70195
70196 diff --git a/localversion-grsec b/localversion-grsec
70197 new file mode 100644
70198 index 0000000..7cd6065
70199 --- /dev/null
70200 +++ b/localversion-grsec
70201 @@ -0,0 +1 @@
70202 +-grsec
70203 diff --git a/mm/Kconfig b/mm/Kconfig
70204 index e338407..49b5b7a 100644
70205 --- a/mm/Kconfig
70206 +++ b/mm/Kconfig
70207 @@ -247,10 +247,10 @@ config KSM
70208 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
70209
70210 config DEFAULT_MMAP_MIN_ADDR
70211 - int "Low address space to protect from user allocation"
70212 + int "Low address space to protect from user allocation"
70213 depends on MMU
70214 - default 4096
70215 - help
70216 + default 65536
70217 + help
70218 This is the portion of low virtual memory which should be protected
70219 from userspace allocation. Keeping a user from writing to low pages
70220 can help reduce the impact of kernel NULL pointer bugs.
70221 diff --git a/mm/filemap.c b/mm/filemap.c
70222 index b662757..3081ddd 100644
70223 --- a/mm/filemap.c
70224 +++ b/mm/filemap.c
70225 @@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
70226 struct address_space *mapping = file->f_mapping;
70227
70228 if (!mapping->a_ops->readpage)
70229 - return -ENOEXEC;
70230 + return -ENODEV;
70231 file_accessed(file);
70232 vma->vm_ops = &generic_file_vm_ops;
70233 vma->vm_flags |= VM_CAN_NONLINEAR;
70234 @@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
70235 *pos = i_size_read(inode);
70236
70237 if (limit != RLIM_INFINITY) {
70238 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
70239 if (*pos >= limit) {
70240 send_sig(SIGXFSZ, current, 0);
70241 return -EFBIG;
70242 diff --git a/mm/fremap.c b/mm/fremap.c
70243 index 9ed4fd4..c42648d 100644
70244 --- a/mm/fremap.c
70245 +++ b/mm/fremap.c
70246 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
70247 retry:
70248 vma = find_vma(mm, start);
70249
70250 +#ifdef CONFIG_PAX_SEGMEXEC
70251 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
70252 + goto out;
70253 +#endif
70254 +
70255 /*
70256 * Make sure the vma is shared, that it supports prefaulting,
70257 * and that the remapped range is valid and fully within
70258 diff --git a/mm/highmem.c b/mm/highmem.c
70259 index 57d82c6..e9e0552 100644
70260 --- a/mm/highmem.c
70261 +++ b/mm/highmem.c
70262 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
70263 * So no dangers, even with speculative execution.
70264 */
70265 page = pte_page(pkmap_page_table[i]);
70266 + pax_open_kernel();
70267 pte_clear(&init_mm, (unsigned long)page_address(page),
70268 &pkmap_page_table[i]);
70269 -
70270 + pax_close_kernel();
70271 set_page_address(page, NULL);
70272 need_flush = 1;
70273 }
70274 @@ -186,9 +187,11 @@ start:
70275 }
70276 }
70277 vaddr = PKMAP_ADDR(last_pkmap_nr);
70278 +
70279 + pax_open_kernel();
70280 set_pte_at(&init_mm, vaddr,
70281 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
70282 -
70283 + pax_close_kernel();
70284 pkmap_count[last_pkmap_nr] = 1;
70285 set_page_address(page, (void *)vaddr);
70286
70287 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
70288 index 8f7fc39..69bf1e9 100644
70289 --- a/mm/huge_memory.c
70290 +++ b/mm/huge_memory.c
70291 @@ -733,7 +733,7 @@ out:
70292 * run pte_offset_map on the pmd, if an huge pmd could
70293 * materialize from under us from a different thread.
70294 */
70295 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
70296 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70297 return VM_FAULT_OOM;
70298 /* if an huge pmd materialized from under us just retry later */
70299 if (unlikely(pmd_trans_huge(*pmd)))
70300 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
70301 index a7cf829..d60e0e1 100644
70302 --- a/mm/hugetlb.c
70303 +++ b/mm/hugetlb.c
70304 @@ -2346,6 +2346,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
70305 return 1;
70306 }
70307
70308 +#ifdef CONFIG_PAX_SEGMEXEC
70309 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
70310 +{
70311 + struct mm_struct *mm = vma->vm_mm;
70312 + struct vm_area_struct *vma_m;
70313 + unsigned long address_m;
70314 + pte_t *ptep_m;
70315 +
70316 + vma_m = pax_find_mirror_vma(vma);
70317 + if (!vma_m)
70318 + return;
70319 +
70320 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70321 + address_m = address + SEGMEXEC_TASK_SIZE;
70322 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
70323 + get_page(page_m);
70324 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
70325 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
70326 +}
70327 +#endif
70328 +
70329 /*
70330 * Hugetlb_cow() should be called with page lock of the original hugepage held.
70331 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
70332 @@ -2459,6 +2480,11 @@ retry_avoidcopy:
70333 make_huge_pte(vma, new_page, 1));
70334 page_remove_rmap(old_page);
70335 hugepage_add_new_anon_rmap(new_page, vma, address);
70336 +
70337 +#ifdef CONFIG_PAX_SEGMEXEC
70338 + pax_mirror_huge_pte(vma, address, new_page);
70339 +#endif
70340 +
70341 /* Make the old page be freed below */
70342 new_page = old_page;
70343 mmu_notifier_invalidate_range_end(mm,
70344 @@ -2613,6 +2639,10 @@ retry:
70345 && (vma->vm_flags & VM_SHARED)));
70346 set_huge_pte_at(mm, address, ptep, new_pte);
70347
70348 +#ifdef CONFIG_PAX_SEGMEXEC
70349 + pax_mirror_huge_pte(vma, address, page);
70350 +#endif
70351 +
70352 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
70353 /* Optimization, do the COW without a second fault */
70354 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
70355 @@ -2642,6 +2672,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70356 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
70357 struct hstate *h = hstate_vma(vma);
70358
70359 +#ifdef CONFIG_PAX_SEGMEXEC
70360 + struct vm_area_struct *vma_m;
70361 +#endif
70362 +
70363 address &= huge_page_mask(h);
70364
70365 ptep = huge_pte_offset(mm, address);
70366 @@ -2655,6 +2689,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70367 VM_FAULT_SET_HINDEX(h - hstates);
70368 }
70369
70370 +#ifdef CONFIG_PAX_SEGMEXEC
70371 + vma_m = pax_find_mirror_vma(vma);
70372 + if (vma_m) {
70373 + unsigned long address_m;
70374 +
70375 + if (vma->vm_start > vma_m->vm_start) {
70376 + address_m = address;
70377 + address -= SEGMEXEC_TASK_SIZE;
70378 + vma = vma_m;
70379 + h = hstate_vma(vma);
70380 + } else
70381 + address_m = address + SEGMEXEC_TASK_SIZE;
70382 +
70383 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
70384 + return VM_FAULT_OOM;
70385 + address_m &= HPAGE_MASK;
70386 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
70387 + }
70388 +#endif
70389 +
70390 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
70391 if (!ptep)
70392 return VM_FAULT_OOM;
70393 diff --git a/mm/internal.h b/mm/internal.h
70394 index 2189af4..f2ca332 100644
70395 --- a/mm/internal.h
70396 +++ b/mm/internal.h
70397 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
70398 * in mm/page_alloc.c
70399 */
70400 extern void __free_pages_bootmem(struct page *page, unsigned int order);
70401 +extern void free_compound_page(struct page *page);
70402 extern void prep_compound_page(struct page *page, unsigned long order);
70403 #ifdef CONFIG_MEMORY_FAILURE
70404 extern bool is_free_buddy_page(struct page *page);
70405 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
70406 index 45eb621..6ccd8ea 100644
70407 --- a/mm/kmemleak.c
70408 +++ b/mm/kmemleak.c
70409 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
70410
70411 for (i = 0; i < object->trace_len; i++) {
70412 void *ptr = (void *)object->trace[i];
70413 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
70414 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
70415 }
70416 }
70417
70418 diff --git a/mm/maccess.c b/mm/maccess.c
70419 index d53adf9..03a24bf 100644
70420 --- a/mm/maccess.c
70421 +++ b/mm/maccess.c
70422 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
70423 set_fs(KERNEL_DS);
70424 pagefault_disable();
70425 ret = __copy_from_user_inatomic(dst,
70426 - (__force const void __user *)src, size);
70427 + (const void __force_user *)src, size);
70428 pagefault_enable();
70429 set_fs(old_fs);
70430
70431 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
70432
70433 set_fs(KERNEL_DS);
70434 pagefault_disable();
70435 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
70436 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
70437 pagefault_enable();
70438 set_fs(old_fs);
70439
70440 diff --git a/mm/madvise.c b/mm/madvise.c
70441 index 74bf193..feb6fd3 100644
70442 --- a/mm/madvise.c
70443 +++ b/mm/madvise.c
70444 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
70445 pgoff_t pgoff;
70446 unsigned long new_flags = vma->vm_flags;
70447
70448 +#ifdef CONFIG_PAX_SEGMEXEC
70449 + struct vm_area_struct *vma_m;
70450 +#endif
70451 +
70452 switch (behavior) {
70453 case MADV_NORMAL:
70454 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
70455 @@ -110,6 +114,13 @@ success:
70456 /*
70457 * vm_flags is protected by the mmap_sem held in write mode.
70458 */
70459 +
70460 +#ifdef CONFIG_PAX_SEGMEXEC
70461 + vma_m = pax_find_mirror_vma(vma);
70462 + if (vma_m)
70463 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
70464 +#endif
70465 +
70466 vma->vm_flags = new_flags;
70467
70468 out:
70469 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70470 struct vm_area_struct ** prev,
70471 unsigned long start, unsigned long end)
70472 {
70473 +
70474 +#ifdef CONFIG_PAX_SEGMEXEC
70475 + struct vm_area_struct *vma_m;
70476 +#endif
70477 +
70478 *prev = vma;
70479 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
70480 return -EINVAL;
70481 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70482 zap_page_range(vma, start, end - start, &details);
70483 } else
70484 zap_page_range(vma, start, end - start, NULL);
70485 +
70486 +#ifdef CONFIG_PAX_SEGMEXEC
70487 + vma_m = pax_find_mirror_vma(vma);
70488 + if (vma_m) {
70489 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
70490 + struct zap_details details = {
70491 + .nonlinear_vma = vma_m,
70492 + .last_index = ULONG_MAX,
70493 + };
70494 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
70495 + } else
70496 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
70497 + }
70498 +#endif
70499 +
70500 return 0;
70501 }
70502
70503 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
70504 if (end < start)
70505 goto out;
70506
70507 +#ifdef CONFIG_PAX_SEGMEXEC
70508 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70509 + if (end > SEGMEXEC_TASK_SIZE)
70510 + goto out;
70511 + } else
70512 +#endif
70513 +
70514 + if (end > TASK_SIZE)
70515 + goto out;
70516 +
70517 error = 0;
70518 if (end == start)
70519 goto out;
70520 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
70521 index 56080ea..115071e 100644
70522 --- a/mm/memory-failure.c
70523 +++ b/mm/memory-failure.c
70524 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
70525
70526 int sysctl_memory_failure_recovery __read_mostly = 1;
70527
70528 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70529 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70530
70531 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
70532
70533 @@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
70534 si.si_signo = SIGBUS;
70535 si.si_errno = 0;
70536 si.si_code = BUS_MCEERR_AO;
70537 - si.si_addr = (void *)addr;
70538 + si.si_addr = (void __user *)addr;
70539 #ifdef __ARCH_SI_TRAPNO
70540 si.si_trapno = trapno;
70541 #endif
70542 @@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70543 }
70544
70545 nr_pages = 1 << compound_trans_order(hpage);
70546 - atomic_long_add(nr_pages, &mce_bad_pages);
70547 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
70548
70549 /*
70550 * We need/can do nothing about count=0 pages.
70551 @@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70552 if (!PageHWPoison(hpage)
70553 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
70554 || (p != hpage && TestSetPageHWPoison(hpage))) {
70555 - atomic_long_sub(nr_pages, &mce_bad_pages);
70556 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70557 return 0;
70558 }
70559 set_page_hwpoison_huge_page(hpage);
70560 @@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70561 }
70562 if (hwpoison_filter(p)) {
70563 if (TestClearPageHWPoison(p))
70564 - atomic_long_sub(nr_pages, &mce_bad_pages);
70565 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70566 unlock_page(hpage);
70567 put_page(hpage);
70568 return 0;
70569 @@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
70570 return 0;
70571 }
70572 if (TestClearPageHWPoison(p))
70573 - atomic_long_sub(nr_pages, &mce_bad_pages);
70574 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70575 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
70576 return 0;
70577 }
70578 @@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
70579 */
70580 if (TestClearPageHWPoison(page)) {
70581 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
70582 - atomic_long_sub(nr_pages, &mce_bad_pages);
70583 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70584 freeit = 1;
70585 if (PageHuge(page))
70586 clear_page_hwpoison_huge_page(page);
70587 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
70588 }
70589 done:
70590 if (!PageHWPoison(hpage))
70591 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
70592 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
70593 set_page_hwpoison_huge_page(hpage);
70594 dequeue_hwpoisoned_huge_page(hpage);
70595 /* keep elevated page count for bad page */
70596 @@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
70597 return ret;
70598
70599 done:
70600 - atomic_long_add(1, &mce_bad_pages);
70601 + atomic_long_add_unchecked(1, &mce_bad_pages);
70602 SetPageHWPoison(page);
70603 /* keep elevated page count for bad page */
70604 return ret;
70605 diff --git a/mm/memory.c b/mm/memory.c
70606 index 10b4dda..764ee07 100644
70607 --- a/mm/memory.c
70608 +++ b/mm/memory.c
70609 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
70610 return;
70611
70612 pmd = pmd_offset(pud, start);
70613 +
70614 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
70615 pud_clear(pud);
70616 pmd_free_tlb(tlb, pmd, start);
70617 +#endif
70618 +
70619 }
70620
70621 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70622 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70623 if (end - 1 > ceiling - 1)
70624 return;
70625
70626 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
70627 pud = pud_offset(pgd, start);
70628 pgd_clear(pgd);
70629 pud_free_tlb(tlb, pud, start);
70630 +#endif
70631 +
70632 }
70633
70634 /*
70635 @@ -1593,12 +1600,6 @@ no_page_table:
70636 return page;
70637 }
70638
70639 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
70640 -{
70641 - return stack_guard_page_start(vma, addr) ||
70642 - stack_guard_page_end(vma, addr+PAGE_SIZE);
70643 -}
70644 -
70645 /**
70646 * __get_user_pages() - pin user pages in memory
70647 * @tsk: task_struct of target task
70648 @@ -1671,10 +1672,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70649 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
70650 i = 0;
70651
70652 - do {
70653 + while (nr_pages) {
70654 struct vm_area_struct *vma;
70655
70656 - vma = find_extend_vma(mm, start);
70657 + vma = find_vma(mm, start);
70658 if (!vma && in_gate_area(mm, start)) {
70659 unsigned long pg = start & PAGE_MASK;
70660 pgd_t *pgd;
70661 @@ -1722,7 +1723,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70662 goto next_page;
70663 }
70664
70665 - if (!vma ||
70666 + if (!vma || start < vma->vm_start ||
70667 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
70668 !(vm_flags & vma->vm_flags))
70669 return i ? : -EFAULT;
70670 @@ -1749,11 +1750,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70671 int ret;
70672 unsigned int fault_flags = 0;
70673
70674 - /* For mlock, just skip the stack guard page. */
70675 - if (foll_flags & FOLL_MLOCK) {
70676 - if (stack_guard_page(vma, start))
70677 - goto next_page;
70678 - }
70679 if (foll_flags & FOLL_WRITE)
70680 fault_flags |= FAULT_FLAG_WRITE;
70681 if (nonblocking)
70682 @@ -1827,7 +1823,7 @@ next_page:
70683 start += PAGE_SIZE;
70684 nr_pages--;
70685 } while (nr_pages && start < vma->vm_end);
70686 - } while (nr_pages);
70687 + }
70688 return i;
70689 }
70690 EXPORT_SYMBOL(__get_user_pages);
70691 @@ -2034,6 +2030,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
70692 page_add_file_rmap(page);
70693 set_pte_at(mm, addr, pte, mk_pte(page, prot));
70694
70695 +#ifdef CONFIG_PAX_SEGMEXEC
70696 + pax_mirror_file_pte(vma, addr, page, ptl);
70697 +#endif
70698 +
70699 retval = 0;
70700 pte_unmap_unlock(pte, ptl);
70701 return retval;
70702 @@ -2068,10 +2068,22 @@ out:
70703 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
70704 struct page *page)
70705 {
70706 +
70707 +#ifdef CONFIG_PAX_SEGMEXEC
70708 + struct vm_area_struct *vma_m;
70709 +#endif
70710 +
70711 if (addr < vma->vm_start || addr >= vma->vm_end)
70712 return -EFAULT;
70713 if (!page_count(page))
70714 return -EINVAL;
70715 +
70716 +#ifdef CONFIG_PAX_SEGMEXEC
70717 + vma_m = pax_find_mirror_vma(vma);
70718 + if (vma_m)
70719 + vma_m->vm_flags |= VM_INSERTPAGE;
70720 +#endif
70721 +
70722 vma->vm_flags |= VM_INSERTPAGE;
70723 return insert_page(vma, addr, page, vma->vm_page_prot);
70724 }
70725 @@ -2157,6 +2169,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
70726 unsigned long pfn)
70727 {
70728 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
70729 + BUG_ON(vma->vm_mirror);
70730
70731 if (addr < vma->vm_start || addr >= vma->vm_end)
70732 return -EFAULT;
70733 @@ -2472,6 +2485,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
70734 copy_user_highpage(dst, src, va, vma);
70735 }
70736
70737 +#ifdef CONFIG_PAX_SEGMEXEC
70738 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
70739 +{
70740 + struct mm_struct *mm = vma->vm_mm;
70741 + spinlock_t *ptl;
70742 + pte_t *pte, entry;
70743 +
70744 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
70745 + entry = *pte;
70746 + if (!pte_present(entry)) {
70747 + if (!pte_none(entry)) {
70748 + BUG_ON(pte_file(entry));
70749 + free_swap_and_cache(pte_to_swp_entry(entry));
70750 + pte_clear_not_present_full(mm, address, pte, 0);
70751 + }
70752 + } else {
70753 + struct page *page;
70754 +
70755 + flush_cache_page(vma, address, pte_pfn(entry));
70756 + entry = ptep_clear_flush(vma, address, pte);
70757 + BUG_ON(pte_dirty(entry));
70758 + page = vm_normal_page(vma, address, entry);
70759 + if (page) {
70760 + update_hiwater_rss(mm);
70761 + if (PageAnon(page))
70762 + dec_mm_counter_fast(mm, MM_ANONPAGES);
70763 + else
70764 + dec_mm_counter_fast(mm, MM_FILEPAGES);
70765 + page_remove_rmap(page);
70766 + page_cache_release(page);
70767 + }
70768 + }
70769 + pte_unmap_unlock(pte, ptl);
70770 +}
70771 +
70772 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
70773 + *
70774 + * the ptl of the lower mapped page is held on entry and is not released on exit
70775 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
70776 + */
70777 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70778 +{
70779 + struct mm_struct *mm = vma->vm_mm;
70780 + unsigned long address_m;
70781 + spinlock_t *ptl_m;
70782 + struct vm_area_struct *vma_m;
70783 + pmd_t *pmd_m;
70784 + pte_t *pte_m, entry_m;
70785 +
70786 + BUG_ON(!page_m || !PageAnon(page_m));
70787 +
70788 + vma_m = pax_find_mirror_vma(vma);
70789 + if (!vma_m)
70790 + return;
70791 +
70792 + BUG_ON(!PageLocked(page_m));
70793 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70794 + address_m = address + SEGMEXEC_TASK_SIZE;
70795 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70796 + pte_m = pte_offset_map(pmd_m, address_m);
70797 + ptl_m = pte_lockptr(mm, pmd_m);
70798 + if (ptl != ptl_m) {
70799 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70800 + if (!pte_none(*pte_m))
70801 + goto out;
70802 + }
70803 +
70804 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70805 + page_cache_get(page_m);
70806 + page_add_anon_rmap(page_m, vma_m, address_m);
70807 + inc_mm_counter_fast(mm, MM_ANONPAGES);
70808 + set_pte_at(mm, address_m, pte_m, entry_m);
70809 + update_mmu_cache(vma_m, address_m, entry_m);
70810 +out:
70811 + if (ptl != ptl_m)
70812 + spin_unlock(ptl_m);
70813 + pte_unmap(pte_m);
70814 + unlock_page(page_m);
70815 +}
70816 +
70817 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70818 +{
70819 + struct mm_struct *mm = vma->vm_mm;
70820 + unsigned long address_m;
70821 + spinlock_t *ptl_m;
70822 + struct vm_area_struct *vma_m;
70823 + pmd_t *pmd_m;
70824 + pte_t *pte_m, entry_m;
70825 +
70826 + BUG_ON(!page_m || PageAnon(page_m));
70827 +
70828 + vma_m = pax_find_mirror_vma(vma);
70829 + if (!vma_m)
70830 + return;
70831 +
70832 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70833 + address_m = address + SEGMEXEC_TASK_SIZE;
70834 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70835 + pte_m = pte_offset_map(pmd_m, address_m);
70836 + ptl_m = pte_lockptr(mm, pmd_m);
70837 + if (ptl != ptl_m) {
70838 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70839 + if (!pte_none(*pte_m))
70840 + goto out;
70841 + }
70842 +
70843 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70844 + page_cache_get(page_m);
70845 + page_add_file_rmap(page_m);
70846 + inc_mm_counter_fast(mm, MM_FILEPAGES);
70847 + set_pte_at(mm, address_m, pte_m, entry_m);
70848 + update_mmu_cache(vma_m, address_m, entry_m);
70849 +out:
70850 + if (ptl != ptl_m)
70851 + spin_unlock(ptl_m);
70852 + pte_unmap(pte_m);
70853 +}
70854 +
70855 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70856 +{
70857 + struct mm_struct *mm = vma->vm_mm;
70858 + unsigned long address_m;
70859 + spinlock_t *ptl_m;
70860 + struct vm_area_struct *vma_m;
70861 + pmd_t *pmd_m;
70862 + pte_t *pte_m, entry_m;
70863 +
70864 + vma_m = pax_find_mirror_vma(vma);
70865 + if (!vma_m)
70866 + return;
70867 +
70868 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70869 + address_m = address + SEGMEXEC_TASK_SIZE;
70870 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70871 + pte_m = pte_offset_map(pmd_m, address_m);
70872 + ptl_m = pte_lockptr(mm, pmd_m);
70873 + if (ptl != ptl_m) {
70874 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70875 + if (!pte_none(*pte_m))
70876 + goto out;
70877 + }
70878 +
70879 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70880 + set_pte_at(mm, address_m, pte_m, entry_m);
70881 +out:
70882 + if (ptl != ptl_m)
70883 + spin_unlock(ptl_m);
70884 + pte_unmap(pte_m);
70885 +}
70886 +
70887 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70888 +{
70889 + struct page *page_m;
70890 + pte_t entry;
70891 +
70892 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70893 + goto out;
70894 +
70895 + entry = *pte;
70896 + page_m = vm_normal_page(vma, address, entry);
70897 + if (!page_m)
70898 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70899 + else if (PageAnon(page_m)) {
70900 + if (pax_find_mirror_vma(vma)) {
70901 + pte_unmap_unlock(pte, ptl);
70902 + lock_page(page_m);
70903 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70904 + if (pte_same(entry, *pte))
70905 + pax_mirror_anon_pte(vma, address, page_m, ptl);
70906 + else
70907 + unlock_page(page_m);
70908 + }
70909 + } else
70910 + pax_mirror_file_pte(vma, address, page_m, ptl);
70911 +
70912 +out:
70913 + pte_unmap_unlock(pte, ptl);
70914 +}
70915 +#endif
70916 +
70917 /*
70918 * This routine handles present pages, when users try to write
70919 * to a shared page. It is done by copying the page to a new address
70920 @@ -2683,6 +2876,12 @@ gotten:
70921 */
70922 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70923 if (likely(pte_same(*page_table, orig_pte))) {
70924 +
70925 +#ifdef CONFIG_PAX_SEGMEXEC
70926 + if (pax_find_mirror_vma(vma))
70927 + BUG_ON(!trylock_page(new_page));
70928 +#endif
70929 +
70930 if (old_page) {
70931 if (!PageAnon(old_page)) {
70932 dec_mm_counter_fast(mm, MM_FILEPAGES);
70933 @@ -2734,6 +2933,10 @@ gotten:
70934 page_remove_rmap(old_page);
70935 }
70936
70937 +#ifdef CONFIG_PAX_SEGMEXEC
70938 + pax_mirror_anon_pte(vma, address, new_page, ptl);
70939 +#endif
70940 +
70941 /* Free the old page.. */
70942 new_page = old_page;
70943 ret |= VM_FAULT_WRITE;
70944 @@ -3013,6 +3216,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70945 swap_free(entry);
70946 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70947 try_to_free_swap(page);
70948 +
70949 +#ifdef CONFIG_PAX_SEGMEXEC
70950 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70951 +#endif
70952 +
70953 unlock_page(page);
70954 if (swapcache) {
70955 /*
70956 @@ -3036,6 +3244,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70957
70958 /* No need to invalidate - it was non-present before */
70959 update_mmu_cache(vma, address, page_table);
70960 +
70961 +#ifdef CONFIG_PAX_SEGMEXEC
70962 + pax_mirror_anon_pte(vma, address, page, ptl);
70963 +#endif
70964 +
70965 unlock:
70966 pte_unmap_unlock(page_table, ptl);
70967 out:
70968 @@ -3055,40 +3268,6 @@ out_release:
70969 }
70970
70971 /*
70972 - * This is like a special single-page "expand_{down|up}wards()",
70973 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
70974 - * doesn't hit another vma.
70975 - */
70976 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70977 -{
70978 - address &= PAGE_MASK;
70979 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70980 - struct vm_area_struct *prev = vma->vm_prev;
70981 -
70982 - /*
70983 - * Is there a mapping abutting this one below?
70984 - *
70985 - * That's only ok if it's the same stack mapping
70986 - * that has gotten split..
70987 - */
70988 - if (prev && prev->vm_end == address)
70989 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70990 -
70991 - expand_downwards(vma, address - PAGE_SIZE);
70992 - }
70993 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70994 - struct vm_area_struct *next = vma->vm_next;
70995 -
70996 - /* As VM_GROWSDOWN but s/below/above/ */
70997 - if (next && next->vm_start == address + PAGE_SIZE)
70998 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70999 -
71000 - expand_upwards(vma, address + PAGE_SIZE);
71001 - }
71002 - return 0;
71003 -}
71004 -
71005 -/*
71006 * We enter with non-exclusive mmap_sem (to exclude vma changes,
71007 * but allow concurrent faults), and pte mapped but not yet locked.
71008 * We return with mmap_sem still held, but pte unmapped and unlocked.
71009 @@ -3097,27 +3276,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
71010 unsigned long address, pte_t *page_table, pmd_t *pmd,
71011 unsigned int flags)
71012 {
71013 - struct page *page;
71014 + struct page *page = NULL;
71015 spinlock_t *ptl;
71016 pte_t entry;
71017
71018 - pte_unmap(page_table);
71019 -
71020 - /* Check if we need to add a guard page to the stack */
71021 - if (check_stack_guard_page(vma, address) < 0)
71022 - return VM_FAULT_SIGBUS;
71023 -
71024 - /* Use the zero-page for reads */
71025 if (!(flags & FAULT_FLAG_WRITE)) {
71026 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
71027 vma->vm_page_prot));
71028 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
71029 + ptl = pte_lockptr(mm, pmd);
71030 + spin_lock(ptl);
71031 if (!pte_none(*page_table))
71032 goto unlock;
71033 goto setpte;
71034 }
71035
71036 /* Allocate our own private page. */
71037 + pte_unmap(page_table);
71038 +
71039 if (unlikely(anon_vma_prepare(vma)))
71040 goto oom;
71041 page = alloc_zeroed_user_highpage_movable(vma, address);
71042 @@ -3136,6 +3311,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
71043 if (!pte_none(*page_table))
71044 goto release;
71045
71046 +#ifdef CONFIG_PAX_SEGMEXEC
71047 + if (pax_find_mirror_vma(vma))
71048 + BUG_ON(!trylock_page(page));
71049 +#endif
71050 +
71051 inc_mm_counter_fast(mm, MM_ANONPAGES);
71052 page_add_new_anon_rmap(page, vma, address);
71053 setpte:
71054 @@ -3143,6 +3323,12 @@ setpte:
71055
71056 /* No need to invalidate - it was non-present before */
71057 update_mmu_cache(vma, address, page_table);
71058 +
71059 +#ifdef CONFIG_PAX_SEGMEXEC
71060 + if (page)
71061 + pax_mirror_anon_pte(vma, address, page, ptl);
71062 +#endif
71063 +
71064 unlock:
71065 pte_unmap_unlock(page_table, ptl);
71066 return 0;
71067 @@ -3286,6 +3472,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71068 */
71069 /* Only go through if we didn't race with anybody else... */
71070 if (likely(pte_same(*page_table, orig_pte))) {
71071 +
71072 +#ifdef CONFIG_PAX_SEGMEXEC
71073 + if (anon && pax_find_mirror_vma(vma))
71074 + BUG_ON(!trylock_page(page));
71075 +#endif
71076 +
71077 flush_icache_page(vma, page);
71078 entry = mk_pte(page, vma->vm_page_prot);
71079 if (flags & FAULT_FLAG_WRITE)
71080 @@ -3305,6 +3497,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71081
71082 /* no need to invalidate: a not-present page won't be cached */
71083 update_mmu_cache(vma, address, page_table);
71084 +
71085 +#ifdef CONFIG_PAX_SEGMEXEC
71086 + if (anon)
71087 + pax_mirror_anon_pte(vma, address, page, ptl);
71088 + else
71089 + pax_mirror_file_pte(vma, address, page, ptl);
71090 +#endif
71091 +
71092 } else {
71093 if (cow_page)
71094 mem_cgroup_uncharge_page(cow_page);
71095 @@ -3458,6 +3658,12 @@ int handle_pte_fault(struct mm_struct *mm,
71096 if (flags & FAULT_FLAG_WRITE)
71097 flush_tlb_fix_spurious_fault(vma, address);
71098 }
71099 +
71100 +#ifdef CONFIG_PAX_SEGMEXEC
71101 + pax_mirror_pte(vma, address, pte, pmd, ptl);
71102 + return 0;
71103 +#endif
71104 +
71105 unlock:
71106 pte_unmap_unlock(pte, ptl);
71107 return 0;
71108 @@ -3474,6 +3680,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71109 pmd_t *pmd;
71110 pte_t *pte;
71111
71112 +#ifdef CONFIG_PAX_SEGMEXEC
71113 + struct vm_area_struct *vma_m;
71114 +#endif
71115 +
71116 __set_current_state(TASK_RUNNING);
71117
71118 count_vm_event(PGFAULT);
71119 @@ -3485,6 +3695,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71120 if (unlikely(is_vm_hugetlb_page(vma)))
71121 return hugetlb_fault(mm, vma, address, flags);
71122
71123 +#ifdef CONFIG_PAX_SEGMEXEC
71124 + vma_m = pax_find_mirror_vma(vma);
71125 + if (vma_m) {
71126 + unsigned long address_m;
71127 + pgd_t *pgd_m;
71128 + pud_t *pud_m;
71129 + pmd_t *pmd_m;
71130 +
71131 + if (vma->vm_start > vma_m->vm_start) {
71132 + address_m = address;
71133 + address -= SEGMEXEC_TASK_SIZE;
71134 + vma = vma_m;
71135 + } else
71136 + address_m = address + SEGMEXEC_TASK_SIZE;
71137 +
71138 + pgd_m = pgd_offset(mm, address_m);
71139 + pud_m = pud_alloc(mm, pgd_m, address_m);
71140 + if (!pud_m)
71141 + return VM_FAULT_OOM;
71142 + pmd_m = pmd_alloc(mm, pud_m, address_m);
71143 + if (!pmd_m)
71144 + return VM_FAULT_OOM;
71145 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
71146 + return VM_FAULT_OOM;
71147 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
71148 + }
71149 +#endif
71150 +
71151 pgd = pgd_offset(mm, address);
71152 pud = pud_alloc(mm, pgd, address);
71153 if (!pud)
71154 @@ -3514,7 +3752,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71155 * run pte_offset_map on the pmd, if an huge pmd could
71156 * materialize from under us from a different thread.
71157 */
71158 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
71159 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
71160 return VM_FAULT_OOM;
71161 /* if an huge pmd materialized from under us just retry later */
71162 if (unlikely(pmd_trans_huge(*pmd)))
71163 @@ -3618,7 +3856,7 @@ static int __init gate_vma_init(void)
71164 gate_vma.vm_start = FIXADDR_USER_START;
71165 gate_vma.vm_end = FIXADDR_USER_END;
71166 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
71167 - gate_vma.vm_page_prot = __P101;
71168 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
71169 /*
71170 * Make sure the vDSO gets into every core dump.
71171 * Dumping its contents makes post-mortem fully interpretable later
71172 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
71173 index 0a37570..2048346 100644
71174 --- a/mm/mempolicy.c
71175 +++ b/mm/mempolicy.c
71176 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71177 unsigned long vmstart;
71178 unsigned long vmend;
71179
71180 +#ifdef CONFIG_PAX_SEGMEXEC
71181 + struct vm_area_struct *vma_m;
71182 +#endif
71183 +
71184 vma = find_vma(mm, start);
71185 if (!vma || vma->vm_start > start)
71186 return -EFAULT;
71187 @@ -679,6 +683,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71188 err = policy_vma(vma, new_pol);
71189 if (err)
71190 goto out;
71191 +
71192 +#ifdef CONFIG_PAX_SEGMEXEC
71193 + vma_m = pax_find_mirror_vma(vma);
71194 + if (vma_m) {
71195 + err = policy_vma(vma_m, new_pol);
71196 + if (err)
71197 + goto out;
71198 + }
71199 +#endif
71200 +
71201 }
71202
71203 out:
71204 @@ -1112,6 +1126,17 @@ static long do_mbind(unsigned long start, unsigned long len,
71205
71206 if (end < start)
71207 return -EINVAL;
71208 +
71209 +#ifdef CONFIG_PAX_SEGMEXEC
71210 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71211 + if (end > SEGMEXEC_TASK_SIZE)
71212 + return -EINVAL;
71213 + } else
71214 +#endif
71215 +
71216 + if (end > TASK_SIZE)
71217 + return -EINVAL;
71218 +
71219 if (end == start)
71220 return 0;
71221
71222 @@ -1330,6 +1355,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71223 if (!mm)
71224 goto out;
71225
71226 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71227 + if (mm != current->mm &&
71228 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71229 + err = -EPERM;
71230 + goto out;
71231 + }
71232 +#endif
71233 +
71234 /*
71235 * Check if this process has the right to modify the specified
71236 * process. The right exists if the process has administrative
71237 @@ -1339,8 +1372,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71238 rcu_read_lock();
71239 tcred = __task_cred(task);
71240 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71241 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
71242 - !capable(CAP_SYS_NICE)) {
71243 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71244 rcu_read_unlock();
71245 err = -EPERM;
71246 goto out;
71247 diff --git a/mm/migrate.c b/mm/migrate.c
71248 index 1503b6b..156c672 100644
71249 --- a/mm/migrate.c
71250 +++ b/mm/migrate.c
71251 @@ -1370,6 +1370,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
71252 if (!mm)
71253 return -EINVAL;
71254
71255 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71256 + if (mm != current->mm &&
71257 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71258 + err = -EPERM;
71259 + goto out;
71260 + }
71261 +#endif
71262 +
71263 /*
71264 * Check if this process has the right to modify the specified
71265 * process. The right exists if the process has administrative
71266 @@ -1379,8 +1387,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
71267 rcu_read_lock();
71268 tcred = __task_cred(task);
71269 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71270 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
71271 - !capable(CAP_SYS_NICE)) {
71272 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71273 rcu_read_unlock();
71274 err = -EPERM;
71275 goto out;
71276 diff --git a/mm/mlock.c b/mm/mlock.c
71277 index ef726e8..13e0901 100644
71278 --- a/mm/mlock.c
71279 +++ b/mm/mlock.c
71280 @@ -13,6 +13,7 @@
71281 #include <linux/pagemap.h>
71282 #include <linux/mempolicy.h>
71283 #include <linux/syscalls.h>
71284 +#include <linux/security.h>
71285 #include <linux/sched.h>
71286 #include <linux/export.h>
71287 #include <linux/rmap.h>
71288 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
71289 return -EINVAL;
71290 if (end == start)
71291 return 0;
71292 + if (end > TASK_SIZE)
71293 + return -EINVAL;
71294 +
71295 vma = find_vma(current->mm, start);
71296 if (!vma || vma->vm_start > start)
71297 return -ENOMEM;
71298 @@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
71299 for (nstart = start ; ; ) {
71300 vm_flags_t newflags;
71301
71302 +#ifdef CONFIG_PAX_SEGMEXEC
71303 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71304 + break;
71305 +#endif
71306 +
71307 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
71308
71309 newflags = vma->vm_flags | VM_LOCKED;
71310 @@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
71311 lock_limit >>= PAGE_SHIFT;
71312
71313 /* check against resource limits */
71314 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
71315 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
71316 error = do_mlock(start, len, 1);
71317 up_write(&current->mm->mmap_sem);
71318 @@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
71319 static int do_mlockall(int flags)
71320 {
71321 struct vm_area_struct * vma, * prev = NULL;
71322 - unsigned int def_flags = 0;
71323
71324 if (flags & MCL_FUTURE)
71325 - def_flags = VM_LOCKED;
71326 - current->mm->def_flags = def_flags;
71327 + current->mm->def_flags |= VM_LOCKED;
71328 + else
71329 + current->mm->def_flags &= ~VM_LOCKED;
71330 if (flags == MCL_FUTURE)
71331 goto out;
71332
71333 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
71334 vm_flags_t newflags;
71335
71336 +#ifdef CONFIG_PAX_SEGMEXEC
71337 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71338 + break;
71339 +#endif
71340 +
71341 + BUG_ON(vma->vm_end > TASK_SIZE);
71342 newflags = vma->vm_flags | VM_LOCKED;
71343 if (!(flags & MCL_CURRENT))
71344 newflags &= ~VM_LOCKED;
71345 @@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
71346 lock_limit >>= PAGE_SHIFT;
71347
71348 ret = -ENOMEM;
71349 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
71350 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
71351 capable(CAP_IPC_LOCK))
71352 ret = do_mlockall(flags);
71353 diff --git a/mm/mmap.c b/mm/mmap.c
71354 index da15a79..314aef3 100644
71355 --- a/mm/mmap.c
71356 +++ b/mm/mmap.c
71357 @@ -46,6 +46,16 @@
71358 #define arch_rebalance_pgtables(addr, len) (addr)
71359 #endif
71360
71361 +static inline void verify_mm_writelocked(struct mm_struct *mm)
71362 +{
71363 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
71364 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71365 + up_read(&mm->mmap_sem);
71366 + BUG();
71367 + }
71368 +#endif
71369 +}
71370 +
71371 static void unmap_region(struct mm_struct *mm,
71372 struct vm_area_struct *vma, struct vm_area_struct *prev,
71373 unsigned long start, unsigned long end);
71374 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
71375 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
71376 *
71377 */
71378 -pgprot_t protection_map[16] = {
71379 +pgprot_t protection_map[16] __read_only = {
71380 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
71381 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
71382 };
71383
71384 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
71385 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
71386 {
71387 - return __pgprot(pgprot_val(protection_map[vm_flags &
71388 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
71389 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
71390 pgprot_val(arch_vm_get_page_prot(vm_flags)));
71391 +
71392 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71393 + if (!(__supported_pte_mask & _PAGE_NX) &&
71394 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
71395 + (vm_flags & (VM_READ | VM_WRITE)))
71396 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
71397 +#endif
71398 +
71399 + return prot;
71400 }
71401 EXPORT_SYMBOL(vm_get_page_prot);
71402
71403 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
71404 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
71405 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
71406 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
71407 /*
71408 * Make sure vm_committed_as in one cacheline and not cacheline shared with
71409 * other variables. It can be updated by several CPUs frequently.
71410 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
71411 struct vm_area_struct *next = vma->vm_next;
71412
71413 might_sleep();
71414 + BUG_ON(vma->vm_mirror);
71415 if (vma->vm_ops && vma->vm_ops->close)
71416 vma->vm_ops->close(vma);
71417 if (vma->vm_file) {
71418 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
71419 * not page aligned -Ram Gupta
71420 */
71421 rlim = rlimit(RLIMIT_DATA);
71422 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
71423 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
71424 (mm->end_data - mm->start_data) > rlim)
71425 goto out;
71426 @@ -689,6 +711,12 @@ static int
71427 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
71428 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71429 {
71430 +
71431 +#ifdef CONFIG_PAX_SEGMEXEC
71432 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
71433 + return 0;
71434 +#endif
71435 +
71436 if (is_mergeable_vma(vma, file, vm_flags) &&
71437 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71438 if (vma->vm_pgoff == vm_pgoff)
71439 @@ -708,6 +736,12 @@ static int
71440 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71441 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71442 {
71443 +
71444 +#ifdef CONFIG_PAX_SEGMEXEC
71445 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
71446 + return 0;
71447 +#endif
71448 +
71449 if (is_mergeable_vma(vma, file, vm_flags) &&
71450 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71451 pgoff_t vm_pglen;
71452 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71453 struct vm_area_struct *vma_merge(struct mm_struct *mm,
71454 struct vm_area_struct *prev, unsigned long addr,
71455 unsigned long end, unsigned long vm_flags,
71456 - struct anon_vma *anon_vma, struct file *file,
71457 + struct anon_vma *anon_vma, struct file *file,
71458 pgoff_t pgoff, struct mempolicy *policy)
71459 {
71460 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
71461 struct vm_area_struct *area, *next;
71462 int err;
71463
71464 +#ifdef CONFIG_PAX_SEGMEXEC
71465 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
71466 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
71467 +
71468 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
71469 +#endif
71470 +
71471 /*
71472 * We later require that vma->vm_flags == vm_flags,
71473 * so this tests vma->vm_flags & VM_SPECIAL, too.
71474 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71475 if (next && next->vm_end == end) /* cases 6, 7, 8 */
71476 next = next->vm_next;
71477
71478 +#ifdef CONFIG_PAX_SEGMEXEC
71479 + if (prev)
71480 + prev_m = pax_find_mirror_vma(prev);
71481 + if (area)
71482 + area_m = pax_find_mirror_vma(area);
71483 + if (next)
71484 + next_m = pax_find_mirror_vma(next);
71485 +#endif
71486 +
71487 /*
71488 * Can it merge with the predecessor?
71489 */
71490 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71491 /* cases 1, 6 */
71492 err = vma_adjust(prev, prev->vm_start,
71493 next->vm_end, prev->vm_pgoff, NULL);
71494 - } else /* cases 2, 5, 7 */
71495 +
71496 +#ifdef CONFIG_PAX_SEGMEXEC
71497 + if (!err && prev_m)
71498 + err = vma_adjust(prev_m, prev_m->vm_start,
71499 + next_m->vm_end, prev_m->vm_pgoff, NULL);
71500 +#endif
71501 +
71502 + } else { /* cases 2, 5, 7 */
71503 err = vma_adjust(prev, prev->vm_start,
71504 end, prev->vm_pgoff, NULL);
71505 +
71506 +#ifdef CONFIG_PAX_SEGMEXEC
71507 + if (!err && prev_m)
71508 + err = vma_adjust(prev_m, prev_m->vm_start,
71509 + end_m, prev_m->vm_pgoff, NULL);
71510 +#endif
71511 +
71512 + }
71513 if (err)
71514 return NULL;
71515 khugepaged_enter_vma_merge(prev);
71516 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71517 mpol_equal(policy, vma_policy(next)) &&
71518 can_vma_merge_before(next, vm_flags,
71519 anon_vma, file, pgoff+pglen)) {
71520 - if (prev && addr < prev->vm_end) /* case 4 */
71521 + if (prev && addr < prev->vm_end) { /* case 4 */
71522 err = vma_adjust(prev, prev->vm_start,
71523 addr, prev->vm_pgoff, NULL);
71524 - else /* cases 3, 8 */
71525 +
71526 +#ifdef CONFIG_PAX_SEGMEXEC
71527 + if (!err && prev_m)
71528 + err = vma_adjust(prev_m, prev_m->vm_start,
71529 + addr_m, prev_m->vm_pgoff, NULL);
71530 +#endif
71531 +
71532 + } else { /* cases 3, 8 */
71533 err = vma_adjust(area, addr, next->vm_end,
71534 next->vm_pgoff - pglen, NULL);
71535 +
71536 +#ifdef CONFIG_PAX_SEGMEXEC
71537 + if (!err && area_m)
71538 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
71539 + next_m->vm_pgoff - pglen, NULL);
71540 +#endif
71541 +
71542 + }
71543 if (err)
71544 return NULL;
71545 khugepaged_enter_vma_merge(area);
71546 @@ -921,14 +1001,11 @@ none:
71547 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
71548 struct file *file, long pages)
71549 {
71550 - const unsigned long stack_flags
71551 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
71552 -
71553 if (file) {
71554 mm->shared_vm += pages;
71555 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
71556 mm->exec_vm += pages;
71557 - } else if (flags & stack_flags)
71558 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
71559 mm->stack_vm += pages;
71560 if (flags & (VM_RESERVED|VM_IO))
71561 mm->reserved_vm += pages;
71562 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71563 * (the exception is when the underlying filesystem is noexec
71564 * mounted, in which case we dont add PROT_EXEC.)
71565 */
71566 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71567 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71568 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
71569 prot |= PROT_EXEC;
71570
71571 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71572 /* Obtain the address to map to. we verify (or select) it and ensure
71573 * that it represents a valid section of the address space.
71574 */
71575 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
71576 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
71577 if (addr & ~PAGE_MASK)
71578 return addr;
71579
71580 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71581 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
71582 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
71583
71584 +#ifdef CONFIG_PAX_MPROTECT
71585 + if (mm->pax_flags & MF_PAX_MPROTECT) {
71586 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
71587 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
71588 + gr_log_rwxmmap(file);
71589 +
71590 +#ifdef CONFIG_PAX_EMUPLT
71591 + vm_flags &= ~VM_EXEC;
71592 +#else
71593 + return -EPERM;
71594 +#endif
71595 +
71596 + }
71597 +
71598 + if (!(vm_flags & VM_EXEC))
71599 + vm_flags &= ~VM_MAYEXEC;
71600 +#else
71601 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71602 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71603 +#endif
71604 + else
71605 + vm_flags &= ~VM_MAYWRITE;
71606 + }
71607 +#endif
71608 +
71609 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71610 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
71611 + vm_flags &= ~VM_PAGEEXEC;
71612 +#endif
71613 +
71614 if (flags & MAP_LOCKED)
71615 if (!can_do_mlock())
71616 return -EPERM;
71617 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71618 locked += mm->locked_vm;
71619 lock_limit = rlimit(RLIMIT_MEMLOCK);
71620 lock_limit >>= PAGE_SHIFT;
71621 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71622 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
71623 return -EAGAIN;
71624 }
71625 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71626 if (error)
71627 return error;
71628
71629 + if (!gr_acl_handle_mmap(file, prot))
71630 + return -EACCES;
71631 +
71632 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
71633 }
71634 EXPORT_SYMBOL(do_mmap_pgoff);
71635 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
71636 vm_flags_t vm_flags = vma->vm_flags;
71637
71638 /* If it was private or non-writable, the write bit is already clear */
71639 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
71640 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
71641 return 0;
71642
71643 /* The backer wishes to know when pages are first written to? */
71644 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
71645 unsigned long charged = 0;
71646 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
71647
71648 +#ifdef CONFIG_PAX_SEGMEXEC
71649 + struct vm_area_struct *vma_m = NULL;
71650 +#endif
71651 +
71652 + /*
71653 + * mm->mmap_sem is required to protect against another thread
71654 + * changing the mappings in case we sleep.
71655 + */
71656 + verify_mm_writelocked(mm);
71657 +
71658 /* Clear old maps */
71659 error = -ENOMEM;
71660 -munmap_back:
71661 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71662 if (vma && vma->vm_start < addr + len) {
71663 if (do_munmap(mm, addr, len))
71664 return -ENOMEM;
71665 - goto munmap_back;
71666 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71667 + BUG_ON(vma && vma->vm_start < addr + len);
71668 }
71669
71670 /* Check against address space limit. */
71671 @@ -1258,6 +1379,16 @@ munmap_back:
71672 goto unacct_error;
71673 }
71674
71675 +#ifdef CONFIG_PAX_SEGMEXEC
71676 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
71677 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71678 + if (!vma_m) {
71679 + error = -ENOMEM;
71680 + goto free_vma;
71681 + }
71682 + }
71683 +#endif
71684 +
71685 vma->vm_mm = mm;
71686 vma->vm_start = addr;
71687 vma->vm_end = addr + len;
71688 @@ -1282,6 +1413,19 @@ munmap_back:
71689 error = file->f_op->mmap(file, vma);
71690 if (error)
71691 goto unmap_and_free_vma;
71692 +
71693 +#ifdef CONFIG_PAX_SEGMEXEC
71694 + if (vma_m && (vm_flags & VM_EXECUTABLE))
71695 + added_exe_file_vma(mm);
71696 +#endif
71697 +
71698 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71699 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
71700 + vma->vm_flags |= VM_PAGEEXEC;
71701 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71702 + }
71703 +#endif
71704 +
71705 if (vm_flags & VM_EXECUTABLE)
71706 added_exe_file_vma(mm);
71707
71708 @@ -1319,6 +1463,11 @@ munmap_back:
71709 vma_link(mm, vma, prev, rb_link, rb_parent);
71710 file = vma->vm_file;
71711
71712 +#ifdef CONFIG_PAX_SEGMEXEC
71713 + if (vma_m)
71714 + BUG_ON(pax_mirror_vma(vma_m, vma));
71715 +#endif
71716 +
71717 /* Once vma denies write, undo our temporary denial count */
71718 if (correct_wcount)
71719 atomic_inc(&inode->i_writecount);
71720 @@ -1327,6 +1476,7 @@ out:
71721
71722 mm->total_vm += len >> PAGE_SHIFT;
71723 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
71724 + track_exec_limit(mm, addr, addr + len, vm_flags);
71725 if (vm_flags & VM_LOCKED) {
71726 if (!mlock_vma_pages_range(vma, addr, addr + len))
71727 mm->locked_vm += (len >> PAGE_SHIFT);
71728 @@ -1344,6 +1494,12 @@ unmap_and_free_vma:
71729 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
71730 charged = 0;
71731 free_vma:
71732 +
71733 +#ifdef CONFIG_PAX_SEGMEXEC
71734 + if (vma_m)
71735 + kmem_cache_free(vm_area_cachep, vma_m);
71736 +#endif
71737 +
71738 kmem_cache_free(vm_area_cachep, vma);
71739 unacct_error:
71740 if (charged)
71741 @@ -1351,6 +1507,44 @@ unacct_error:
71742 return error;
71743 }
71744
71745 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
71746 +{
71747 + if (!vma) {
71748 +#ifdef CONFIG_STACK_GROWSUP
71749 + if (addr > sysctl_heap_stack_gap)
71750 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
71751 + else
71752 + vma = find_vma(current->mm, 0);
71753 + if (vma && (vma->vm_flags & VM_GROWSUP))
71754 + return false;
71755 +#endif
71756 + return true;
71757 + }
71758 +
71759 + if (addr + len > vma->vm_start)
71760 + return false;
71761 +
71762 + if (vma->vm_flags & VM_GROWSDOWN)
71763 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
71764 +#ifdef CONFIG_STACK_GROWSUP
71765 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
71766 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
71767 +#endif
71768 +
71769 + return true;
71770 +}
71771 +
71772 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
71773 +{
71774 + if (vma->vm_start < len)
71775 + return -ENOMEM;
71776 + if (!(vma->vm_flags & VM_GROWSDOWN))
71777 + return vma->vm_start - len;
71778 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
71779 + return vma->vm_start - len - sysctl_heap_stack_gap;
71780 + return -ENOMEM;
71781 +}
71782 +
71783 /* Get an address range which is currently unmapped.
71784 * For shmat() with addr=0.
71785 *
71786 @@ -1377,18 +1571,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
71787 if (flags & MAP_FIXED)
71788 return addr;
71789
71790 +#ifdef CONFIG_PAX_RANDMMAP
71791 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71792 +#endif
71793 +
71794 if (addr) {
71795 addr = PAGE_ALIGN(addr);
71796 - vma = find_vma(mm, addr);
71797 - if (TASK_SIZE - len >= addr &&
71798 - (!vma || addr + len <= vma->vm_start))
71799 - return addr;
71800 + if (TASK_SIZE - len >= addr) {
71801 + vma = find_vma(mm, addr);
71802 + if (check_heap_stack_gap(vma, addr, len))
71803 + return addr;
71804 + }
71805 }
71806 if (len > mm->cached_hole_size) {
71807 - start_addr = addr = mm->free_area_cache;
71808 + start_addr = addr = mm->free_area_cache;
71809 } else {
71810 - start_addr = addr = TASK_UNMAPPED_BASE;
71811 - mm->cached_hole_size = 0;
71812 + start_addr = addr = mm->mmap_base;
71813 + mm->cached_hole_size = 0;
71814 }
71815
71816 full_search:
71817 @@ -1399,34 +1598,40 @@ full_search:
71818 * Start a new search - just in case we missed
71819 * some holes.
71820 */
71821 - if (start_addr != TASK_UNMAPPED_BASE) {
71822 - addr = TASK_UNMAPPED_BASE;
71823 - start_addr = addr;
71824 + if (start_addr != mm->mmap_base) {
71825 + start_addr = addr = mm->mmap_base;
71826 mm->cached_hole_size = 0;
71827 goto full_search;
71828 }
71829 return -ENOMEM;
71830 }
71831 - if (!vma || addr + len <= vma->vm_start) {
71832 - /*
71833 - * Remember the place where we stopped the search:
71834 - */
71835 - mm->free_area_cache = addr + len;
71836 - return addr;
71837 - }
71838 + if (check_heap_stack_gap(vma, addr, len))
71839 + break;
71840 if (addr + mm->cached_hole_size < vma->vm_start)
71841 mm->cached_hole_size = vma->vm_start - addr;
71842 addr = vma->vm_end;
71843 }
71844 +
71845 + /*
71846 + * Remember the place where we stopped the search:
71847 + */
71848 + mm->free_area_cache = addr + len;
71849 + return addr;
71850 }
71851 #endif
71852
71853 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71854 {
71855 +
71856 +#ifdef CONFIG_PAX_SEGMEXEC
71857 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71858 + return;
71859 +#endif
71860 +
71861 /*
71862 * Is this a new hole at the lowest possible address?
71863 */
71864 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
71865 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
71866 mm->free_area_cache = addr;
71867 mm->cached_hole_size = ~0UL;
71868 }
71869 @@ -1444,7 +1649,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71870 {
71871 struct vm_area_struct *vma;
71872 struct mm_struct *mm = current->mm;
71873 - unsigned long addr = addr0;
71874 + unsigned long base = mm->mmap_base, addr = addr0;
71875
71876 /* requested length too big for entire address space */
71877 if (len > TASK_SIZE)
71878 @@ -1453,13 +1658,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71879 if (flags & MAP_FIXED)
71880 return addr;
71881
71882 +#ifdef CONFIG_PAX_RANDMMAP
71883 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71884 +#endif
71885 +
71886 /* requesting a specific address */
71887 if (addr) {
71888 addr = PAGE_ALIGN(addr);
71889 - vma = find_vma(mm, addr);
71890 - if (TASK_SIZE - len >= addr &&
71891 - (!vma || addr + len <= vma->vm_start))
71892 - return addr;
71893 + if (TASK_SIZE - len >= addr) {
71894 + vma = find_vma(mm, addr);
71895 + if (check_heap_stack_gap(vma, addr, len))
71896 + return addr;
71897 + }
71898 }
71899
71900 /* check if free_area_cache is useful for us */
71901 @@ -1474,7 +1684,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71902 /* make sure it can fit in the remaining address space */
71903 if (addr > len) {
71904 vma = find_vma(mm, addr-len);
71905 - if (!vma || addr <= vma->vm_start)
71906 + if (check_heap_stack_gap(vma, addr - len, len))
71907 /* remember the address as a hint for next time */
71908 return (mm->free_area_cache = addr-len);
71909 }
71910 @@ -1491,7 +1701,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71911 * return with success:
71912 */
71913 vma = find_vma(mm, addr);
71914 - if (!vma || addr+len <= vma->vm_start)
71915 + if (check_heap_stack_gap(vma, addr, len))
71916 /* remember the address as a hint for next time */
71917 return (mm->free_area_cache = addr);
71918
71919 @@ -1500,8 +1710,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71920 mm->cached_hole_size = vma->vm_start - addr;
71921
71922 /* try just below the current vma->vm_start */
71923 - addr = vma->vm_start-len;
71924 - } while (len < vma->vm_start);
71925 + addr = skip_heap_stack_gap(vma, len);
71926 + } while (!IS_ERR_VALUE(addr));
71927
71928 bottomup:
71929 /*
71930 @@ -1510,13 +1720,21 @@ bottomup:
71931 * can happen with large stack limits and large mmap()
71932 * allocations.
71933 */
71934 + mm->mmap_base = TASK_UNMAPPED_BASE;
71935 +
71936 +#ifdef CONFIG_PAX_RANDMMAP
71937 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71938 + mm->mmap_base += mm->delta_mmap;
71939 +#endif
71940 +
71941 + mm->free_area_cache = mm->mmap_base;
71942 mm->cached_hole_size = ~0UL;
71943 - mm->free_area_cache = TASK_UNMAPPED_BASE;
71944 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71945 /*
71946 * Restore the topdown base:
71947 */
71948 - mm->free_area_cache = mm->mmap_base;
71949 + mm->mmap_base = base;
71950 + mm->free_area_cache = base;
71951 mm->cached_hole_size = ~0UL;
71952
71953 return addr;
71954 @@ -1525,6 +1743,12 @@ bottomup:
71955
71956 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71957 {
71958 +
71959 +#ifdef CONFIG_PAX_SEGMEXEC
71960 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71961 + return;
71962 +#endif
71963 +
71964 /*
71965 * Is this a new hole at the highest possible address?
71966 */
71967 @@ -1532,8 +1756,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71968 mm->free_area_cache = addr;
71969
71970 /* dont allow allocations above current base */
71971 - if (mm->free_area_cache > mm->mmap_base)
71972 + if (mm->free_area_cache > mm->mmap_base) {
71973 mm->free_area_cache = mm->mmap_base;
71974 + mm->cached_hole_size = ~0UL;
71975 + }
71976 }
71977
71978 unsigned long
71979 @@ -1629,6 +1855,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
71980 return vma;
71981 }
71982
71983 +#ifdef CONFIG_PAX_SEGMEXEC
71984 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71985 +{
71986 + struct vm_area_struct *vma_m;
71987 +
71988 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71989 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71990 + BUG_ON(vma->vm_mirror);
71991 + return NULL;
71992 + }
71993 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71994 + vma_m = vma->vm_mirror;
71995 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71996 + BUG_ON(vma->vm_file != vma_m->vm_file);
71997 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71998 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71999 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
72000 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
72001 + return vma_m;
72002 +}
72003 +#endif
72004 +
72005 /*
72006 * Verify that the stack growth is acceptable and
72007 * update accounting. This is shared with both the
72008 @@ -1645,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72009 return -ENOMEM;
72010
72011 /* Stack limit test */
72012 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
72013 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
72014 return -ENOMEM;
72015
72016 @@ -1655,6 +1904,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72017 locked = mm->locked_vm + grow;
72018 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
72019 limit >>= PAGE_SHIFT;
72020 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72021 if (locked > limit && !capable(CAP_IPC_LOCK))
72022 return -ENOMEM;
72023 }
72024 @@ -1685,37 +1935,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72025 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
72026 * vma is the last one with address > vma->vm_end. Have to extend vma.
72027 */
72028 +#ifndef CONFIG_IA64
72029 +static
72030 +#endif
72031 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72032 {
72033 int error;
72034 + bool locknext;
72035
72036 if (!(vma->vm_flags & VM_GROWSUP))
72037 return -EFAULT;
72038
72039 + /* Also guard against wrapping around to address 0. */
72040 + if (address < PAGE_ALIGN(address+1))
72041 + address = PAGE_ALIGN(address+1);
72042 + else
72043 + return -ENOMEM;
72044 +
72045 /*
72046 * We must make sure the anon_vma is allocated
72047 * so that the anon_vma locking is not a noop.
72048 */
72049 if (unlikely(anon_vma_prepare(vma)))
72050 return -ENOMEM;
72051 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
72052 + if (locknext && anon_vma_prepare(vma->vm_next))
72053 + return -ENOMEM;
72054 vma_lock_anon_vma(vma);
72055 + if (locknext)
72056 + vma_lock_anon_vma(vma->vm_next);
72057
72058 /*
72059 * vma->vm_start/vm_end cannot change under us because the caller
72060 * is required to hold the mmap_sem in read mode. We need the
72061 - * anon_vma lock to serialize against concurrent expand_stacks.
72062 - * Also guard against wrapping around to address 0.
72063 + * anon_vma locks to serialize against concurrent expand_stacks
72064 + * and expand_upwards.
72065 */
72066 - if (address < PAGE_ALIGN(address+4))
72067 - address = PAGE_ALIGN(address+4);
72068 - else {
72069 - vma_unlock_anon_vma(vma);
72070 - return -ENOMEM;
72071 - }
72072 error = 0;
72073
72074 /* Somebody else might have raced and expanded it already */
72075 - if (address > vma->vm_end) {
72076 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
72077 + error = -ENOMEM;
72078 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
72079 unsigned long size, grow;
72080
72081 size = address - vma->vm_start;
72082 @@ -1730,6 +1991,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72083 }
72084 }
72085 }
72086 + if (locknext)
72087 + vma_unlock_anon_vma(vma->vm_next);
72088 vma_unlock_anon_vma(vma);
72089 khugepaged_enter_vma_merge(vma);
72090 return error;
72091 @@ -1743,6 +2006,8 @@ int expand_downwards(struct vm_area_struct *vma,
72092 unsigned long address)
72093 {
72094 int error;
72095 + bool lockprev = false;
72096 + struct vm_area_struct *prev;
72097
72098 /*
72099 * We must make sure the anon_vma is allocated
72100 @@ -1756,6 +2021,15 @@ int expand_downwards(struct vm_area_struct *vma,
72101 if (error)
72102 return error;
72103
72104 + prev = vma->vm_prev;
72105 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
72106 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
72107 +#endif
72108 + if (lockprev && anon_vma_prepare(prev))
72109 + return -ENOMEM;
72110 + if (lockprev)
72111 + vma_lock_anon_vma(prev);
72112 +
72113 vma_lock_anon_vma(vma);
72114
72115 /*
72116 @@ -1765,9 +2039,17 @@ int expand_downwards(struct vm_area_struct *vma,
72117 */
72118
72119 /* Somebody else might have raced and expanded it already */
72120 - if (address < vma->vm_start) {
72121 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
72122 + error = -ENOMEM;
72123 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
72124 unsigned long size, grow;
72125
72126 +#ifdef CONFIG_PAX_SEGMEXEC
72127 + struct vm_area_struct *vma_m;
72128 +
72129 + vma_m = pax_find_mirror_vma(vma);
72130 +#endif
72131 +
72132 size = vma->vm_end - address;
72133 grow = (vma->vm_start - address) >> PAGE_SHIFT;
72134
72135 @@ -1777,11 +2059,22 @@ int expand_downwards(struct vm_area_struct *vma,
72136 if (!error) {
72137 vma->vm_start = address;
72138 vma->vm_pgoff -= grow;
72139 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
72140 +
72141 +#ifdef CONFIG_PAX_SEGMEXEC
72142 + if (vma_m) {
72143 + vma_m->vm_start -= grow << PAGE_SHIFT;
72144 + vma_m->vm_pgoff -= grow;
72145 + }
72146 +#endif
72147 +
72148 perf_event_mmap(vma);
72149 }
72150 }
72151 }
72152 vma_unlock_anon_vma(vma);
72153 + if (lockprev)
72154 + vma_unlock_anon_vma(prev);
72155 khugepaged_enter_vma_merge(vma);
72156 return error;
72157 }
72158 @@ -1851,6 +2144,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
72159 do {
72160 long nrpages = vma_pages(vma);
72161
72162 +#ifdef CONFIG_PAX_SEGMEXEC
72163 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
72164 + vma = remove_vma(vma);
72165 + continue;
72166 + }
72167 +#endif
72168 +
72169 mm->total_vm -= nrpages;
72170 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
72171 vma = remove_vma(vma);
72172 @@ -1896,6 +2196,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
72173 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
72174 vma->vm_prev = NULL;
72175 do {
72176 +
72177 +#ifdef CONFIG_PAX_SEGMEXEC
72178 + if (vma->vm_mirror) {
72179 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
72180 + vma->vm_mirror->vm_mirror = NULL;
72181 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
72182 + vma->vm_mirror = NULL;
72183 + }
72184 +#endif
72185 +
72186 rb_erase(&vma->vm_rb, &mm->mm_rb);
72187 mm->map_count--;
72188 tail_vma = vma;
72189 @@ -1924,14 +2234,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72190 struct vm_area_struct *new;
72191 int err = -ENOMEM;
72192
72193 +#ifdef CONFIG_PAX_SEGMEXEC
72194 + struct vm_area_struct *vma_m, *new_m = NULL;
72195 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
72196 +#endif
72197 +
72198 if (is_vm_hugetlb_page(vma) && (addr &
72199 ~(huge_page_mask(hstate_vma(vma)))))
72200 return -EINVAL;
72201
72202 +#ifdef CONFIG_PAX_SEGMEXEC
72203 + vma_m = pax_find_mirror_vma(vma);
72204 +#endif
72205 +
72206 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72207 if (!new)
72208 goto out_err;
72209
72210 +#ifdef CONFIG_PAX_SEGMEXEC
72211 + if (vma_m) {
72212 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72213 + if (!new_m) {
72214 + kmem_cache_free(vm_area_cachep, new);
72215 + goto out_err;
72216 + }
72217 + }
72218 +#endif
72219 +
72220 /* most fields are the same, copy all, and then fixup */
72221 *new = *vma;
72222
72223 @@ -1944,6 +2273,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72224 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
72225 }
72226
72227 +#ifdef CONFIG_PAX_SEGMEXEC
72228 + if (vma_m) {
72229 + *new_m = *vma_m;
72230 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
72231 + new_m->vm_mirror = new;
72232 + new->vm_mirror = new_m;
72233 +
72234 + if (new_below)
72235 + new_m->vm_end = addr_m;
72236 + else {
72237 + new_m->vm_start = addr_m;
72238 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
72239 + }
72240 + }
72241 +#endif
72242 +
72243 pol = mpol_dup(vma_policy(vma));
72244 if (IS_ERR(pol)) {
72245 err = PTR_ERR(pol);
72246 @@ -1969,6 +2314,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72247 else
72248 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
72249
72250 +#ifdef CONFIG_PAX_SEGMEXEC
72251 + if (!err && vma_m) {
72252 + if (anon_vma_clone(new_m, vma_m))
72253 + goto out_free_mpol;
72254 +
72255 + mpol_get(pol);
72256 + vma_set_policy(new_m, pol);
72257 +
72258 + if (new_m->vm_file) {
72259 + get_file(new_m->vm_file);
72260 + if (vma_m->vm_flags & VM_EXECUTABLE)
72261 + added_exe_file_vma(mm);
72262 + }
72263 +
72264 + if (new_m->vm_ops && new_m->vm_ops->open)
72265 + new_m->vm_ops->open(new_m);
72266 +
72267 + if (new_below)
72268 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
72269 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
72270 + else
72271 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
72272 +
72273 + if (err) {
72274 + if (new_m->vm_ops && new_m->vm_ops->close)
72275 + new_m->vm_ops->close(new_m);
72276 + if (new_m->vm_file) {
72277 + if (vma_m->vm_flags & VM_EXECUTABLE)
72278 + removed_exe_file_vma(mm);
72279 + fput(new_m->vm_file);
72280 + }
72281 + mpol_put(pol);
72282 + }
72283 + }
72284 +#endif
72285 +
72286 /* Success. */
72287 if (!err)
72288 return 0;
72289 @@ -1981,10 +2362,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72290 removed_exe_file_vma(mm);
72291 fput(new->vm_file);
72292 }
72293 - unlink_anon_vmas(new);
72294 out_free_mpol:
72295 mpol_put(pol);
72296 out_free_vma:
72297 +
72298 +#ifdef CONFIG_PAX_SEGMEXEC
72299 + if (new_m) {
72300 + unlink_anon_vmas(new_m);
72301 + kmem_cache_free(vm_area_cachep, new_m);
72302 + }
72303 +#endif
72304 +
72305 + unlink_anon_vmas(new);
72306 kmem_cache_free(vm_area_cachep, new);
72307 out_err:
72308 return err;
72309 @@ -1997,6 +2386,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72310 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72311 unsigned long addr, int new_below)
72312 {
72313 +
72314 +#ifdef CONFIG_PAX_SEGMEXEC
72315 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72316 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
72317 + if (mm->map_count >= sysctl_max_map_count-1)
72318 + return -ENOMEM;
72319 + } else
72320 +#endif
72321 +
72322 if (mm->map_count >= sysctl_max_map_count)
72323 return -ENOMEM;
72324
72325 @@ -2008,11 +2406,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72326 * work. This now handles partial unmappings.
72327 * Jeremy Fitzhardinge <jeremy@goop.org>
72328 */
72329 +#ifdef CONFIG_PAX_SEGMEXEC
72330 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72331 {
72332 + int ret = __do_munmap(mm, start, len);
72333 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
72334 + return ret;
72335 +
72336 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
72337 +}
72338 +
72339 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72340 +#else
72341 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72342 +#endif
72343 +{
72344 unsigned long end;
72345 struct vm_area_struct *vma, *prev, *last;
72346
72347 + /*
72348 + * mm->mmap_sem is required to protect against another thread
72349 + * changing the mappings in case we sleep.
72350 + */
72351 + verify_mm_writelocked(mm);
72352 +
72353 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
72354 return -EINVAL;
72355
72356 @@ -2087,6 +2504,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72357 /* Fix up all other VM information */
72358 remove_vma_list(mm, vma);
72359
72360 + track_exec_limit(mm, start, end, 0UL);
72361 +
72362 return 0;
72363 }
72364
72365 @@ -2099,22 +2518,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
72366
72367 profile_munmap(addr);
72368
72369 +#ifdef CONFIG_PAX_SEGMEXEC
72370 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
72371 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
72372 + return -EINVAL;
72373 +#endif
72374 +
72375 down_write(&mm->mmap_sem);
72376 ret = do_munmap(mm, addr, len);
72377 up_write(&mm->mmap_sem);
72378 return ret;
72379 }
72380
72381 -static inline void verify_mm_writelocked(struct mm_struct *mm)
72382 -{
72383 -#ifdef CONFIG_DEBUG_VM
72384 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72385 - WARN_ON(1);
72386 - up_read(&mm->mmap_sem);
72387 - }
72388 -#endif
72389 -}
72390 -
72391 /*
72392 * this is really a simplified "do_mmap". it only handles
72393 * anonymous maps. eventually we may be able to do some
72394 @@ -2128,6 +2543,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72395 struct rb_node ** rb_link, * rb_parent;
72396 pgoff_t pgoff = addr >> PAGE_SHIFT;
72397 int error;
72398 + unsigned long charged;
72399
72400 len = PAGE_ALIGN(len);
72401 if (!len)
72402 @@ -2139,16 +2555,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72403
72404 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
72405
72406 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
72407 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
72408 + flags &= ~VM_EXEC;
72409 +
72410 +#ifdef CONFIG_PAX_MPROTECT
72411 + if (mm->pax_flags & MF_PAX_MPROTECT)
72412 + flags &= ~VM_MAYEXEC;
72413 +#endif
72414 +
72415 + }
72416 +#endif
72417 +
72418 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
72419 if (error & ~PAGE_MASK)
72420 return error;
72421
72422 + charged = len >> PAGE_SHIFT;
72423 +
72424 /*
72425 * mlock MCL_FUTURE?
72426 */
72427 if (mm->def_flags & VM_LOCKED) {
72428 unsigned long locked, lock_limit;
72429 - locked = len >> PAGE_SHIFT;
72430 + locked = charged;
72431 locked += mm->locked_vm;
72432 lock_limit = rlimit(RLIMIT_MEMLOCK);
72433 lock_limit >>= PAGE_SHIFT;
72434 @@ -2165,22 +2595,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72435 /*
72436 * Clear old maps. this also does some error checking for us
72437 */
72438 - munmap_back:
72439 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72440 if (vma && vma->vm_start < addr + len) {
72441 if (do_munmap(mm, addr, len))
72442 return -ENOMEM;
72443 - goto munmap_back;
72444 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72445 + BUG_ON(vma && vma->vm_start < addr + len);
72446 }
72447
72448 /* Check against address space limits *after* clearing old maps... */
72449 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
72450 + if (!may_expand_vm(mm, charged))
72451 return -ENOMEM;
72452
72453 if (mm->map_count > sysctl_max_map_count)
72454 return -ENOMEM;
72455
72456 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
72457 + if (security_vm_enough_memory(charged))
72458 return -ENOMEM;
72459
72460 /* Can we just expand an old private anonymous mapping? */
72461 @@ -2194,7 +2624,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72462 */
72463 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72464 if (!vma) {
72465 - vm_unacct_memory(len >> PAGE_SHIFT);
72466 + vm_unacct_memory(charged);
72467 return -ENOMEM;
72468 }
72469
72470 @@ -2208,11 +2638,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72471 vma_link(mm, vma, prev, rb_link, rb_parent);
72472 out:
72473 perf_event_mmap(vma);
72474 - mm->total_vm += len >> PAGE_SHIFT;
72475 + mm->total_vm += charged;
72476 if (flags & VM_LOCKED) {
72477 if (!mlock_vma_pages_range(vma, addr, addr + len))
72478 - mm->locked_vm += (len >> PAGE_SHIFT);
72479 + mm->locked_vm += charged;
72480 }
72481 + track_exec_limit(mm, addr, addr + len, flags);
72482 return addr;
72483 }
72484
72485 @@ -2259,8 +2690,10 @@ void exit_mmap(struct mm_struct *mm)
72486 * Walk the list again, actually closing and freeing it,
72487 * with preemption enabled, without holding any MM locks.
72488 */
72489 - while (vma)
72490 + while (vma) {
72491 + vma->vm_mirror = NULL;
72492 vma = remove_vma(vma);
72493 + }
72494
72495 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
72496 }
72497 @@ -2274,6 +2707,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72498 struct vm_area_struct * __vma, * prev;
72499 struct rb_node ** rb_link, * rb_parent;
72500
72501 +#ifdef CONFIG_PAX_SEGMEXEC
72502 + struct vm_area_struct *vma_m = NULL;
72503 +#endif
72504 +
72505 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
72506 + return -EPERM;
72507 +
72508 /*
72509 * The vm_pgoff of a purely anonymous vma should be irrelevant
72510 * until its first write fault, when page's anon_vma and index
72511 @@ -2296,7 +2736,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72512 if ((vma->vm_flags & VM_ACCOUNT) &&
72513 security_vm_enough_memory_mm(mm, vma_pages(vma)))
72514 return -ENOMEM;
72515 +
72516 +#ifdef CONFIG_PAX_SEGMEXEC
72517 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
72518 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72519 + if (!vma_m)
72520 + return -ENOMEM;
72521 + }
72522 +#endif
72523 +
72524 vma_link(mm, vma, prev, rb_link, rb_parent);
72525 +
72526 +#ifdef CONFIG_PAX_SEGMEXEC
72527 + if (vma_m)
72528 + BUG_ON(pax_mirror_vma(vma_m, vma));
72529 +#endif
72530 +
72531 return 0;
72532 }
72533
72534 @@ -2315,6 +2770,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72535 struct mempolicy *pol;
72536 bool faulted_in_anon_vma = true;
72537
72538 + BUG_ON(vma->vm_mirror);
72539 +
72540 /*
72541 * If anonymous vma has not yet been faulted, update new pgoff
72542 * to match new location, to increase its chance of merging.
72543 @@ -2382,6 +2839,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72544 return NULL;
72545 }
72546
72547 +#ifdef CONFIG_PAX_SEGMEXEC
72548 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
72549 +{
72550 + struct vm_area_struct *prev_m;
72551 + struct rb_node **rb_link_m, *rb_parent_m;
72552 + struct mempolicy *pol_m;
72553 +
72554 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
72555 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
72556 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
72557 + *vma_m = *vma;
72558 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
72559 + if (anon_vma_clone(vma_m, vma))
72560 + return -ENOMEM;
72561 + pol_m = vma_policy(vma_m);
72562 + mpol_get(pol_m);
72563 + vma_set_policy(vma_m, pol_m);
72564 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
72565 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
72566 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
72567 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
72568 + if (vma_m->vm_file)
72569 + get_file(vma_m->vm_file);
72570 + if (vma_m->vm_ops && vma_m->vm_ops->open)
72571 + vma_m->vm_ops->open(vma_m);
72572 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
72573 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
72574 + vma_m->vm_mirror = vma;
72575 + vma->vm_mirror = vma_m;
72576 + return 0;
72577 +}
72578 +#endif
72579 +
72580 /*
72581 * Return true if the calling process may expand its vm space by the passed
72582 * number of pages
72583 @@ -2393,6 +2883,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
72584
72585 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
72586
72587 +#ifdef CONFIG_PAX_RANDMMAP
72588 + if (mm->pax_flags & MF_PAX_RANDMMAP)
72589 + cur -= mm->brk_gap;
72590 +#endif
72591 +
72592 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
72593 if (cur + npages > lim)
72594 return 0;
72595 return 1;
72596 @@ -2463,6 +2959,22 @@ int install_special_mapping(struct mm_struct *mm,
72597 vma->vm_start = addr;
72598 vma->vm_end = addr + len;
72599
72600 +#ifdef CONFIG_PAX_MPROTECT
72601 + if (mm->pax_flags & MF_PAX_MPROTECT) {
72602 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
72603 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
72604 + return -EPERM;
72605 + if (!(vm_flags & VM_EXEC))
72606 + vm_flags &= ~VM_MAYEXEC;
72607 +#else
72608 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72609 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72610 +#endif
72611 + else
72612 + vm_flags &= ~VM_MAYWRITE;
72613 + }
72614 +#endif
72615 +
72616 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
72617 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72618
72619 diff --git a/mm/mprotect.c b/mm/mprotect.c
72620 index f437d05..e3763f6 100644
72621 --- a/mm/mprotect.c
72622 +++ b/mm/mprotect.c
72623 @@ -23,10 +23,16 @@
72624 #include <linux/mmu_notifier.h>
72625 #include <linux/migrate.h>
72626 #include <linux/perf_event.h>
72627 +
72628 +#ifdef CONFIG_PAX_MPROTECT
72629 +#include <linux/elf.h>
72630 +#endif
72631 +
72632 #include <asm/uaccess.h>
72633 #include <asm/pgtable.h>
72634 #include <asm/cacheflush.h>
72635 #include <asm/tlbflush.h>
72636 +#include <asm/mmu_context.h>
72637
72638 #ifndef pgprot_modify
72639 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
72640 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
72641 flush_tlb_range(vma, start, end);
72642 }
72643
72644 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72645 +/* called while holding the mmap semaphor for writing except stack expansion */
72646 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
72647 +{
72648 + unsigned long oldlimit, newlimit = 0UL;
72649 +
72650 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
72651 + return;
72652 +
72653 + spin_lock(&mm->page_table_lock);
72654 + oldlimit = mm->context.user_cs_limit;
72655 + if ((prot & VM_EXEC) && oldlimit < end)
72656 + /* USER_CS limit moved up */
72657 + newlimit = end;
72658 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
72659 + /* USER_CS limit moved down */
72660 + newlimit = start;
72661 +
72662 + if (newlimit) {
72663 + mm->context.user_cs_limit = newlimit;
72664 +
72665 +#ifdef CONFIG_SMP
72666 + wmb();
72667 + cpus_clear(mm->context.cpu_user_cs_mask);
72668 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
72669 +#endif
72670 +
72671 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
72672 + }
72673 + spin_unlock(&mm->page_table_lock);
72674 + if (newlimit == end) {
72675 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
72676 +
72677 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
72678 + if (is_vm_hugetlb_page(vma))
72679 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
72680 + else
72681 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
72682 + }
72683 +}
72684 +#endif
72685 +
72686 int
72687 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72688 unsigned long start, unsigned long end, unsigned long newflags)
72689 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72690 int error;
72691 int dirty_accountable = 0;
72692
72693 +#ifdef CONFIG_PAX_SEGMEXEC
72694 + struct vm_area_struct *vma_m = NULL;
72695 + unsigned long start_m, end_m;
72696 +
72697 + start_m = start + SEGMEXEC_TASK_SIZE;
72698 + end_m = end + SEGMEXEC_TASK_SIZE;
72699 +#endif
72700 +
72701 if (newflags == oldflags) {
72702 *pprev = vma;
72703 return 0;
72704 }
72705
72706 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
72707 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
72708 +
72709 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
72710 + return -ENOMEM;
72711 +
72712 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
72713 + return -ENOMEM;
72714 + }
72715 +
72716 /*
72717 * If we make a private mapping writable we increase our commit;
72718 * but (without finer accounting) cannot reduce our commit if we
72719 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72720 }
72721 }
72722
72723 +#ifdef CONFIG_PAX_SEGMEXEC
72724 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
72725 + if (start != vma->vm_start) {
72726 + error = split_vma(mm, vma, start, 1);
72727 + if (error)
72728 + goto fail;
72729 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
72730 + *pprev = (*pprev)->vm_next;
72731 + }
72732 +
72733 + if (end != vma->vm_end) {
72734 + error = split_vma(mm, vma, end, 0);
72735 + if (error)
72736 + goto fail;
72737 + }
72738 +
72739 + if (pax_find_mirror_vma(vma)) {
72740 + error = __do_munmap(mm, start_m, end_m - start_m);
72741 + if (error)
72742 + goto fail;
72743 + } else {
72744 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72745 + if (!vma_m) {
72746 + error = -ENOMEM;
72747 + goto fail;
72748 + }
72749 + vma->vm_flags = newflags;
72750 + error = pax_mirror_vma(vma_m, vma);
72751 + if (error) {
72752 + vma->vm_flags = oldflags;
72753 + goto fail;
72754 + }
72755 + }
72756 + }
72757 +#endif
72758 +
72759 /*
72760 * First try to merge with previous and/or next vma.
72761 */
72762 @@ -204,9 +306,21 @@ success:
72763 * vm_flags and vm_page_prot are protected by the mmap_sem
72764 * held in write mode.
72765 */
72766 +
72767 +#ifdef CONFIG_PAX_SEGMEXEC
72768 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
72769 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
72770 +#endif
72771 +
72772 vma->vm_flags = newflags;
72773 +
72774 +#ifdef CONFIG_PAX_MPROTECT
72775 + if (mm->binfmt && mm->binfmt->handle_mprotect)
72776 + mm->binfmt->handle_mprotect(vma, newflags);
72777 +#endif
72778 +
72779 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
72780 - vm_get_page_prot(newflags));
72781 + vm_get_page_prot(vma->vm_flags));
72782
72783 if (vma_wants_writenotify(vma)) {
72784 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
72785 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72786 end = start + len;
72787 if (end <= start)
72788 return -ENOMEM;
72789 +
72790 +#ifdef CONFIG_PAX_SEGMEXEC
72791 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
72792 + if (end > SEGMEXEC_TASK_SIZE)
72793 + return -EINVAL;
72794 + } else
72795 +#endif
72796 +
72797 + if (end > TASK_SIZE)
72798 + return -EINVAL;
72799 +
72800 if (!arch_validate_prot(prot))
72801 return -EINVAL;
72802
72803 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72804 /*
72805 * Does the application expect PROT_READ to imply PROT_EXEC:
72806 */
72807 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72808 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72809 prot |= PROT_EXEC;
72810
72811 vm_flags = calc_vm_prot_bits(prot);
72812 @@ -288,6 +413,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72813 if (start > vma->vm_start)
72814 prev = vma;
72815
72816 +#ifdef CONFIG_PAX_MPROTECT
72817 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72818 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
72819 +#endif
72820 +
72821 for (nstart = start ; ; ) {
72822 unsigned long newflags;
72823
72824 @@ -297,6 +427,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72825
72826 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72827 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72828 + if (prot & (PROT_WRITE | PROT_EXEC))
72829 + gr_log_rwxmprotect(vma->vm_file);
72830 +
72831 + error = -EACCES;
72832 + goto out;
72833 + }
72834 +
72835 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72836 error = -EACCES;
72837 goto out;
72838 }
72839 @@ -311,6 +449,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72840 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72841 if (error)
72842 goto out;
72843 +
72844 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
72845 +
72846 nstart = tmp;
72847
72848 if (nstart < prev->vm_end)
72849 diff --git a/mm/mremap.c b/mm/mremap.c
72850 index 87bb839..c3bfadb 100644
72851 --- a/mm/mremap.c
72852 +++ b/mm/mremap.c
72853 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72854 continue;
72855 pte = ptep_get_and_clear(mm, old_addr, old_pte);
72856 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72857 +
72858 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72859 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72860 + pte = pte_exprotect(pte);
72861 +#endif
72862 +
72863 set_pte_at(mm, new_addr, new_pte, pte);
72864 }
72865
72866 @@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72867 if (is_vm_hugetlb_page(vma))
72868 goto Einval;
72869
72870 +#ifdef CONFIG_PAX_SEGMEXEC
72871 + if (pax_find_mirror_vma(vma))
72872 + goto Einval;
72873 +#endif
72874 +
72875 /* We can't remap across vm area boundaries */
72876 if (old_len > vma->vm_end - addr)
72877 goto Efault;
72878 @@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
72879 unsigned long ret = -EINVAL;
72880 unsigned long charged = 0;
72881 unsigned long map_flags;
72882 + unsigned long pax_task_size = TASK_SIZE;
72883
72884 if (new_addr & ~PAGE_MASK)
72885 goto out;
72886
72887 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72888 +#ifdef CONFIG_PAX_SEGMEXEC
72889 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72890 + pax_task_size = SEGMEXEC_TASK_SIZE;
72891 +#endif
72892 +
72893 + pax_task_size -= PAGE_SIZE;
72894 +
72895 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72896 goto out;
72897
72898 /* Check if the location we're moving into overlaps the
72899 * old location at all, and fail if it does.
72900 */
72901 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
72902 - goto out;
72903 -
72904 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
72905 + if (addr + old_len > new_addr && new_addr + new_len > addr)
72906 goto out;
72907
72908 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72909 @@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
72910 struct vm_area_struct *vma;
72911 unsigned long ret = -EINVAL;
72912 unsigned long charged = 0;
72913 + unsigned long pax_task_size = TASK_SIZE;
72914
72915 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72916 goto out;
72917 @@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
72918 if (!new_len)
72919 goto out;
72920
72921 +#ifdef CONFIG_PAX_SEGMEXEC
72922 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72923 + pax_task_size = SEGMEXEC_TASK_SIZE;
72924 +#endif
72925 +
72926 + pax_task_size -= PAGE_SIZE;
72927 +
72928 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72929 + old_len > pax_task_size || addr > pax_task_size-old_len)
72930 + goto out;
72931 +
72932 if (flags & MREMAP_FIXED) {
72933 if (flags & MREMAP_MAYMOVE)
72934 ret = mremap_to(addr, old_len, new_addr, new_len);
72935 @@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
72936 addr + new_len);
72937 }
72938 ret = addr;
72939 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72940 goto out;
72941 }
72942 }
72943 @@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
72944 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72945 if (ret)
72946 goto out;
72947 +
72948 + map_flags = vma->vm_flags;
72949 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72950 + if (!(ret & ~PAGE_MASK)) {
72951 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72952 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72953 + }
72954 }
72955 out:
72956 if (ret & ~PAGE_MASK)
72957 diff --git a/mm/nommu.c b/mm/nommu.c
72958 index f59e170..34e2a2b 100644
72959 --- a/mm/nommu.c
72960 +++ b/mm/nommu.c
72961 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72962 int sysctl_overcommit_ratio = 50; /* default is 50% */
72963 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72964 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72965 -int heap_stack_gap = 0;
72966
72967 atomic_long_t mmap_pages_allocated;
72968
72969 @@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72970 EXPORT_SYMBOL(find_vma);
72971
72972 /*
72973 - * find a VMA
72974 - * - we don't extend stack VMAs under NOMMU conditions
72975 - */
72976 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72977 -{
72978 - return find_vma(mm, addr);
72979 -}
72980 -
72981 -/*
72982 * expand a stack to a given address
72983 * - not supported under NOMMU conditions
72984 */
72985 @@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72986
72987 /* most fields are the same, copy all, and then fixup */
72988 *new = *vma;
72989 + INIT_LIST_HEAD(&new->anon_vma_chain);
72990 *region = *vma->vm_region;
72991 new->vm_region = region;
72992
72993 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72994 index a13ded1..b949d15 100644
72995 --- a/mm/page_alloc.c
72996 +++ b/mm/page_alloc.c
72997 @@ -335,7 +335,7 @@ out:
72998 * This usage means that zero-order pages may not be compound.
72999 */
73000
73001 -static void free_compound_page(struct page *page)
73002 +void free_compound_page(struct page *page)
73003 {
73004 __free_pages_ok(page, compound_order(page));
73005 }
73006 @@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
73007 int i;
73008 int bad = 0;
73009
73010 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
73011 + unsigned long index = 1UL << order;
73012 +#endif
73013 +
73014 trace_mm_page_free(page, order);
73015 kmemcheck_free_shadow(page, order);
73016
73017 @@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
73018 debug_check_no_obj_freed(page_address(page),
73019 PAGE_SIZE << order);
73020 }
73021 +
73022 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
73023 + for (; index; --index)
73024 + sanitize_highpage(page + index - 1);
73025 +#endif
73026 +
73027 arch_free_page(page, order);
73028 kernel_map_pages(page, 1 << order, 0);
73029
73030 @@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
73031 arch_alloc_page(page, order);
73032 kernel_map_pages(page, 1 << order, 1);
73033
73034 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
73035 if (gfp_flags & __GFP_ZERO)
73036 prep_zero_page(page, order, gfp_flags);
73037 +#endif
73038
73039 if (order && (gfp_flags & __GFP_COMP))
73040 prep_compound_page(page, order);
73041 @@ -3468,7 +3480,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
73042 unsigned long pfn;
73043
73044 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
73045 +#ifdef CONFIG_X86_32
73046 + /* boot failures in VMware 8 on 32bit vanilla since
73047 + this change */
73048 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
73049 +#else
73050 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
73051 +#endif
73052 return 1;
73053 }
73054 return 0;
73055 diff --git a/mm/percpu.c b/mm/percpu.c
73056 index f47af91..7eeef99 100644
73057 --- a/mm/percpu.c
73058 +++ b/mm/percpu.c
73059 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
73060 static unsigned int pcpu_high_unit_cpu __read_mostly;
73061
73062 /* the address of the first chunk which starts with the kernel static area */
73063 -void *pcpu_base_addr __read_mostly;
73064 +void *pcpu_base_addr __read_only;
73065 EXPORT_SYMBOL_GPL(pcpu_base_addr);
73066
73067 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
73068 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
73069 index c20ff48..137702a 100644
73070 --- a/mm/process_vm_access.c
73071 +++ b/mm/process_vm_access.c
73072 @@ -13,6 +13,7 @@
73073 #include <linux/uio.h>
73074 #include <linux/sched.h>
73075 #include <linux/highmem.h>
73076 +#include <linux/security.h>
73077 #include <linux/ptrace.h>
73078 #include <linux/slab.h>
73079 #include <linux/syscalls.h>
73080 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
73081 size_t iov_l_curr_offset = 0;
73082 ssize_t iov_len;
73083
73084 + return -ENOSYS; // PaX: until properly audited
73085 +
73086 /*
73087 * Work out how many pages of struct pages we're going to need
73088 * when eventually calling get_user_pages
73089 */
73090 for (i = 0; i < riovcnt; i++) {
73091 iov_len = rvec[i].iov_len;
73092 - if (iov_len > 0) {
73093 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
73094 - + iov_len)
73095 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
73096 - / PAGE_SIZE + 1;
73097 - nr_pages = max(nr_pages, nr_pages_iov);
73098 - }
73099 + if (iov_len <= 0)
73100 + continue;
73101 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
73102 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
73103 + nr_pages = max(nr_pages, nr_pages_iov);
73104 }
73105
73106 if (nr_pages == 0)
73107 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
73108 goto free_proc_pages;
73109 }
73110
73111 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
73112 + rc = -EPERM;
73113 + goto put_task_struct;
73114 + }
73115 +
73116 mm = mm_access(task, PTRACE_MODE_ATTACH);
73117 if (!mm || IS_ERR(mm)) {
73118 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
73119 diff --git a/mm/rmap.c b/mm/rmap.c
73120 index c8454e0..b04f3a2 100644
73121 --- a/mm/rmap.c
73122 +++ b/mm/rmap.c
73123 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73124 struct anon_vma *anon_vma = vma->anon_vma;
73125 struct anon_vma_chain *avc;
73126
73127 +#ifdef CONFIG_PAX_SEGMEXEC
73128 + struct anon_vma_chain *avc_m = NULL;
73129 +#endif
73130 +
73131 might_sleep();
73132 if (unlikely(!anon_vma)) {
73133 struct mm_struct *mm = vma->vm_mm;
73134 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73135 if (!avc)
73136 goto out_enomem;
73137
73138 +#ifdef CONFIG_PAX_SEGMEXEC
73139 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
73140 + if (!avc_m)
73141 + goto out_enomem_free_avc;
73142 +#endif
73143 +
73144 anon_vma = find_mergeable_anon_vma(vma);
73145 allocated = NULL;
73146 if (!anon_vma) {
73147 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73148 /* page_table_lock to protect against threads */
73149 spin_lock(&mm->page_table_lock);
73150 if (likely(!vma->anon_vma)) {
73151 +
73152 +#ifdef CONFIG_PAX_SEGMEXEC
73153 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
73154 +
73155 + if (vma_m) {
73156 + BUG_ON(vma_m->anon_vma);
73157 + vma_m->anon_vma = anon_vma;
73158 + avc_m->anon_vma = anon_vma;
73159 + avc_m->vma = vma;
73160 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
73161 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
73162 + avc_m = NULL;
73163 + }
73164 +#endif
73165 +
73166 vma->anon_vma = anon_vma;
73167 avc->anon_vma = anon_vma;
73168 avc->vma = vma;
73169 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73170
73171 if (unlikely(allocated))
73172 put_anon_vma(allocated);
73173 +
73174 +#ifdef CONFIG_PAX_SEGMEXEC
73175 + if (unlikely(avc_m))
73176 + anon_vma_chain_free(avc_m);
73177 +#endif
73178 +
73179 if (unlikely(avc))
73180 anon_vma_chain_free(avc);
73181 }
73182 return 0;
73183
73184 out_enomem_free_avc:
73185 +
73186 +#ifdef CONFIG_PAX_SEGMEXEC
73187 + if (avc_m)
73188 + anon_vma_chain_free(avc_m);
73189 +#endif
73190 +
73191 anon_vma_chain_free(avc);
73192 out_enomem:
73193 return -ENOMEM;
73194 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
73195 * Attach the anon_vmas from src to dst.
73196 * Returns 0 on success, -ENOMEM on failure.
73197 */
73198 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
73199 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
73200 {
73201 struct anon_vma_chain *avc, *pavc;
73202 struct anon_vma *root = NULL;
73203 @@ -321,7 +358,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
73204 * the corresponding VMA in the parent process is attached to.
73205 * Returns 0 on success, non-zero on failure.
73206 */
73207 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
73208 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
73209 {
73210 struct anon_vma_chain *avc;
73211 struct anon_vma *anon_vma;
73212 diff --git a/mm/shmem.c b/mm/shmem.c
73213 index 269d049..a9d2b50 100644
73214 --- a/mm/shmem.c
73215 +++ b/mm/shmem.c
73216 @@ -31,7 +31,7 @@
73217 #include <linux/export.h>
73218 #include <linux/swap.h>
73219
73220 -static struct vfsmount *shm_mnt;
73221 +struct vfsmount *shm_mnt;
73222
73223 #ifdef CONFIG_SHMEM
73224 /*
73225 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
73226 #define BOGO_DIRENT_SIZE 20
73227
73228 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
73229 -#define SHORT_SYMLINK_LEN 128
73230 +#define SHORT_SYMLINK_LEN 64
73231
73232 struct shmem_xattr {
73233 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
73234 @@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
73235 int err = -ENOMEM;
73236
73237 /* Round up to L1_CACHE_BYTES to resist false sharing */
73238 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
73239 - L1_CACHE_BYTES), GFP_KERNEL);
73240 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
73241 if (!sbinfo)
73242 return -ENOMEM;
73243
73244 diff --git a/mm/slab.c b/mm/slab.c
73245 index f0bd785..348b96a 100644
73246 --- a/mm/slab.c
73247 +++ b/mm/slab.c
73248 @@ -153,7 +153,7 @@
73249
73250 /* Legal flag mask for kmem_cache_create(). */
73251 #if DEBUG
73252 -# define CREATE_MASK (SLAB_RED_ZONE | \
73253 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
73254 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
73255 SLAB_CACHE_DMA | \
73256 SLAB_STORE_USER | \
73257 @@ -161,7 +161,7 @@
73258 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73259 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
73260 #else
73261 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
73262 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
73263 SLAB_CACHE_DMA | \
73264 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
73265 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73266 @@ -290,7 +290,7 @@ struct kmem_list3 {
73267 * Need this for bootstrapping a per node allocator.
73268 */
73269 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
73270 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
73271 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
73272 #define CACHE_CACHE 0
73273 #define SIZE_AC MAX_NUMNODES
73274 #define SIZE_L3 (2 * MAX_NUMNODES)
73275 @@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
73276 if ((x)->max_freeable < i) \
73277 (x)->max_freeable = i; \
73278 } while (0)
73279 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
73280 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
73281 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
73282 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
73283 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
73284 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
73285 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
73286 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
73287 #else
73288 #define STATS_INC_ACTIVE(x) do { } while (0)
73289 #define STATS_DEC_ACTIVE(x) do { } while (0)
73290 @@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
73291 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
73292 */
73293 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
73294 - const struct slab *slab, void *obj)
73295 + const struct slab *slab, const void *obj)
73296 {
73297 u32 offset = (obj - slab->s_mem);
73298 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
73299 @@ -568,7 +568,7 @@ struct cache_names {
73300 static struct cache_names __initdata cache_names[] = {
73301 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
73302 #include <linux/kmalloc_sizes.h>
73303 - {NULL,}
73304 + {NULL}
73305 #undef CACHE
73306 };
73307
73308 @@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
73309 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
73310 sizes[INDEX_AC].cs_size,
73311 ARCH_KMALLOC_MINALIGN,
73312 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73313 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73314 NULL);
73315
73316 if (INDEX_AC != INDEX_L3) {
73317 @@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
73318 kmem_cache_create(names[INDEX_L3].name,
73319 sizes[INDEX_L3].cs_size,
73320 ARCH_KMALLOC_MINALIGN,
73321 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73322 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73323 NULL);
73324 }
73325
73326 @@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
73327 sizes->cs_cachep = kmem_cache_create(names->name,
73328 sizes->cs_size,
73329 ARCH_KMALLOC_MINALIGN,
73330 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73331 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73332 NULL);
73333 }
73334 #ifdef CONFIG_ZONE_DMA
73335 @@ -4339,10 +4339,10 @@ static int s_show(struct seq_file *m, void *p)
73336 }
73337 /* cpu stats */
73338 {
73339 - unsigned long allochit = atomic_read(&cachep->allochit);
73340 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
73341 - unsigned long freehit = atomic_read(&cachep->freehit);
73342 - unsigned long freemiss = atomic_read(&cachep->freemiss);
73343 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
73344 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
73345 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
73346 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
73347
73348 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
73349 allochit, allocmiss, freehit, freemiss);
73350 @@ -4601,13 +4601,62 @@ static int __init slab_proc_init(void)
73351 {
73352 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
73353 #ifdef CONFIG_DEBUG_SLAB_LEAK
73354 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
73355 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
73356 #endif
73357 return 0;
73358 }
73359 module_init(slab_proc_init);
73360 #endif
73361
73362 +void check_object_size(const void *ptr, unsigned long n, bool to)
73363 +{
73364 +
73365 +#ifdef CONFIG_PAX_USERCOPY
73366 + struct page *page;
73367 + struct kmem_cache *cachep = NULL;
73368 + struct slab *slabp;
73369 + unsigned int objnr;
73370 + unsigned long offset;
73371 + const char *type;
73372 +
73373 + if (!n)
73374 + return;
73375 +
73376 + type = "<null>";
73377 + if (ZERO_OR_NULL_PTR(ptr))
73378 + goto report;
73379 +
73380 + if (!virt_addr_valid(ptr))
73381 + return;
73382 +
73383 + page = virt_to_head_page(ptr);
73384 +
73385 + type = "<process stack>";
73386 + if (!PageSlab(page)) {
73387 + if (object_is_on_stack(ptr, n) == -1)
73388 + goto report;
73389 + return;
73390 + }
73391 +
73392 + cachep = page_get_cache(page);
73393 + type = cachep->name;
73394 + if (!(cachep->flags & SLAB_USERCOPY))
73395 + goto report;
73396 +
73397 + slabp = page_get_slab(page);
73398 + objnr = obj_to_index(cachep, slabp, ptr);
73399 + BUG_ON(objnr >= cachep->num);
73400 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
73401 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
73402 + return;
73403 +
73404 +report:
73405 + pax_report_usercopy(ptr, n, to, type);
73406 +#endif
73407 +
73408 +}
73409 +EXPORT_SYMBOL(check_object_size);
73410 +
73411 /**
73412 * ksize - get the actual amount of memory allocated for a given object
73413 * @objp: Pointer to the object
73414 diff --git a/mm/slob.c b/mm/slob.c
73415 index 8105be4..e045f96 100644
73416 --- a/mm/slob.c
73417 +++ b/mm/slob.c
73418 @@ -29,7 +29,7 @@
73419 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
73420 * alloc_pages() directly, allocating compound pages so the page order
73421 * does not have to be separately tracked, and also stores the exact
73422 - * allocation size in page->private so that it can be used to accurately
73423 + * allocation size in slob_page->size so that it can be used to accurately
73424 * provide ksize(). These objects are detected in kfree() because slob_page()
73425 * is false for them.
73426 *
73427 @@ -58,6 +58,7 @@
73428 */
73429
73430 #include <linux/kernel.h>
73431 +#include <linux/sched.h>
73432 #include <linux/slab.h>
73433 #include <linux/mm.h>
73434 #include <linux/swap.h> /* struct reclaim_state */
73435 @@ -102,7 +103,8 @@ struct slob_page {
73436 unsigned long flags; /* mandatory */
73437 atomic_t _count; /* mandatory */
73438 slobidx_t units; /* free units left in page */
73439 - unsigned long pad[2];
73440 + unsigned long pad[1];
73441 + unsigned long size; /* size when >=PAGE_SIZE */
73442 slob_t *free; /* first free slob_t in page */
73443 struct list_head list; /* linked list of free pages */
73444 };
73445 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
73446 */
73447 static inline int is_slob_page(struct slob_page *sp)
73448 {
73449 - return PageSlab((struct page *)sp);
73450 + return PageSlab((struct page *)sp) && !sp->size;
73451 }
73452
73453 static inline void set_slob_page(struct slob_page *sp)
73454 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
73455
73456 static inline struct slob_page *slob_page(const void *addr)
73457 {
73458 - return (struct slob_page *)virt_to_page(addr);
73459 + return (struct slob_page *)virt_to_head_page(addr);
73460 }
73461
73462 /*
73463 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
73464 /*
73465 * Return the size of a slob block.
73466 */
73467 -static slobidx_t slob_units(slob_t *s)
73468 +static slobidx_t slob_units(const slob_t *s)
73469 {
73470 if (s->units > 0)
73471 return s->units;
73472 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
73473 /*
73474 * Return the next free slob block pointer after this one.
73475 */
73476 -static slob_t *slob_next(slob_t *s)
73477 +static slob_t *slob_next(const slob_t *s)
73478 {
73479 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
73480 slobidx_t next;
73481 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
73482 /*
73483 * Returns true if s is the last free block in its page.
73484 */
73485 -static int slob_last(slob_t *s)
73486 +static int slob_last(const slob_t *s)
73487 {
73488 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
73489 }
73490 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
73491 if (!page)
73492 return NULL;
73493
73494 + set_slob_page(page);
73495 return page_address(page);
73496 }
73497
73498 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
73499 if (!b)
73500 return NULL;
73501 sp = slob_page(b);
73502 - set_slob_page(sp);
73503
73504 spin_lock_irqsave(&slob_lock, flags);
73505 sp->units = SLOB_UNITS(PAGE_SIZE);
73506 sp->free = b;
73507 + sp->size = 0;
73508 INIT_LIST_HEAD(&sp->list);
73509 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
73510 set_slob_page_free(sp, slob_list);
73511 @@ -476,10 +479,9 @@ out:
73512 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
73513 */
73514
73515 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73516 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
73517 {
73518 - unsigned int *m;
73519 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73520 + slob_t *m;
73521 void *ret;
73522
73523 gfp &= gfp_allowed_mask;
73524 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73525
73526 if (!m)
73527 return NULL;
73528 - *m = size;
73529 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
73530 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
73531 + m[0].units = size;
73532 + m[1].units = align;
73533 ret = (void *)m + align;
73534
73535 trace_kmalloc_node(_RET_IP_, ret,
73536 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73537 gfp |= __GFP_COMP;
73538 ret = slob_new_pages(gfp, order, node);
73539 if (ret) {
73540 - struct page *page;
73541 - page = virt_to_page(ret);
73542 - page->private = size;
73543 + struct slob_page *sp;
73544 + sp = slob_page(ret);
73545 + sp->size = size;
73546 }
73547
73548 trace_kmalloc_node(_RET_IP_, ret,
73549 size, PAGE_SIZE << order, gfp, node);
73550 }
73551
73552 - kmemleak_alloc(ret, size, 1, gfp);
73553 + return ret;
73554 +}
73555 +
73556 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73557 +{
73558 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73559 + void *ret = __kmalloc_node_align(size, gfp, node, align);
73560 +
73561 + if (!ZERO_OR_NULL_PTR(ret))
73562 + kmemleak_alloc(ret, size, 1, gfp);
73563 return ret;
73564 }
73565 EXPORT_SYMBOL(__kmalloc_node);
73566 @@ -533,13 +547,92 @@ void kfree(const void *block)
73567 sp = slob_page(block);
73568 if (is_slob_page(sp)) {
73569 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73570 - unsigned int *m = (unsigned int *)(block - align);
73571 - slob_free(m, *m + align);
73572 - } else
73573 + slob_t *m = (slob_t *)(block - align);
73574 + slob_free(m, m[0].units + align);
73575 + } else {
73576 + clear_slob_page(sp);
73577 + free_slob_page(sp);
73578 + sp->size = 0;
73579 put_page(&sp->page);
73580 + }
73581 }
73582 EXPORT_SYMBOL(kfree);
73583
73584 +void check_object_size(const void *ptr, unsigned long n, bool to)
73585 +{
73586 +
73587 +#ifdef CONFIG_PAX_USERCOPY
73588 + struct slob_page *sp;
73589 + const slob_t *free;
73590 + const void *base;
73591 + unsigned long flags;
73592 + const char *type;
73593 +
73594 + if (!n)
73595 + return;
73596 +
73597 + type = "<null>";
73598 + if (ZERO_OR_NULL_PTR(ptr))
73599 + goto report;
73600 +
73601 + if (!virt_addr_valid(ptr))
73602 + return;
73603 +
73604 + type = "<process stack>";
73605 + sp = slob_page(ptr);
73606 + if (!PageSlab((struct page *)sp)) {
73607 + if (object_is_on_stack(ptr, n) == -1)
73608 + goto report;
73609 + return;
73610 + }
73611 +
73612 + type = "<slob>";
73613 + if (sp->size) {
73614 + base = page_address(&sp->page);
73615 + if (base <= ptr && n <= sp->size - (ptr - base))
73616 + return;
73617 + goto report;
73618 + }
73619 +
73620 + /* some tricky double walking to find the chunk */
73621 + spin_lock_irqsave(&slob_lock, flags);
73622 + base = (void *)((unsigned long)ptr & PAGE_MASK);
73623 + free = sp->free;
73624 +
73625 + while (!slob_last(free) && (void *)free <= ptr) {
73626 + base = free + slob_units(free);
73627 + free = slob_next(free);
73628 + }
73629 +
73630 + while (base < (void *)free) {
73631 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
73632 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
73633 + int offset;
73634 +
73635 + if (ptr < base + align)
73636 + break;
73637 +
73638 + offset = ptr - base - align;
73639 + if (offset >= m) {
73640 + base += size;
73641 + continue;
73642 + }
73643 +
73644 + if (n > m - offset)
73645 + break;
73646 +
73647 + spin_unlock_irqrestore(&slob_lock, flags);
73648 + return;
73649 + }
73650 +
73651 + spin_unlock_irqrestore(&slob_lock, flags);
73652 +report:
73653 + pax_report_usercopy(ptr, n, to, type);
73654 +#endif
73655 +
73656 +}
73657 +EXPORT_SYMBOL(check_object_size);
73658 +
73659 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
73660 size_t ksize(const void *block)
73661 {
73662 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
73663 sp = slob_page(block);
73664 if (is_slob_page(sp)) {
73665 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73666 - unsigned int *m = (unsigned int *)(block - align);
73667 - return SLOB_UNITS(*m) * SLOB_UNIT;
73668 + slob_t *m = (slob_t *)(block - align);
73669 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
73670 } else
73671 - return sp->page.private;
73672 + return sp->size;
73673 }
73674 EXPORT_SYMBOL(ksize);
73675
73676 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73677 {
73678 struct kmem_cache *c;
73679
73680 +#ifdef CONFIG_PAX_USERCOPY
73681 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
73682 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
73683 +#else
73684 c = slob_alloc(sizeof(struct kmem_cache),
73685 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
73686 +#endif
73687
73688 if (c) {
73689 c->name = name;
73690 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
73691
73692 lockdep_trace_alloc(flags);
73693
73694 +#ifdef CONFIG_PAX_USERCOPY
73695 + b = __kmalloc_node_align(c->size, flags, node, c->align);
73696 +#else
73697 if (c->size < PAGE_SIZE) {
73698 b = slob_alloc(c->size, flags, c->align, node);
73699 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73700 SLOB_UNITS(c->size) * SLOB_UNIT,
73701 flags, node);
73702 } else {
73703 + struct slob_page *sp;
73704 +
73705 b = slob_new_pages(flags, get_order(c->size), node);
73706 + sp = slob_page(b);
73707 + sp->size = c->size;
73708 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73709 PAGE_SIZE << get_order(c->size),
73710 flags, node);
73711 }
73712 +#endif
73713
73714 if (c->ctor)
73715 c->ctor(b);
73716 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
73717
73718 static void __kmem_cache_free(void *b, int size)
73719 {
73720 - if (size < PAGE_SIZE)
73721 + struct slob_page *sp = slob_page(b);
73722 +
73723 + if (is_slob_page(sp))
73724 slob_free(b, size);
73725 - else
73726 + else {
73727 + clear_slob_page(sp);
73728 + free_slob_page(sp);
73729 + sp->size = 0;
73730 slob_free_pages(b, get_order(size));
73731 + }
73732 }
73733
73734 static void kmem_rcu_free(struct rcu_head *head)
73735 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
73736
73737 void kmem_cache_free(struct kmem_cache *c, void *b)
73738 {
73739 + int size = c->size;
73740 +
73741 +#ifdef CONFIG_PAX_USERCOPY
73742 + if (size + c->align < PAGE_SIZE) {
73743 + size += c->align;
73744 + b -= c->align;
73745 + }
73746 +#endif
73747 +
73748 kmemleak_free_recursive(b, c->flags);
73749 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
73750 struct slob_rcu *slob_rcu;
73751 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
73752 - slob_rcu->size = c->size;
73753 + slob_rcu = b + (size - sizeof(struct slob_rcu));
73754 + slob_rcu->size = size;
73755 call_rcu(&slob_rcu->head, kmem_rcu_free);
73756 } else {
73757 - __kmem_cache_free(b, c->size);
73758 + __kmem_cache_free(b, size);
73759 }
73760
73761 +#ifdef CONFIG_PAX_USERCOPY
73762 + trace_kfree(_RET_IP_, b);
73763 +#else
73764 trace_kmem_cache_free(_RET_IP_, b);
73765 +#endif
73766 +
73767 }
73768 EXPORT_SYMBOL(kmem_cache_free);
73769
73770 diff --git a/mm/slub.c b/mm/slub.c
73771 index 0342a5d..8180ae9 100644
73772 --- a/mm/slub.c
73773 +++ b/mm/slub.c
73774 @@ -208,7 +208,7 @@ struct track {
73775
73776 enum track_item { TRACK_ALLOC, TRACK_FREE };
73777
73778 -#ifdef CONFIG_SYSFS
73779 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73780 static int sysfs_slab_add(struct kmem_cache *);
73781 static int sysfs_slab_alias(struct kmem_cache *, const char *);
73782 static void sysfs_slab_remove(struct kmem_cache *);
73783 @@ -532,7 +532,7 @@ static void print_track(const char *s, struct track *t)
73784 if (!t->addr)
73785 return;
73786
73787 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
73788 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
73789 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
73790 #ifdef CONFIG_STACKTRACE
73791 {
73792 @@ -2571,6 +2571,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
73793
73794 page = virt_to_head_page(x);
73795
73796 + BUG_ON(!PageSlab(page));
73797 +
73798 slab_free(s, page, x, _RET_IP_);
73799
73800 trace_kmem_cache_free(_RET_IP_, x);
73801 @@ -2604,7 +2606,7 @@ static int slub_min_objects;
73802 * Merge control. If this is set then no merging of slab caches will occur.
73803 * (Could be removed. This was introduced to pacify the merge skeptics.)
73804 */
73805 -static int slub_nomerge;
73806 +static int slub_nomerge = 1;
73807
73808 /*
73809 * Calculate the order of allocation given an slab object size.
73810 @@ -3057,7 +3059,7 @@ static int kmem_cache_open(struct kmem_cache *s,
73811 else
73812 s->cpu_partial = 30;
73813
73814 - s->refcount = 1;
73815 + atomic_set(&s->refcount, 1);
73816 #ifdef CONFIG_NUMA
73817 s->remote_node_defrag_ratio = 1000;
73818 #endif
73819 @@ -3161,8 +3163,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
73820 void kmem_cache_destroy(struct kmem_cache *s)
73821 {
73822 down_write(&slub_lock);
73823 - s->refcount--;
73824 - if (!s->refcount) {
73825 + if (atomic_dec_and_test(&s->refcount)) {
73826 list_del(&s->list);
73827 up_write(&slub_lock);
73828 if (kmem_cache_close(s)) {
73829 @@ -3373,6 +3374,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
73830 EXPORT_SYMBOL(__kmalloc_node);
73831 #endif
73832
73833 +void check_object_size(const void *ptr, unsigned long n, bool to)
73834 +{
73835 +
73836 +#ifdef CONFIG_PAX_USERCOPY
73837 + struct page *page;
73838 + struct kmem_cache *s = NULL;
73839 + unsigned long offset;
73840 + const char *type;
73841 +
73842 + if (!n)
73843 + return;
73844 +
73845 + type = "<null>";
73846 + if (ZERO_OR_NULL_PTR(ptr))
73847 + goto report;
73848 +
73849 + if (!virt_addr_valid(ptr))
73850 + return;
73851 +
73852 + page = virt_to_head_page(ptr);
73853 +
73854 + type = "<process stack>";
73855 + if (!PageSlab(page)) {
73856 + if (object_is_on_stack(ptr, n) == -1)
73857 + goto report;
73858 + return;
73859 + }
73860 +
73861 + s = page->slab;
73862 + type = s->name;
73863 + if (!(s->flags & SLAB_USERCOPY))
73864 + goto report;
73865 +
73866 + offset = (ptr - page_address(page)) % s->size;
73867 + if (offset <= s->objsize && n <= s->objsize - offset)
73868 + return;
73869 +
73870 +report:
73871 + pax_report_usercopy(ptr, n, to, type);
73872 +#endif
73873 +
73874 +}
73875 +EXPORT_SYMBOL(check_object_size);
73876 +
73877 size_t ksize(const void *object)
73878 {
73879 struct page *page;
73880 @@ -3647,7 +3692,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73881 int node;
73882
73883 list_add(&s->list, &slab_caches);
73884 - s->refcount = -1;
73885 + atomic_set(&s->refcount, -1);
73886
73887 for_each_node_state(node, N_NORMAL_MEMORY) {
73888 struct kmem_cache_node *n = get_node(s, node);
73889 @@ -3767,17 +3812,17 @@ void __init kmem_cache_init(void)
73890
73891 /* Caches that are not of the two-to-the-power-of size */
73892 if (KMALLOC_MIN_SIZE <= 32) {
73893 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73894 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73895 caches++;
73896 }
73897
73898 if (KMALLOC_MIN_SIZE <= 64) {
73899 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73900 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73901 caches++;
73902 }
73903
73904 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73905 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73906 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73907 caches++;
73908 }
73909
73910 @@ -3845,7 +3890,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73911 /*
73912 * We may have set a slab to be unmergeable during bootstrap.
73913 */
73914 - if (s->refcount < 0)
73915 + if (atomic_read(&s->refcount) < 0)
73916 return 1;
73917
73918 return 0;
73919 @@ -3904,7 +3949,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73920 down_write(&slub_lock);
73921 s = find_mergeable(size, align, flags, name, ctor);
73922 if (s) {
73923 - s->refcount++;
73924 + atomic_inc(&s->refcount);
73925 /*
73926 * Adjust the object sizes so that we clear
73927 * the complete object on kzalloc.
73928 @@ -3913,7 +3958,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73929 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73930
73931 if (sysfs_slab_alias(s, name)) {
73932 - s->refcount--;
73933 + atomic_dec(&s->refcount);
73934 goto err;
73935 }
73936 up_write(&slub_lock);
73937 @@ -4042,7 +4087,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
73938 }
73939 #endif
73940
73941 -#ifdef CONFIG_SYSFS
73942 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73943 static int count_inuse(struct page *page)
73944 {
73945 return page->inuse;
73946 @@ -4429,12 +4474,12 @@ static void resiliency_test(void)
73947 validate_slab_cache(kmalloc_caches[9]);
73948 }
73949 #else
73950 -#ifdef CONFIG_SYSFS
73951 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73952 static void resiliency_test(void) {};
73953 #endif
73954 #endif
73955
73956 -#ifdef CONFIG_SYSFS
73957 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73958 enum slab_stat_type {
73959 SL_ALL, /* All slabs */
73960 SL_PARTIAL, /* Only partially allocated slabs */
73961 @@ -4677,7 +4722,7 @@ SLAB_ATTR_RO(ctor);
73962
73963 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73964 {
73965 - return sprintf(buf, "%d\n", s->refcount - 1);
73966 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73967 }
73968 SLAB_ATTR_RO(aliases);
73969
73970 @@ -5244,6 +5289,7 @@ static char *create_unique_id(struct kmem_cache *s)
73971 return name;
73972 }
73973
73974 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73975 static int sysfs_slab_add(struct kmem_cache *s)
73976 {
73977 int err;
73978 @@ -5306,6 +5352,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
73979 kobject_del(&s->kobj);
73980 kobject_put(&s->kobj);
73981 }
73982 +#endif
73983
73984 /*
73985 * Need to buffer aliases during bootup until sysfs becomes
73986 @@ -5319,6 +5366,7 @@ struct saved_alias {
73987
73988 static struct saved_alias *alias_list;
73989
73990 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73991 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73992 {
73993 struct saved_alias *al;
73994 @@ -5341,6 +5389,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73995 alias_list = al;
73996 return 0;
73997 }
73998 +#endif
73999
74000 static int __init slab_sysfs_init(void)
74001 {
74002 diff --git a/mm/swap.c b/mm/swap.c
74003 index 14380e9..e244704 100644
74004 --- a/mm/swap.c
74005 +++ b/mm/swap.c
74006 @@ -30,6 +30,7 @@
74007 #include <linux/backing-dev.h>
74008 #include <linux/memcontrol.h>
74009 #include <linux/gfp.h>
74010 +#include <linux/hugetlb.h>
74011
74012 #include "internal.h"
74013
74014 @@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
74015
74016 __page_cache_release(page);
74017 dtor = get_compound_page_dtor(page);
74018 + if (!PageHuge(page))
74019 + BUG_ON(dtor != free_compound_page);
74020 (*dtor)(page);
74021 }
74022
74023 diff --git a/mm/swapfile.c b/mm/swapfile.c
74024 index f31b29d..8bdcae2 100644
74025 --- a/mm/swapfile.c
74026 +++ b/mm/swapfile.c
74027 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
74028
74029 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
74030 /* Activity counter to indicate that a swapon or swapoff has occurred */
74031 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
74032 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
74033
74034 static inline unsigned char swap_count(unsigned char ent)
74035 {
74036 @@ -1669,7 +1669,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
74037 }
74038 filp_close(swap_file, NULL);
74039 err = 0;
74040 - atomic_inc(&proc_poll_event);
74041 + atomic_inc_unchecked(&proc_poll_event);
74042 wake_up_interruptible(&proc_poll_wait);
74043
74044 out_dput:
74045 @@ -1685,8 +1685,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
74046
74047 poll_wait(file, &proc_poll_wait, wait);
74048
74049 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
74050 - seq->poll_event = atomic_read(&proc_poll_event);
74051 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
74052 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74053 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
74054 }
74055
74056 @@ -1784,7 +1784,7 @@ static int swaps_open(struct inode *inode, struct file *file)
74057 return ret;
74058
74059 seq = file->private_data;
74060 - seq->poll_event = atomic_read(&proc_poll_event);
74061 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74062 return 0;
74063 }
74064
74065 @@ -2122,7 +2122,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
74066 (p->flags & SWP_DISCARDABLE) ? "D" : "");
74067
74068 mutex_unlock(&swapon_mutex);
74069 - atomic_inc(&proc_poll_event);
74070 + atomic_inc_unchecked(&proc_poll_event);
74071 wake_up_interruptible(&proc_poll_wait);
74072
74073 if (S_ISREG(inode->i_mode))
74074 diff --git a/mm/util.c b/mm/util.c
74075 index 136ac4f..f917fa9 100644
74076 --- a/mm/util.c
74077 +++ b/mm/util.c
74078 @@ -243,6 +243,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
74079 void arch_pick_mmap_layout(struct mm_struct *mm)
74080 {
74081 mm->mmap_base = TASK_UNMAPPED_BASE;
74082 +
74083 +#ifdef CONFIG_PAX_RANDMMAP
74084 + if (mm->pax_flags & MF_PAX_RANDMMAP)
74085 + mm->mmap_base += mm->delta_mmap;
74086 +#endif
74087 +
74088 mm->get_unmapped_area = arch_get_unmapped_area;
74089 mm->unmap_area = arch_unmap_area;
74090 }
74091 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
74092 index 86ce9a5..bc498f3 100644
74093 --- a/mm/vmalloc.c
74094 +++ b/mm/vmalloc.c
74095 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
74096
74097 pte = pte_offset_kernel(pmd, addr);
74098 do {
74099 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74100 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74101 +
74102 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74103 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
74104 + BUG_ON(!pte_exec(*pte));
74105 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
74106 + continue;
74107 + }
74108 +#endif
74109 +
74110 + {
74111 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74112 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74113 + }
74114 } while (pte++, addr += PAGE_SIZE, addr != end);
74115 }
74116
74117 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74118 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
74119 {
74120 pte_t *pte;
74121 + int ret = -ENOMEM;
74122
74123 /*
74124 * nr is a running index into the array which helps higher level
74125 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74126 pte = pte_alloc_kernel(pmd, addr);
74127 if (!pte)
74128 return -ENOMEM;
74129 +
74130 + pax_open_kernel();
74131 do {
74132 struct page *page = pages[*nr];
74133
74134 - if (WARN_ON(!pte_none(*pte)))
74135 - return -EBUSY;
74136 - if (WARN_ON(!page))
74137 - return -ENOMEM;
74138 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74139 + if (pgprot_val(prot) & _PAGE_NX)
74140 +#endif
74141 +
74142 + if (WARN_ON(!pte_none(*pte))) {
74143 + ret = -EBUSY;
74144 + goto out;
74145 + }
74146 + if (WARN_ON(!page)) {
74147 + ret = -ENOMEM;
74148 + goto out;
74149 + }
74150 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
74151 (*nr)++;
74152 } while (pte++, addr += PAGE_SIZE, addr != end);
74153 - return 0;
74154 + ret = 0;
74155 +out:
74156 + pax_close_kernel();
74157 + return ret;
74158 }
74159
74160 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74161 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
74162 * and fall back on vmalloc() if that fails. Others
74163 * just put it in the vmalloc space.
74164 */
74165 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
74166 +#ifdef CONFIG_MODULES
74167 +#ifdef MODULES_VADDR
74168 unsigned long addr = (unsigned long)x;
74169 if (addr >= MODULES_VADDR && addr < MODULES_END)
74170 return 1;
74171 #endif
74172 +
74173 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74174 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
74175 + return 1;
74176 +#endif
74177 +
74178 +#endif
74179 +
74180 return is_vmalloc_addr(x);
74181 }
74182
74183 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
74184
74185 if (!pgd_none(*pgd)) {
74186 pud_t *pud = pud_offset(pgd, addr);
74187 +#ifdef CONFIG_X86
74188 + if (!pud_large(*pud))
74189 +#endif
74190 if (!pud_none(*pud)) {
74191 pmd_t *pmd = pmd_offset(pud, addr);
74192 +#ifdef CONFIG_X86
74193 + if (!pmd_large(*pmd))
74194 +#endif
74195 if (!pmd_none(*pmd)) {
74196 pte_t *ptep, pte;
74197
74198 @@ -1319,6 +1359,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
74199 struct vm_struct *area;
74200
74201 BUG_ON(in_interrupt());
74202 +
74203 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74204 + if (flags & VM_KERNEXEC) {
74205 + if (start != VMALLOC_START || end != VMALLOC_END)
74206 + return NULL;
74207 + start = (unsigned long)MODULES_EXEC_VADDR;
74208 + end = (unsigned long)MODULES_EXEC_END;
74209 + }
74210 +#endif
74211 +
74212 if (flags & VM_IOREMAP) {
74213 int bit = fls(size);
74214
74215 @@ -1551,6 +1601,11 @@ void *vmap(struct page **pages, unsigned int count,
74216 if (count > totalram_pages)
74217 return NULL;
74218
74219 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74220 + if (!(pgprot_val(prot) & _PAGE_NX))
74221 + flags |= VM_KERNEXEC;
74222 +#endif
74223 +
74224 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
74225 __builtin_return_address(0));
74226 if (!area)
74227 @@ -1652,6 +1707,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
74228 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
74229 goto fail;
74230
74231 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74232 + if (!(pgprot_val(prot) & _PAGE_NX))
74233 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
74234 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
74235 + else
74236 +#endif
74237 +
74238 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
74239 start, end, node, gfp_mask, caller);
74240 if (!area)
74241 @@ -1825,10 +1887,9 @@ EXPORT_SYMBOL(vzalloc_node);
74242 * For tight control over page level allocator and protection flags
74243 * use __vmalloc() instead.
74244 */
74245 -
74246 void *vmalloc_exec(unsigned long size)
74247 {
74248 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
74249 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
74250 -1, __builtin_return_address(0));
74251 }
74252
74253 @@ -2123,6 +2184,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
74254 unsigned long uaddr = vma->vm_start;
74255 unsigned long usize = vma->vm_end - vma->vm_start;
74256
74257 + BUG_ON(vma->vm_mirror);
74258 +
74259 if ((PAGE_SIZE-1) & (unsigned long)addr)
74260 return -EINVAL;
74261
74262 diff --git a/mm/vmstat.c b/mm/vmstat.c
74263 index f600557..1459fc8 100644
74264 --- a/mm/vmstat.c
74265 +++ b/mm/vmstat.c
74266 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
74267 *
74268 * vm_stat contains the global counters
74269 */
74270 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74271 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74272 EXPORT_SYMBOL(vm_stat);
74273
74274 #ifdef CONFIG_SMP
74275 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
74276 v = p->vm_stat_diff[i];
74277 p->vm_stat_diff[i] = 0;
74278 local_irq_restore(flags);
74279 - atomic_long_add(v, &zone->vm_stat[i]);
74280 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
74281 global_diff[i] += v;
74282 #ifdef CONFIG_NUMA
74283 /* 3 seconds idle till flush */
74284 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
74285
74286 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
74287 if (global_diff[i])
74288 - atomic_long_add(global_diff[i], &vm_stat[i]);
74289 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
74290 }
74291
74292 #endif
74293 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
74294 start_cpu_timer(cpu);
74295 #endif
74296 #ifdef CONFIG_PROC_FS
74297 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
74298 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
74299 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
74300 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
74301 + {
74302 + mode_t gr_mode = S_IRUGO;
74303 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
74304 + gr_mode = S_IRUSR;
74305 +#endif
74306 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
74307 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
74308 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74309 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
74310 +#else
74311 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
74312 +#endif
74313 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
74314 + }
74315 #endif
74316 return 0;
74317 }
74318 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
74319 index efea35b..9c8dd0b 100644
74320 --- a/net/8021q/vlan.c
74321 +++ b/net/8021q/vlan.c
74322 @@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
74323 err = -EPERM;
74324 if (!capable(CAP_NET_ADMIN))
74325 break;
74326 - if ((args.u.name_type >= 0) &&
74327 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
74328 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
74329 struct vlan_net *vn;
74330
74331 vn = net_generic(net, vlan_net_id);
74332 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
74333 index fccae26..e7ece2f 100644
74334 --- a/net/9p/trans_fd.c
74335 +++ b/net/9p/trans_fd.c
74336 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
74337 oldfs = get_fs();
74338 set_fs(get_ds());
74339 /* The cast to a user pointer is valid due to the set_fs() */
74340 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
74341 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
74342 set_fs(oldfs);
74343
74344 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
74345 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
74346 index 876fbe8..8bbea9f 100644
74347 --- a/net/atm/atm_misc.c
74348 +++ b/net/atm/atm_misc.c
74349 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
74350 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
74351 return 1;
74352 atm_return(vcc, truesize);
74353 - atomic_inc(&vcc->stats->rx_drop);
74354 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74355 return 0;
74356 }
74357 EXPORT_SYMBOL(atm_charge);
74358 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
74359 }
74360 }
74361 atm_return(vcc, guess);
74362 - atomic_inc(&vcc->stats->rx_drop);
74363 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74364 return NULL;
74365 }
74366 EXPORT_SYMBOL(atm_alloc_charge);
74367 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
74368
74369 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74370 {
74371 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74372 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74373 __SONET_ITEMS
74374 #undef __HANDLE_ITEM
74375 }
74376 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
74377
74378 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74379 {
74380 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74381 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
74382 __SONET_ITEMS
74383 #undef __HANDLE_ITEM
74384 }
74385 diff --git a/net/atm/lec.h b/net/atm/lec.h
74386 index dfc0719..47c5322 100644
74387 --- a/net/atm/lec.h
74388 +++ b/net/atm/lec.h
74389 @@ -48,7 +48,7 @@ struct lane2_ops {
74390 const u8 *tlvs, u32 sizeoftlvs);
74391 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
74392 const u8 *tlvs, u32 sizeoftlvs);
74393 -};
74394 +} __no_const;
74395
74396 /*
74397 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
74398 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
74399 index 0919a88..a23d54e 100644
74400 --- a/net/atm/mpc.h
74401 +++ b/net/atm/mpc.h
74402 @@ -33,7 +33,7 @@ struct mpoa_client {
74403 struct mpc_parameters parameters; /* parameters for this client */
74404
74405 const struct net_device_ops *old_ops;
74406 - struct net_device_ops new_ops;
74407 + net_device_ops_no_const new_ops;
74408 };
74409
74410
74411 diff --git a/net/atm/proc.c b/net/atm/proc.c
74412 index 0d020de..011c7bb 100644
74413 --- a/net/atm/proc.c
74414 +++ b/net/atm/proc.c
74415 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
74416 const struct k_atm_aal_stats *stats)
74417 {
74418 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
74419 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
74420 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
74421 - atomic_read(&stats->rx_drop));
74422 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
74423 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
74424 + atomic_read_unchecked(&stats->rx_drop));
74425 }
74426
74427 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
74428 diff --git a/net/atm/resources.c b/net/atm/resources.c
74429 index 23f45ce..c748f1a 100644
74430 --- a/net/atm/resources.c
74431 +++ b/net/atm/resources.c
74432 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
74433 static void copy_aal_stats(struct k_atm_aal_stats *from,
74434 struct atm_aal_stats *to)
74435 {
74436 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74437 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74438 __AAL_STAT_ITEMS
74439 #undef __HANDLE_ITEM
74440 }
74441 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
74442 static void subtract_aal_stats(struct k_atm_aal_stats *from,
74443 struct atm_aal_stats *to)
74444 {
74445 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74446 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
74447 __AAL_STAT_ITEMS
74448 #undef __HANDLE_ITEM
74449 }
74450 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
74451 index 3512e25..2b33401 100644
74452 --- a/net/batman-adv/bat_iv_ogm.c
74453 +++ b/net/batman-adv/bat_iv_ogm.c
74454 @@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
74455
74456 /* change sequence number to network order */
74457 batman_ogm_packet->seqno =
74458 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
74459 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
74460
74461 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
74462 batman_ogm_packet->tt_crc = htons((uint16_t)
74463 @@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
74464 else
74465 batman_ogm_packet->gw_flags = NO_FLAGS;
74466
74467 - atomic_inc(&hard_iface->seqno);
74468 + atomic_inc_unchecked(&hard_iface->seqno);
74469
74470 slide_own_bcast_window(hard_iface);
74471 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
74472 @@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
74473 return;
74474
74475 /* could be changed by schedule_own_packet() */
74476 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
74477 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
74478
74479 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
74480
74481 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
74482 index 7704df4..beb4e16 100644
74483 --- a/net/batman-adv/hard-interface.c
74484 +++ b/net/batman-adv/hard-interface.c
74485 @@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
74486 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
74487 dev_add_pack(&hard_iface->batman_adv_ptype);
74488
74489 - atomic_set(&hard_iface->seqno, 1);
74490 - atomic_set(&hard_iface->frag_seqno, 1);
74491 + atomic_set_unchecked(&hard_iface->seqno, 1);
74492 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
74493 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
74494 hard_iface->net_dev->name);
74495
74496 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
74497 index 987c75a..20d6f36 100644
74498 --- a/net/batman-adv/soft-interface.c
74499 +++ b/net/batman-adv/soft-interface.c
74500 @@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
74501
74502 /* set broadcast sequence number */
74503 bcast_packet->seqno =
74504 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
74505 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
74506
74507 add_bcast_packet_to_list(bat_priv, skb, 1);
74508
74509 @@ -843,7 +843,7 @@ struct net_device *softif_create(const char *name)
74510 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
74511
74512 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
74513 - atomic_set(&bat_priv->bcast_seqno, 1);
74514 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
74515 atomic_set(&bat_priv->ttvn, 0);
74516 atomic_set(&bat_priv->tt_local_changes, 0);
74517 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
74518 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
74519 index e9eb043..d174eeb 100644
74520 --- a/net/batman-adv/types.h
74521 +++ b/net/batman-adv/types.h
74522 @@ -38,8 +38,8 @@ struct hard_iface {
74523 int16_t if_num;
74524 char if_status;
74525 struct net_device *net_dev;
74526 - atomic_t seqno;
74527 - atomic_t frag_seqno;
74528 + atomic_unchecked_t seqno;
74529 + atomic_unchecked_t frag_seqno;
74530 unsigned char *packet_buff;
74531 int packet_len;
74532 struct kobject *hardif_obj;
74533 @@ -154,7 +154,7 @@ struct bat_priv {
74534 atomic_t orig_interval; /* uint */
74535 atomic_t hop_penalty; /* uint */
74536 atomic_t log_level; /* uint */
74537 - atomic_t bcast_seqno;
74538 + atomic_unchecked_t bcast_seqno;
74539 atomic_t bcast_queue_left;
74540 atomic_t batman_queue_left;
74541 atomic_t ttvn; /* translation table version number */
74542 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
74543 index 07d1c1d..7e9bea9 100644
74544 --- a/net/batman-adv/unicast.c
74545 +++ b/net/batman-adv/unicast.c
74546 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
74547 frag1->flags = UNI_FRAG_HEAD | large_tail;
74548 frag2->flags = large_tail;
74549
74550 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
74551 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
74552 frag1->seqno = htons(seqno - 1);
74553 frag2->seqno = htons(seqno);
74554
74555 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
74556 index 280953b..cd219bb 100644
74557 --- a/net/bluetooth/hci_conn.c
74558 +++ b/net/bluetooth/hci_conn.c
74559 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
74560 memset(&cp, 0, sizeof(cp));
74561
74562 cp.handle = cpu_to_le16(conn->handle);
74563 - memcpy(cp.ltk, ltk, sizeof(ltk));
74564 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74565
74566 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
74567 }
74568 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
74569 index 32d338c..d24bcdb 100644
74570 --- a/net/bluetooth/l2cap_core.c
74571 +++ b/net/bluetooth/l2cap_core.c
74572 @@ -2418,8 +2418,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
74573 break;
74574
74575 case L2CAP_CONF_RFC:
74576 - if (olen == sizeof(rfc))
74577 - memcpy(&rfc, (void *)val, olen);
74578 + if (olen != sizeof(rfc))
74579 + break;
74580 +
74581 + memcpy(&rfc, (void *)val, olen);
74582
74583 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
74584 rfc.mode != chan->mode)
74585 @@ -2537,8 +2539,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
74586
74587 switch (type) {
74588 case L2CAP_CONF_RFC:
74589 - if (olen == sizeof(rfc))
74590 - memcpy(&rfc, (void *)val, olen);
74591 + if (olen != sizeof(rfc))
74592 + break;
74593 +
74594 + memcpy(&rfc, (void *)val, olen);
74595 goto done;
74596 }
74597 }
74598 diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
74599 index 5449294..7da9a5f 100644
74600 --- a/net/bridge/netfilter/ebt_ulog.c
74601 +++ b/net/bridge/netfilter/ebt_ulog.c
74602 @@ -96,6 +96,7 @@ static void ulog_timer(unsigned long data)
74603 spin_unlock_bh(&ulog_buffers[data].lock);
74604 }
74605
74606 +static struct sk_buff *ulog_alloc_skb(unsigned int size) __size_overflow(1);
74607 static struct sk_buff *ulog_alloc_skb(unsigned int size)
74608 {
74609 struct sk_buff *skb;
74610 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
74611 index 5fe2ff3..10968b5 100644
74612 --- a/net/bridge/netfilter/ebtables.c
74613 +++ b/net/bridge/netfilter/ebtables.c
74614 @@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
74615 tmp.valid_hooks = t->table->valid_hooks;
74616 }
74617 mutex_unlock(&ebt_mutex);
74618 - if (copy_to_user(user, &tmp, *len) != 0){
74619 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
74620 BUGPRINT("c2u Didn't work\n");
74621 ret = -EFAULT;
74622 break;
74623 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
74624 index a97d97a..6f679ed 100644
74625 --- a/net/caif/caif_socket.c
74626 +++ b/net/caif/caif_socket.c
74627 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
74628 #ifdef CONFIG_DEBUG_FS
74629 struct debug_fs_counter {
74630 atomic_t caif_nr_socks;
74631 - atomic_t caif_sock_create;
74632 - atomic_t num_connect_req;
74633 - atomic_t num_connect_resp;
74634 - atomic_t num_connect_fail_resp;
74635 - atomic_t num_disconnect;
74636 - atomic_t num_remote_shutdown_ind;
74637 - atomic_t num_tx_flow_off_ind;
74638 - atomic_t num_tx_flow_on_ind;
74639 - atomic_t num_rx_flow_off;
74640 - atomic_t num_rx_flow_on;
74641 + atomic_unchecked_t caif_sock_create;
74642 + atomic_unchecked_t num_connect_req;
74643 + atomic_unchecked_t num_connect_resp;
74644 + atomic_unchecked_t num_connect_fail_resp;
74645 + atomic_unchecked_t num_disconnect;
74646 + atomic_unchecked_t num_remote_shutdown_ind;
74647 + atomic_unchecked_t num_tx_flow_off_ind;
74648 + atomic_unchecked_t num_tx_flow_on_ind;
74649 + atomic_unchecked_t num_rx_flow_off;
74650 + atomic_unchecked_t num_rx_flow_on;
74651 };
74652 static struct debug_fs_counter cnt;
74653 #define dbfs_atomic_inc(v) atomic_inc_return(v)
74654 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
74655 #define dbfs_atomic_dec(v) atomic_dec_return(v)
74656 #else
74657 #define dbfs_atomic_inc(v) 0
74658 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74659 atomic_read(&cf_sk->sk.sk_rmem_alloc),
74660 sk_rcvbuf_lowwater(cf_sk));
74661 set_rx_flow_off(cf_sk);
74662 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
74663 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74664 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74665 }
74666
74667 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74668 set_rx_flow_off(cf_sk);
74669 if (net_ratelimit())
74670 pr_debug("sending flow OFF due to rmem_schedule\n");
74671 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
74672 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74673 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74674 }
74675 skb->dev = NULL;
74676 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
74677 switch (flow) {
74678 case CAIF_CTRLCMD_FLOW_ON_IND:
74679 /* OK from modem to start sending again */
74680 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
74681 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
74682 set_tx_flow_on(cf_sk);
74683 cf_sk->sk.sk_state_change(&cf_sk->sk);
74684 break;
74685
74686 case CAIF_CTRLCMD_FLOW_OFF_IND:
74687 /* Modem asks us to shut up */
74688 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
74689 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
74690 set_tx_flow_off(cf_sk);
74691 cf_sk->sk.sk_state_change(&cf_sk->sk);
74692 break;
74693 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74694 /* We're now connected */
74695 caif_client_register_refcnt(&cf_sk->layer,
74696 cfsk_hold, cfsk_put);
74697 - dbfs_atomic_inc(&cnt.num_connect_resp);
74698 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
74699 cf_sk->sk.sk_state = CAIF_CONNECTED;
74700 set_tx_flow_on(cf_sk);
74701 cf_sk->sk.sk_state_change(&cf_sk->sk);
74702 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74703
74704 case CAIF_CTRLCMD_INIT_FAIL_RSP:
74705 /* Connect request failed */
74706 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
74707 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
74708 cf_sk->sk.sk_err = ECONNREFUSED;
74709 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
74710 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74711 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74712
74713 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
74714 /* Modem has closed this connection, or device is down. */
74715 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
74716 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
74717 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74718 cf_sk->sk.sk_err = ECONNRESET;
74719 set_rx_flow_on(cf_sk);
74720 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
74721 return;
74722
74723 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
74724 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
74725 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
74726 set_rx_flow_on(cf_sk);
74727 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
74728 }
74729 @@ -856,7 +857,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
74730 /*ifindex = id of the interface.*/
74731 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
74732
74733 - dbfs_atomic_inc(&cnt.num_connect_req);
74734 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
74735 cf_sk->layer.receive = caif_sktrecv_cb;
74736
74737 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
74738 @@ -945,7 +946,7 @@ static int caif_release(struct socket *sock)
74739 spin_unlock_bh(&sk->sk_receive_queue.lock);
74740 sock->sk = NULL;
74741
74742 - dbfs_atomic_inc(&cnt.num_disconnect);
74743 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
74744
74745 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
74746 if (cf_sk->debugfs_socket_dir != NULL)
74747 @@ -1124,7 +1125,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
74748 cf_sk->conn_req.protocol = protocol;
74749 /* Increase the number of sockets created. */
74750 dbfs_atomic_inc(&cnt.caif_nr_socks);
74751 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
74752 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
74753 #ifdef CONFIG_DEBUG_FS
74754 if (!IS_ERR(debugfsdir)) {
74755
74756 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
74757 index 5cf5222..6f704ad 100644
74758 --- a/net/caif/cfctrl.c
74759 +++ b/net/caif/cfctrl.c
74760 @@ -9,6 +9,7 @@
74761 #include <linux/stddef.h>
74762 #include <linux/spinlock.h>
74763 #include <linux/slab.h>
74764 +#include <linux/sched.h>
74765 #include <net/caif/caif_layer.h>
74766 #include <net/caif/cfpkt.h>
74767 #include <net/caif/cfctrl.h>
74768 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
74769 memset(&dev_info, 0, sizeof(dev_info));
74770 dev_info.id = 0xff;
74771 cfsrvl_init(&this->serv, 0, &dev_info, false);
74772 - atomic_set(&this->req_seq_no, 1);
74773 - atomic_set(&this->rsp_seq_no, 1);
74774 + atomic_set_unchecked(&this->req_seq_no, 1);
74775 + atomic_set_unchecked(&this->rsp_seq_no, 1);
74776 this->serv.layer.receive = cfctrl_recv;
74777 sprintf(this->serv.layer.name, "ctrl");
74778 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
74779 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
74780 struct cfctrl_request_info *req)
74781 {
74782 spin_lock_bh(&ctrl->info_list_lock);
74783 - atomic_inc(&ctrl->req_seq_no);
74784 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
74785 + atomic_inc_unchecked(&ctrl->req_seq_no);
74786 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
74787 list_add_tail(&req->list, &ctrl->list);
74788 spin_unlock_bh(&ctrl->info_list_lock);
74789 }
74790 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
74791 if (p != first)
74792 pr_warn("Requests are not received in order\n");
74793
74794 - atomic_set(&ctrl->rsp_seq_no,
74795 + atomic_set_unchecked(&ctrl->rsp_seq_no,
74796 p->sequence_no);
74797 list_del(&p->list);
74798 goto out;
74799 diff --git a/net/can/gw.c b/net/can/gw.c
74800 index 3d79b12..8de85fa 100644
74801 --- a/net/can/gw.c
74802 +++ b/net/can/gw.c
74803 @@ -96,7 +96,7 @@ struct cf_mod {
74804 struct {
74805 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
74806 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
74807 - } csumfunc;
74808 + } __no_const csumfunc;
74809 };
74810
74811
74812 diff --git a/net/compat.c b/net/compat.c
74813 index 6def90e..c6992fa 100644
74814 --- a/net/compat.c
74815 +++ b/net/compat.c
74816 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
74817 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
74818 __get_user(kmsg->msg_flags, &umsg->msg_flags))
74819 return -EFAULT;
74820 - kmsg->msg_name = compat_ptr(tmp1);
74821 - kmsg->msg_iov = compat_ptr(tmp2);
74822 - kmsg->msg_control = compat_ptr(tmp3);
74823 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
74824 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
74825 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
74826 return 0;
74827 }
74828
74829 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74830
74831 if (kern_msg->msg_namelen) {
74832 if (mode == VERIFY_READ) {
74833 - int err = move_addr_to_kernel(kern_msg->msg_name,
74834 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
74835 kern_msg->msg_namelen,
74836 kern_address);
74837 if (err < 0)
74838 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74839 kern_msg->msg_name = NULL;
74840
74841 tot_len = iov_from_user_compat_to_kern(kern_iov,
74842 - (struct compat_iovec __user *)kern_msg->msg_iov,
74843 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
74844 kern_msg->msg_iovlen);
74845 if (tot_len >= 0)
74846 kern_msg->msg_iov = kern_iov;
74847 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74848
74849 #define CMSG_COMPAT_FIRSTHDR(msg) \
74850 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
74851 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
74852 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
74853 (struct compat_cmsghdr __user *)NULL)
74854
74855 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
74856 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
74857 (ucmlen) <= (unsigned long) \
74858 ((mhdr)->msg_controllen - \
74859 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
74860 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
74861
74862 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
74863 struct compat_cmsghdr __user *cmsg, int cmsg_len)
74864 {
74865 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
74866 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
74867 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
74868 msg->msg_controllen)
74869 return NULL;
74870 return (struct compat_cmsghdr __user *)ptr;
74871 @@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74872 {
74873 struct compat_timeval ctv;
74874 struct compat_timespec cts[3];
74875 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74876 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74877 struct compat_cmsghdr cmhdr;
74878 int cmlen;
74879
74880 @@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74881
74882 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74883 {
74884 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74885 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74886 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74887 int fdnum = scm->fp->count;
74888 struct file **fp = scm->fp->fp;
74889 @@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
74890 return -EFAULT;
74891 old_fs = get_fs();
74892 set_fs(KERNEL_DS);
74893 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74894 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74895 set_fs(old_fs);
74896
74897 return err;
74898 @@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
74899 len = sizeof(ktime);
74900 old_fs = get_fs();
74901 set_fs(KERNEL_DS);
74902 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
74903 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
74904 set_fs(old_fs);
74905
74906 if (!err) {
74907 @@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74908 case MCAST_JOIN_GROUP:
74909 case MCAST_LEAVE_GROUP:
74910 {
74911 - struct compat_group_req __user *gr32 = (void *)optval;
74912 + struct compat_group_req __user *gr32 = (void __user *)optval;
74913 struct group_req __user *kgr =
74914 compat_alloc_user_space(sizeof(struct group_req));
74915 u32 interface;
74916 @@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74917 case MCAST_BLOCK_SOURCE:
74918 case MCAST_UNBLOCK_SOURCE:
74919 {
74920 - struct compat_group_source_req __user *gsr32 = (void *)optval;
74921 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74922 struct group_source_req __user *kgsr = compat_alloc_user_space(
74923 sizeof(struct group_source_req));
74924 u32 interface;
74925 @@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74926 }
74927 case MCAST_MSFILTER:
74928 {
74929 - struct compat_group_filter __user *gf32 = (void *)optval;
74930 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74931 struct group_filter __user *kgf;
74932 u32 interface, fmode, numsrc;
74933
74934 @@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
74935 char __user *optval, int __user *optlen,
74936 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74937 {
74938 - struct compat_group_filter __user *gf32 = (void *)optval;
74939 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74940 struct group_filter __user *kgf;
74941 int __user *koptlen;
74942 u32 interface, fmode, numsrc;
74943 diff --git a/net/core/datagram.c b/net/core/datagram.c
74944 index 68bbf9f..5ef0d12 100644
74945 --- a/net/core/datagram.c
74946 +++ b/net/core/datagram.c
74947 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
74948 }
74949
74950 kfree_skb(skb);
74951 - atomic_inc(&sk->sk_drops);
74952 + atomic_inc_unchecked(&sk->sk_drops);
74953 sk_mem_reclaim_partial(sk);
74954
74955 return err;
74956 diff --git a/net/core/dev.c b/net/core/dev.c
74957 index a4bf943..9c83051 100644
74958 --- a/net/core/dev.c
74959 +++ b/net/core/dev.c
74960 @@ -1138,10 +1138,14 @@ void dev_load(struct net *net, const char *name)
74961 if (no_module && capable(CAP_NET_ADMIN))
74962 no_module = request_module("netdev-%s", name);
74963 if (no_module && capable(CAP_SYS_MODULE)) {
74964 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74965 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
74966 +#else
74967 if (!request_module("%s", name))
74968 pr_err("Loading kernel module for a network device "
74969 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
74970 "instead\n", name);
74971 +#endif
74972 }
74973 }
74974 EXPORT_SYMBOL(dev_load);
74975 @@ -1585,7 +1589,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74976 {
74977 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
74978 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
74979 - atomic_long_inc(&dev->rx_dropped);
74980 + atomic_long_inc_unchecked(&dev->rx_dropped);
74981 kfree_skb(skb);
74982 return NET_RX_DROP;
74983 }
74984 @@ -1595,7 +1599,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74985 nf_reset(skb);
74986
74987 if (unlikely(!is_skb_forwardable(dev, skb))) {
74988 - atomic_long_inc(&dev->rx_dropped);
74989 + atomic_long_inc_unchecked(&dev->rx_dropped);
74990 kfree_skb(skb);
74991 return NET_RX_DROP;
74992 }
74993 @@ -2057,7 +2061,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
74994
74995 struct dev_gso_cb {
74996 void (*destructor)(struct sk_buff *skb);
74997 -};
74998 +} __no_const;
74999
75000 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
75001
75002 @@ -2913,7 +2917,7 @@ enqueue:
75003
75004 local_irq_restore(flags);
75005
75006 - atomic_long_inc(&skb->dev->rx_dropped);
75007 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
75008 kfree_skb(skb);
75009 return NET_RX_DROP;
75010 }
75011 @@ -2985,7 +2989,7 @@ int netif_rx_ni(struct sk_buff *skb)
75012 }
75013 EXPORT_SYMBOL(netif_rx_ni);
75014
75015 -static void net_tx_action(struct softirq_action *h)
75016 +static void net_tx_action(void)
75017 {
75018 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75019
75020 @@ -3273,7 +3277,7 @@ ncls:
75021 if (pt_prev) {
75022 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
75023 } else {
75024 - atomic_long_inc(&skb->dev->rx_dropped);
75025 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
75026 kfree_skb(skb);
75027 /* Jamal, now you will not able to escape explaining
75028 * me how you were going to use this. :-)
75029 @@ -3833,7 +3837,7 @@ void netif_napi_del(struct napi_struct *napi)
75030 }
75031 EXPORT_SYMBOL(netif_napi_del);
75032
75033 -static void net_rx_action(struct softirq_action *h)
75034 +static void net_rx_action(void)
75035 {
75036 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75037 unsigned long time_limit = jiffies + 2;
75038 @@ -5890,7 +5894,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
75039 } else {
75040 netdev_stats_to_stats64(storage, &dev->stats);
75041 }
75042 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
75043 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
75044 return storage;
75045 }
75046 EXPORT_SYMBOL(dev_get_stats);
75047 diff --git a/net/core/flow.c b/net/core/flow.c
75048 index e318c7e..168b1d0 100644
75049 --- a/net/core/flow.c
75050 +++ b/net/core/flow.c
75051 @@ -61,7 +61,7 @@ struct flow_cache {
75052 struct timer_list rnd_timer;
75053 };
75054
75055 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
75056 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
75057 EXPORT_SYMBOL(flow_cache_genid);
75058 static struct flow_cache flow_cache_global;
75059 static struct kmem_cache *flow_cachep __read_mostly;
75060 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
75061
75062 static int flow_entry_valid(struct flow_cache_entry *fle)
75063 {
75064 - if (atomic_read(&flow_cache_genid) != fle->genid)
75065 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
75066 return 0;
75067 if (fle->object && !fle->object->ops->check(fle->object))
75068 return 0;
75069 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
75070 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
75071 fcp->hash_count++;
75072 }
75073 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
75074 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
75075 flo = fle->object;
75076 if (!flo)
75077 goto ret_object;
75078 @@ -280,7 +280,7 @@ nocache:
75079 }
75080 flo = resolver(net, key, family, dir, flo, ctx);
75081 if (fle) {
75082 - fle->genid = atomic_read(&flow_cache_genid);
75083 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
75084 if (!IS_ERR(flo))
75085 fle->object = flo;
75086 else
75087 diff --git a/net/core/iovec.c b/net/core/iovec.c
75088 index c40f27e..7f49254 100644
75089 --- a/net/core/iovec.c
75090 +++ b/net/core/iovec.c
75091 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
75092 if (m->msg_namelen) {
75093 if (mode == VERIFY_READ) {
75094 void __user *namep;
75095 - namep = (void __user __force *) m->msg_name;
75096 + namep = (void __force_user *) m->msg_name;
75097 err = move_addr_to_kernel(namep, m->msg_namelen,
75098 address);
75099 if (err < 0)
75100 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
75101 }
75102
75103 size = m->msg_iovlen * sizeof(struct iovec);
75104 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
75105 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
75106 return -EFAULT;
75107
75108 m->msg_iov = iov;
75109 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
75110 index 5c30296..ebe7b61 100644
75111 --- a/net/core/rtnetlink.c
75112 +++ b/net/core/rtnetlink.c
75113 @@ -57,7 +57,7 @@ struct rtnl_link {
75114 rtnl_doit_func doit;
75115 rtnl_dumpit_func dumpit;
75116 rtnl_calcit_func calcit;
75117 -};
75118 +} __no_const;
75119
75120 static DEFINE_MUTEX(rtnl_mutex);
75121
75122 diff --git a/net/core/scm.c b/net/core/scm.c
75123 index ff52ad0..aff1c0f 100644
75124 --- a/net/core/scm.c
75125 +++ b/net/core/scm.c
75126 @@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
75127 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75128 {
75129 struct cmsghdr __user *cm
75130 - = (__force struct cmsghdr __user *)msg->msg_control;
75131 + = (struct cmsghdr __force_user *)msg->msg_control;
75132 struct cmsghdr cmhdr;
75133 int cmlen = CMSG_LEN(len);
75134 int err;
75135 @@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75136 err = -EFAULT;
75137 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
75138 goto out;
75139 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
75140 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
75141 goto out;
75142 cmlen = CMSG_SPACE(len);
75143 if (msg->msg_controllen < cmlen)
75144 @@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
75145 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75146 {
75147 struct cmsghdr __user *cm
75148 - = (__force struct cmsghdr __user*)msg->msg_control;
75149 + = (struct cmsghdr __force_user *)msg->msg_control;
75150
75151 int fdmax = 0;
75152 int fdnum = scm->fp->count;
75153 @@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75154 if (fdnum < fdmax)
75155 fdmax = fdnum;
75156
75157 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
75158 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
75159 i++, cmfptr++)
75160 {
75161 int new_fd;
75162 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
75163 index da0c97f..8253632 100644
75164 --- a/net/core/skbuff.c
75165 +++ b/net/core/skbuff.c
75166 @@ -3160,6 +3160,8 @@ static void sock_rmem_free(struct sk_buff *skb)
75167 */
75168 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
75169 {
75170 + int len = skb->len;
75171 +
75172 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
75173 (unsigned)sk->sk_rcvbuf)
75174 return -ENOMEM;
75175 @@ -3174,7 +3176,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
75176
75177 skb_queue_tail(&sk->sk_error_queue, skb);
75178 if (!sock_flag(sk, SOCK_DEAD))
75179 - sk->sk_data_ready(sk, skb->len);
75180 + sk->sk_data_ready(sk, len);
75181 return 0;
75182 }
75183 EXPORT_SYMBOL(sock_queue_err_skb);
75184 diff --git a/net/core/sock.c b/net/core/sock.c
75185 index 02f8dfe..86dfd4a 100644
75186 --- a/net/core/sock.c
75187 +++ b/net/core/sock.c
75188 @@ -341,7 +341,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75189 struct sk_buff_head *list = &sk->sk_receive_queue;
75190
75191 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
75192 - atomic_inc(&sk->sk_drops);
75193 + atomic_inc_unchecked(&sk->sk_drops);
75194 trace_sock_rcvqueue_full(sk, skb);
75195 return -ENOMEM;
75196 }
75197 @@ -351,7 +351,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75198 return err;
75199
75200 if (!sk_rmem_schedule(sk, skb->truesize)) {
75201 - atomic_inc(&sk->sk_drops);
75202 + atomic_inc_unchecked(&sk->sk_drops);
75203 return -ENOBUFS;
75204 }
75205
75206 @@ -371,7 +371,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75207 skb_dst_force(skb);
75208
75209 spin_lock_irqsave(&list->lock, flags);
75210 - skb->dropcount = atomic_read(&sk->sk_drops);
75211 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
75212 __skb_queue_tail(list, skb);
75213 spin_unlock_irqrestore(&list->lock, flags);
75214
75215 @@ -391,7 +391,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75216 skb->dev = NULL;
75217
75218 if (sk_rcvqueues_full(sk, skb)) {
75219 - atomic_inc(&sk->sk_drops);
75220 + atomic_inc_unchecked(&sk->sk_drops);
75221 goto discard_and_relse;
75222 }
75223 if (nested)
75224 @@ -409,7 +409,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75225 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
75226 } else if (sk_add_backlog(sk, skb)) {
75227 bh_unlock_sock(sk);
75228 - atomic_inc(&sk->sk_drops);
75229 + atomic_inc_unchecked(&sk->sk_drops);
75230 goto discard_and_relse;
75231 }
75232
75233 @@ -974,7 +974,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75234 if (len > sizeof(peercred))
75235 len = sizeof(peercred);
75236 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
75237 - if (copy_to_user(optval, &peercred, len))
75238 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
75239 return -EFAULT;
75240 goto lenout;
75241 }
75242 @@ -987,7 +987,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75243 return -ENOTCONN;
75244 if (lv < len)
75245 return -EINVAL;
75246 - if (copy_to_user(optval, address, len))
75247 + if (len > sizeof(address) || copy_to_user(optval, address, len))
75248 return -EFAULT;
75249 goto lenout;
75250 }
75251 @@ -1024,7 +1024,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75252
75253 if (len > lv)
75254 len = lv;
75255 - if (copy_to_user(optval, &v, len))
75256 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
75257 return -EFAULT;
75258 lenout:
75259 if (put_user(len, optlen))
75260 @@ -2108,7 +2108,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
75261 */
75262 smp_wmb();
75263 atomic_set(&sk->sk_refcnt, 1);
75264 - atomic_set(&sk->sk_drops, 0);
75265 + atomic_set_unchecked(&sk->sk_drops, 0);
75266 }
75267 EXPORT_SYMBOL(sock_init_data);
75268
75269 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
75270 index b9868e1..849f809 100644
75271 --- a/net/core/sock_diag.c
75272 +++ b/net/core/sock_diag.c
75273 @@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
75274
75275 int sock_diag_check_cookie(void *sk, __u32 *cookie)
75276 {
75277 +#ifndef CONFIG_GRKERNSEC_HIDESYM
75278 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
75279 cookie[1] != INET_DIAG_NOCOOKIE) &&
75280 ((u32)(unsigned long)sk != cookie[0] ||
75281 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
75282 return -ESTALE;
75283 else
75284 +#endif
75285 return 0;
75286 }
75287 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
75288
75289 void sock_diag_save_cookie(void *sk, __u32 *cookie)
75290 {
75291 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75292 + cookie[0] = 0;
75293 + cookie[1] = 0;
75294 +#else
75295 cookie[0] = (u32)(unsigned long)sk;
75296 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
75297 +#endif
75298 }
75299 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
75300
75301 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
75302 index 02e75d1..9a57a7c 100644
75303 --- a/net/decnet/sysctl_net_decnet.c
75304 +++ b/net/decnet/sysctl_net_decnet.c
75305 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
75306
75307 if (len > *lenp) len = *lenp;
75308
75309 - if (copy_to_user(buffer, addr, len))
75310 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
75311 return -EFAULT;
75312
75313 *lenp = len;
75314 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
75315
75316 if (len > *lenp) len = *lenp;
75317
75318 - if (copy_to_user(buffer, devname, len))
75319 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
75320 return -EFAULT;
75321
75322 *lenp = len;
75323 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
75324 index 39a2d29..f39c0fe 100644
75325 --- a/net/econet/Kconfig
75326 +++ b/net/econet/Kconfig
75327 @@ -4,7 +4,7 @@
75328
75329 config ECONET
75330 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
75331 - depends on EXPERIMENTAL && INET
75332 + depends on EXPERIMENTAL && INET && BROKEN
75333 ---help---
75334 Econet is a fairly old and slow networking protocol mainly used by
75335 Acorn computers to access file and print servers. It uses native
75336 diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
75337 index 36d1440..44ff28b 100644
75338 --- a/net/ipv4/ah4.c
75339 +++ b/net/ipv4/ah4.c
75340 @@ -19,6 +19,8 @@ struct ah_skb_cb {
75341 #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
75342
75343 static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
75344 + unsigned int size) __size_overflow(3);
75345 +static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
75346 unsigned int size)
75347 {
75348 unsigned int len;
75349 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
75350 index 92fc5f6..b790d91 100644
75351 --- a/net/ipv4/fib_frontend.c
75352 +++ b/net/ipv4/fib_frontend.c
75353 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
75354 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75355 fib_sync_up(dev);
75356 #endif
75357 - atomic_inc(&net->ipv4.dev_addr_genid);
75358 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75359 rt_cache_flush(dev_net(dev), -1);
75360 break;
75361 case NETDEV_DOWN:
75362 fib_del_ifaddr(ifa, NULL);
75363 - atomic_inc(&net->ipv4.dev_addr_genid);
75364 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75365 if (ifa->ifa_dev->ifa_list == NULL) {
75366 /* Last address was deleted from this interface.
75367 * Disable IP.
75368 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
75369 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75370 fib_sync_up(dev);
75371 #endif
75372 - atomic_inc(&net->ipv4.dev_addr_genid);
75373 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75374 rt_cache_flush(dev_net(dev), -1);
75375 break;
75376 case NETDEV_DOWN:
75377 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
75378 index 80106d8..232e898 100644
75379 --- a/net/ipv4/fib_semantics.c
75380 +++ b/net/ipv4/fib_semantics.c
75381 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
75382 nh->nh_saddr = inet_select_addr(nh->nh_dev,
75383 nh->nh_gw,
75384 nh->nh_parent->fib_scope);
75385 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
75386 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
75387
75388 return nh->nh_saddr;
75389 }
75390 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
75391 index 984ec65..97ac518 100644
75392 --- a/net/ipv4/inet_hashtables.c
75393 +++ b/net/ipv4/inet_hashtables.c
75394 @@ -18,12 +18,15 @@
75395 #include <linux/sched.h>
75396 #include <linux/slab.h>
75397 #include <linux/wait.h>
75398 +#include <linux/security.h>
75399
75400 #include <net/inet_connection_sock.h>
75401 #include <net/inet_hashtables.h>
75402 #include <net/secure_seq.h>
75403 #include <net/ip.h>
75404
75405 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
75406 +
75407 /*
75408 * Allocate and initialize a new local port bind bucket.
75409 * The bindhash mutex for snum's hash chain must be held here.
75410 @@ -530,6 +533,8 @@ ok:
75411 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
75412 spin_unlock(&head->lock);
75413
75414 + gr_update_task_in_ip_table(current, inet_sk(sk));
75415 +
75416 if (tw) {
75417 inet_twsk_deschedule(tw, death_row);
75418 while (twrefcnt) {
75419 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
75420 index d4d61b6..b81aec8 100644
75421 --- a/net/ipv4/inetpeer.c
75422 +++ b/net/ipv4/inetpeer.c
75423 @@ -487,8 +487,8 @@ relookup:
75424 if (p) {
75425 p->daddr = *daddr;
75426 atomic_set(&p->refcnt, 1);
75427 - atomic_set(&p->rid, 0);
75428 - atomic_set(&p->ip_id_count,
75429 + atomic_set_unchecked(&p->rid, 0);
75430 + atomic_set_unchecked(&p->ip_id_count,
75431 (daddr->family == AF_INET) ?
75432 secure_ip_id(daddr->addr.a4) :
75433 secure_ipv6_id(daddr->addr.a6));
75434 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
75435 index 1f23a57..7180dfe 100644
75436 --- a/net/ipv4/ip_fragment.c
75437 +++ b/net/ipv4/ip_fragment.c
75438 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
75439 return 0;
75440
75441 start = qp->rid;
75442 - end = atomic_inc_return(&peer->rid);
75443 + end = atomic_inc_return_unchecked(&peer->rid);
75444 qp->rid = end;
75445
75446 rc = qp->q.fragments && (end - start) > max;
75447 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
75448 index 8aa87c1..35c3248 100644
75449 --- a/net/ipv4/ip_sockglue.c
75450 +++ b/net/ipv4/ip_sockglue.c
75451 @@ -1112,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75452 len = min_t(unsigned int, len, opt->optlen);
75453 if (put_user(len, optlen))
75454 return -EFAULT;
75455 - if (copy_to_user(optval, opt->__data, len))
75456 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
75457 + copy_to_user(optval, opt->__data, len))
75458 return -EFAULT;
75459 return 0;
75460 }
75461 @@ -1240,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75462 if (sk->sk_type != SOCK_STREAM)
75463 return -ENOPROTOOPT;
75464
75465 - msg.msg_control = optval;
75466 + msg.msg_control = (void __force_kernel *)optval;
75467 msg.msg_controllen = len;
75468 msg.msg_flags = flags;
75469
75470 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
75471 index 6e412a6..6640538 100644
75472 --- a/net/ipv4/ipconfig.c
75473 +++ b/net/ipv4/ipconfig.c
75474 @@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
75475
75476 mm_segment_t oldfs = get_fs();
75477 set_fs(get_ds());
75478 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75479 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75480 set_fs(oldfs);
75481 return res;
75482 }
75483 @@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
75484
75485 mm_segment_t oldfs = get_fs();
75486 set_fs(get_ds());
75487 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75488 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75489 set_fs(oldfs);
75490 return res;
75491 }
75492 @@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
75493
75494 mm_segment_t oldfs = get_fs();
75495 set_fs(get_ds());
75496 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
75497 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
75498 set_fs(oldfs);
75499 return res;
75500 }
75501 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
75502 index fd7a3f6..a1b1013 100644
75503 --- a/net/ipv4/netfilter/arp_tables.c
75504 +++ b/net/ipv4/netfilter/arp_tables.c
75505 @@ -757,6 +757,9 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
75506
75507 static int copy_entries_to_user(unsigned int total_size,
75508 const struct xt_table *table,
75509 + void __user *userptr) __size_overflow(1);
75510 +static int copy_entries_to_user(unsigned int total_size,
75511 + const struct xt_table *table,
75512 void __user *userptr)
75513 {
75514 unsigned int off, num;
75515 @@ -984,6 +987,11 @@ static int __do_replace(struct net *net, const char *name,
75516 unsigned int valid_hooks,
75517 struct xt_table_info *newinfo,
75518 unsigned int num_counters,
75519 + void __user *counters_ptr) __size_overflow(5);
75520 +static int __do_replace(struct net *net, const char *name,
75521 + unsigned int valid_hooks,
75522 + struct xt_table_info *newinfo,
75523 + unsigned int num_counters,
75524 void __user *counters_ptr)
75525 {
75526 int ret;
75527 @@ -1104,6 +1112,8 @@ static int do_replace(struct net *net, const void __user *user,
75528 }
75529
75530 static int do_add_counters(struct net *net, const void __user *user,
75531 + unsigned int len, int compat) __size_overflow(3);
75532 +static int do_add_counters(struct net *net, const void __user *user,
75533 unsigned int len, int compat)
75534 {
75535 unsigned int i, curcpu;
75536 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
75537 index 24e556e..b073356 100644
75538 --- a/net/ipv4/netfilter/ip_tables.c
75539 +++ b/net/ipv4/netfilter/ip_tables.c
75540 @@ -923,6 +923,10 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
75541 static int
75542 copy_entries_to_user(unsigned int total_size,
75543 const struct xt_table *table,
75544 + void __user *userptr) __size_overflow(1);
75545 +static int
75546 +copy_entries_to_user(unsigned int total_size,
75547 + const struct xt_table *table,
75548 void __user *userptr)
75549 {
75550 unsigned int off, num;
75551 @@ -1172,6 +1176,10 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
75552 static int
75553 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
75554 struct xt_table_info *newinfo, unsigned int num_counters,
75555 + void __user *counters_ptr) __size_overflow(5);
75556 +static int
75557 +__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
75558 + struct xt_table_info *newinfo, unsigned int num_counters,
75559 void __user *counters_ptr)
75560 {
75561 int ret;
75562 @@ -1293,6 +1301,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
75563
75564 static int
75565 do_add_counters(struct net *net, const void __user *user,
75566 + unsigned int len, int compat) __size_overflow(3);
75567 +static int
75568 +do_add_counters(struct net *net, const void __user *user,
75569 unsigned int len, int compat)
75570 {
75571 unsigned int i, curcpu;
75572 diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
75573 index ba5756d..8d34d74 100644
75574 --- a/net/ipv4/netfilter/ipt_ULOG.c
75575 +++ b/net/ipv4/netfilter/ipt_ULOG.c
75576 @@ -125,6 +125,7 @@ static void ulog_timer(unsigned long data)
75577 spin_unlock_bh(&ulog_lock);
75578 }
75579
75580 +static struct sk_buff *ulog_alloc_skb(unsigned int size) __size_overflow(1);
75581 static struct sk_buff *ulog_alloc_skb(unsigned int size)
75582 {
75583 struct sk_buff *skb;
75584 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
75585 index 2133c30..0e8047e 100644
75586 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
75587 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
75588 @@ -435,6 +435,10 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
75589 static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
75590 unsigned char *eoc,
75591 unsigned long **oid,
75592 + unsigned int *len) __size_overflow(2);
75593 +static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
75594 + unsigned char *eoc,
75595 + unsigned long **oid,
75596 unsigned int *len)
75597 {
75598 unsigned long subid;
75599 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
75600 index b072386..abdebcf 100644
75601 --- a/net/ipv4/ping.c
75602 +++ b/net/ipv4/ping.c
75603 @@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
75604 sk_rmem_alloc_get(sp),
75605 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75606 atomic_read(&sp->sk_refcnt), sp,
75607 - atomic_read(&sp->sk_drops), len);
75608 + atomic_read_unchecked(&sp->sk_drops), len);
75609 }
75610
75611 static int ping_seq_show(struct seq_file *seq, void *v)
75612 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
75613 index 3ccda5a..3c1e61d 100644
75614 --- a/net/ipv4/raw.c
75615 +++ b/net/ipv4/raw.c
75616 @@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
75617 int raw_rcv(struct sock *sk, struct sk_buff *skb)
75618 {
75619 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
75620 - atomic_inc(&sk->sk_drops);
75621 + atomic_inc_unchecked(&sk->sk_drops);
75622 kfree_skb(skb);
75623 return NET_RX_DROP;
75624 }
75625 @@ -742,16 +742,20 @@ static int raw_init(struct sock *sk)
75626
75627 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
75628 {
75629 + struct icmp_filter filter;
75630 +
75631 if (optlen > sizeof(struct icmp_filter))
75632 optlen = sizeof(struct icmp_filter);
75633 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
75634 + if (copy_from_user(&filter, optval, optlen))
75635 return -EFAULT;
75636 + raw_sk(sk)->filter = filter;
75637 return 0;
75638 }
75639
75640 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
75641 {
75642 int len, ret = -EFAULT;
75643 + struct icmp_filter filter;
75644
75645 if (get_user(len, optlen))
75646 goto out;
75647 @@ -761,8 +765,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
75648 if (len > sizeof(struct icmp_filter))
75649 len = sizeof(struct icmp_filter);
75650 ret = -EFAULT;
75651 - if (put_user(len, optlen) ||
75652 - copy_to_user(optval, &raw_sk(sk)->filter, len))
75653 + filter = raw_sk(sk)->filter;
75654 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
75655 goto out;
75656 ret = 0;
75657 out: return ret;
75658 @@ -990,7 +994,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75659 sk_wmem_alloc_get(sp),
75660 sk_rmem_alloc_get(sp),
75661 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75662 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75663 + atomic_read(&sp->sk_refcnt),
75664 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75665 + NULL,
75666 +#else
75667 + sp,
75668 +#endif
75669 + atomic_read_unchecked(&sp->sk_drops));
75670 }
75671
75672 static int raw_seq_show(struct seq_file *seq, void *v)
75673 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
75674 index 0197747..7adb0dc 100644
75675 --- a/net/ipv4/route.c
75676 +++ b/net/ipv4/route.c
75677 @@ -311,7 +311,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
75678
75679 static inline int rt_genid(struct net *net)
75680 {
75681 - return atomic_read(&net->ipv4.rt_genid);
75682 + return atomic_read_unchecked(&net->ipv4.rt_genid);
75683 }
75684
75685 #ifdef CONFIG_PROC_FS
75686 @@ -935,7 +935,7 @@ static void rt_cache_invalidate(struct net *net)
75687 unsigned char shuffle;
75688
75689 get_random_bytes(&shuffle, sizeof(shuffle));
75690 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
75691 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
75692 inetpeer_invalidate_tree(AF_INET);
75693 }
75694
75695 @@ -3010,7 +3010,7 @@ static int rt_fill_info(struct net *net,
75696 error = rt->dst.error;
75697 if (peer) {
75698 inet_peer_refcheck(rt->peer);
75699 - id = atomic_read(&peer->ip_id_count) & 0xffff;
75700 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
75701 if (peer->tcp_ts_stamp) {
75702 ts = peer->tcp_ts;
75703 tsage = get_seconds() - peer->tcp_ts_stamp;
75704 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
75705 index fd54c5f..96d6407 100644
75706 --- a/net/ipv4/tcp_ipv4.c
75707 +++ b/net/ipv4/tcp_ipv4.c
75708 @@ -88,6 +88,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
75709 int sysctl_tcp_low_latency __read_mostly;
75710 EXPORT_SYMBOL(sysctl_tcp_low_latency);
75711
75712 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75713 +extern int grsec_enable_blackhole;
75714 +#endif
75715
75716 #ifdef CONFIG_TCP_MD5SIG
75717 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
75718 @@ -1638,6 +1641,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
75719 return 0;
75720
75721 reset:
75722 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75723 + if (!grsec_enable_blackhole)
75724 +#endif
75725 tcp_v4_send_reset(rsk, skb);
75726 discard:
75727 kfree_skb(skb);
75728 @@ -1700,12 +1706,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
75729 TCP_SKB_CB(skb)->sacked = 0;
75730
75731 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75732 - if (!sk)
75733 + if (!sk) {
75734 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75735 + ret = 1;
75736 +#endif
75737 goto no_tcp_socket;
75738 -
75739 + }
75740 process:
75741 - if (sk->sk_state == TCP_TIME_WAIT)
75742 + if (sk->sk_state == TCP_TIME_WAIT) {
75743 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75744 + ret = 2;
75745 +#endif
75746 goto do_time_wait;
75747 + }
75748
75749 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
75750 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75751 @@ -1755,6 +1768,10 @@ no_tcp_socket:
75752 bad_packet:
75753 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75754 } else {
75755 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75756 + if (!grsec_enable_blackhole || (ret == 1 &&
75757 + (skb->dev->flags & IFF_LOOPBACK)))
75758 +#endif
75759 tcp_v4_send_reset(NULL, skb);
75760 }
75761
75762 @@ -2417,7 +2434,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
75763 0, /* non standard timer */
75764 0, /* open_requests have no inode */
75765 atomic_read(&sk->sk_refcnt),
75766 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75767 + NULL,
75768 +#else
75769 req,
75770 +#endif
75771 len);
75772 }
75773
75774 @@ -2467,7 +2488,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
75775 sock_i_uid(sk),
75776 icsk->icsk_probes_out,
75777 sock_i_ino(sk),
75778 - atomic_read(&sk->sk_refcnt), sk,
75779 + atomic_read(&sk->sk_refcnt),
75780 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75781 + NULL,
75782 +#else
75783 + sk,
75784 +#endif
75785 jiffies_to_clock_t(icsk->icsk_rto),
75786 jiffies_to_clock_t(icsk->icsk_ack.ato),
75787 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
75788 @@ -2495,7 +2521,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
75789 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
75790 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
75791 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75792 - atomic_read(&tw->tw_refcnt), tw, len);
75793 + atomic_read(&tw->tw_refcnt),
75794 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75795 + NULL,
75796 +#else
75797 + tw,
75798 +#endif
75799 + len);
75800 }
75801
75802 #define TMPSZ 150
75803 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
75804 index 550e755..25721b3 100644
75805 --- a/net/ipv4/tcp_minisocks.c
75806 +++ b/net/ipv4/tcp_minisocks.c
75807 @@ -27,6 +27,10 @@
75808 #include <net/inet_common.h>
75809 #include <net/xfrm.h>
75810
75811 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75812 +extern int grsec_enable_blackhole;
75813 +#endif
75814 +
75815 int sysctl_tcp_syncookies __read_mostly = 1;
75816 EXPORT_SYMBOL(sysctl_tcp_syncookies);
75817
75818 @@ -753,6 +757,10 @@ listen_overflow:
75819
75820 embryonic_reset:
75821 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
75822 +
75823 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75824 + if (!grsec_enable_blackhole)
75825 +#endif
75826 if (!(flg & TCP_FLAG_RST))
75827 req->rsk_ops->send_reset(sk, skb);
75828
75829 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
75830 index 85ee7eb..53277ab 100644
75831 --- a/net/ipv4/tcp_probe.c
75832 +++ b/net/ipv4/tcp_probe.c
75833 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
75834 if (cnt + width >= len)
75835 break;
75836
75837 - if (copy_to_user(buf + cnt, tbuf, width))
75838 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
75839 return -EFAULT;
75840 cnt += width;
75841 }
75842 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
75843 index cd2e072..1fffee2 100644
75844 --- a/net/ipv4/tcp_timer.c
75845 +++ b/net/ipv4/tcp_timer.c
75846 @@ -22,6 +22,10 @@
75847 #include <linux/gfp.h>
75848 #include <net/tcp.h>
75849
75850 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75851 +extern int grsec_lastack_retries;
75852 +#endif
75853 +
75854 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
75855 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
75856 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
75857 @@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
75858 }
75859 }
75860
75861 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75862 + if ((sk->sk_state == TCP_LAST_ACK) &&
75863 + (grsec_lastack_retries > 0) &&
75864 + (grsec_lastack_retries < retry_until))
75865 + retry_until = grsec_lastack_retries;
75866 +#endif
75867 +
75868 if (retransmits_timed_out(sk, retry_until,
75869 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
75870 /* Has it gone just too far? */
75871 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
75872 index 5d075b5..d907d5f 100644
75873 --- a/net/ipv4/udp.c
75874 +++ b/net/ipv4/udp.c
75875 @@ -86,6 +86,7 @@
75876 #include <linux/types.h>
75877 #include <linux/fcntl.h>
75878 #include <linux/module.h>
75879 +#include <linux/security.h>
75880 #include <linux/socket.h>
75881 #include <linux/sockios.h>
75882 #include <linux/igmp.h>
75883 @@ -108,6 +109,10 @@
75884 #include <trace/events/udp.h>
75885 #include "udp_impl.h"
75886
75887 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75888 +extern int grsec_enable_blackhole;
75889 +#endif
75890 +
75891 struct udp_table udp_table __read_mostly;
75892 EXPORT_SYMBOL(udp_table);
75893
75894 @@ -566,6 +571,9 @@ found:
75895 return s;
75896 }
75897
75898 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
75899 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
75900 +
75901 /*
75902 * This routine is called by the ICMP module when it gets some
75903 * sort of error condition. If err < 0 then the socket should
75904 @@ -857,9 +865,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
75905 dport = usin->sin_port;
75906 if (dport == 0)
75907 return -EINVAL;
75908 +
75909 + err = gr_search_udp_sendmsg(sk, usin);
75910 + if (err)
75911 + return err;
75912 } else {
75913 if (sk->sk_state != TCP_ESTABLISHED)
75914 return -EDESTADDRREQ;
75915 +
75916 + err = gr_search_udp_sendmsg(sk, NULL);
75917 + if (err)
75918 + return err;
75919 +
75920 daddr = inet->inet_daddr;
75921 dport = inet->inet_dport;
75922 /* Open fast path for connected socket.
75923 @@ -1100,7 +1117,7 @@ static unsigned int first_packet_length(struct sock *sk)
75924 udp_lib_checksum_complete(skb)) {
75925 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75926 IS_UDPLITE(sk));
75927 - atomic_inc(&sk->sk_drops);
75928 + atomic_inc_unchecked(&sk->sk_drops);
75929 __skb_unlink(skb, rcvq);
75930 __skb_queue_tail(&list_kill, skb);
75931 }
75932 @@ -1186,6 +1203,10 @@ try_again:
75933 if (!skb)
75934 goto out;
75935
75936 + err = gr_search_udp_recvmsg(sk, skb);
75937 + if (err)
75938 + goto out_free;
75939 +
75940 ulen = skb->len - sizeof(struct udphdr);
75941 copied = len;
75942 if (copied > ulen)
75943 @@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75944
75945 drop:
75946 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75947 - atomic_inc(&sk->sk_drops);
75948 + atomic_inc_unchecked(&sk->sk_drops);
75949 kfree_skb(skb);
75950 return -1;
75951 }
75952 @@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75953 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
75954
75955 if (!skb1) {
75956 - atomic_inc(&sk->sk_drops);
75957 + atomic_inc_unchecked(&sk->sk_drops);
75958 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
75959 IS_UDPLITE(sk));
75960 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75961 @@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75962 goto csum_error;
75963
75964 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
75965 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75966 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75967 +#endif
75968 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
75969
75970 /*
75971 @@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
75972 sk_wmem_alloc_get(sp),
75973 sk_rmem_alloc_get(sp),
75974 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75975 - atomic_read(&sp->sk_refcnt), sp,
75976 - atomic_read(&sp->sk_drops), len);
75977 + atomic_read(&sp->sk_refcnt),
75978 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75979 + NULL,
75980 +#else
75981 + sp,
75982 +#endif
75983 + atomic_read_unchecked(&sp->sk_drops), len);
75984 }
75985
75986 int udp4_seq_show(struct seq_file *seq, void *v)
75987 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
75988 index 6b8ebc5..1d624f4 100644
75989 --- a/net/ipv6/addrconf.c
75990 +++ b/net/ipv6/addrconf.c
75991 @@ -2145,7 +2145,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
75992 p.iph.ihl = 5;
75993 p.iph.protocol = IPPROTO_IPV6;
75994 p.iph.ttl = 64;
75995 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
75996 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
75997
75998 if (ops->ndo_do_ioctl) {
75999 mm_segment_t oldfs = get_fs();
76000 diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
76001 index 2ae79db..8f101bf 100644
76002 --- a/net/ipv6/ah6.c
76003 +++ b/net/ipv6/ah6.c
76004 @@ -56,6 +56,8 @@ struct ah_skb_cb {
76005 #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
76006
76007 static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
76008 + unsigned int size) __size_overflow(3);
76009 +static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
76010 unsigned int size)
76011 {
76012 unsigned int len;
76013 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
76014 index 02dd203..e03fcc9 100644
76015 --- a/net/ipv6/inet6_connection_sock.c
76016 +++ b/net/ipv6/inet6_connection_sock.c
76017 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
76018 #ifdef CONFIG_XFRM
76019 {
76020 struct rt6_info *rt = (struct rt6_info *)dst;
76021 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
76022 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
76023 }
76024 #endif
76025 }
76026 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
76027 #ifdef CONFIG_XFRM
76028 if (dst) {
76029 struct rt6_info *rt = (struct rt6_info *)dst;
76030 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
76031 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
76032 __sk_dst_reset(sk);
76033 dst = NULL;
76034 }
76035 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
76036 index 18a2719..779f36a 100644
76037 --- a/net/ipv6/ipv6_sockglue.c
76038 +++ b/net/ipv6/ipv6_sockglue.c
76039 @@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
76040 if (sk->sk_type != SOCK_STREAM)
76041 return -ENOPROTOOPT;
76042
76043 - msg.msg_control = optval;
76044 + msg.msg_control = (void __force_kernel *)optval;
76045 msg.msg_controllen = len;
76046 msg.msg_flags = flags;
76047
76048 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
76049 index 94874b0..108a94d 100644
76050 --- a/net/ipv6/netfilter/ip6_tables.c
76051 +++ b/net/ipv6/netfilter/ip6_tables.c
76052 @@ -945,6 +945,10 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
76053 static int
76054 copy_entries_to_user(unsigned int total_size,
76055 const struct xt_table *table,
76056 + void __user *userptr) __size_overflow(1);
76057 +static int
76058 +copy_entries_to_user(unsigned int total_size,
76059 + const struct xt_table *table,
76060 void __user *userptr)
76061 {
76062 unsigned int off, num;
76063 @@ -1194,6 +1198,10 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
76064 static int
76065 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
76066 struct xt_table_info *newinfo, unsigned int num_counters,
76067 + void __user *counters_ptr) __size_overflow(5);
76068 +static int
76069 +__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
76070 + struct xt_table_info *newinfo, unsigned int num_counters,
76071 void __user *counters_ptr)
76072 {
76073 int ret;
76074 @@ -1315,6 +1323,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
76075
76076 static int
76077 do_add_counters(struct net *net, const void __user *user, unsigned int len,
76078 + int compat) __size_overflow(3);
76079 +static int
76080 +do_add_counters(struct net *net, const void __user *user, unsigned int len,
76081 int compat)
76082 {
76083 unsigned int i, curcpu;
76084 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
76085 index d02f7e4..2d2a0f1 100644
76086 --- a/net/ipv6/raw.c
76087 +++ b/net/ipv6/raw.c
76088 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
76089 {
76090 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
76091 skb_checksum_complete(skb)) {
76092 - atomic_inc(&sk->sk_drops);
76093 + atomic_inc_unchecked(&sk->sk_drops);
76094 kfree_skb(skb);
76095 return NET_RX_DROP;
76096 }
76097 @@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
76098 struct raw6_sock *rp = raw6_sk(sk);
76099
76100 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
76101 - atomic_inc(&sk->sk_drops);
76102 + atomic_inc_unchecked(&sk->sk_drops);
76103 kfree_skb(skb);
76104 return NET_RX_DROP;
76105 }
76106 @@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
76107
76108 if (inet->hdrincl) {
76109 if (skb_checksum_complete(skb)) {
76110 - atomic_inc(&sk->sk_drops);
76111 + atomic_inc_unchecked(&sk->sk_drops);
76112 kfree_skb(skb);
76113 return NET_RX_DROP;
76114 }
76115 @@ -602,7 +602,7 @@ out:
76116 return err;
76117 }
76118
76119 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
76120 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
76121 struct flowi6 *fl6, struct dst_entry **dstp,
76122 unsigned int flags)
76123 {
76124 @@ -912,12 +912,15 @@ do_confirm:
76125 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
76126 char __user *optval, int optlen)
76127 {
76128 + struct icmp6_filter filter;
76129 +
76130 switch (optname) {
76131 case ICMPV6_FILTER:
76132 if (optlen > sizeof(struct icmp6_filter))
76133 optlen = sizeof(struct icmp6_filter);
76134 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
76135 + if (copy_from_user(&filter, optval, optlen))
76136 return -EFAULT;
76137 + raw6_sk(sk)->filter = filter;
76138 return 0;
76139 default:
76140 return -ENOPROTOOPT;
76141 @@ -930,6 +933,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76142 char __user *optval, int __user *optlen)
76143 {
76144 int len;
76145 + struct icmp6_filter filter;
76146
76147 switch (optname) {
76148 case ICMPV6_FILTER:
76149 @@ -941,7 +945,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76150 len = sizeof(struct icmp6_filter);
76151 if (put_user(len, optlen))
76152 return -EFAULT;
76153 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
76154 + filter = raw6_sk(sk)->filter;
76155 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
76156 return -EFAULT;
76157 return 0;
76158 default:
76159 @@ -1248,7 +1253,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
76160 0, 0L, 0,
76161 sock_i_uid(sp), 0,
76162 sock_i_ino(sp),
76163 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
76164 + atomic_read(&sp->sk_refcnt),
76165 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76166 + NULL,
76167 +#else
76168 + sp,
76169 +#endif
76170 + atomic_read_unchecked(&sp->sk_drops));
76171 }
76172
76173 static int raw6_seq_show(struct seq_file *seq, void *v)
76174 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
76175 index 3edd05a..63aad01 100644
76176 --- a/net/ipv6/tcp_ipv6.c
76177 +++ b/net/ipv6/tcp_ipv6.c
76178 @@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
76179 }
76180 #endif
76181
76182 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76183 +extern int grsec_enable_blackhole;
76184 +#endif
76185 +
76186 static void tcp_v6_hash(struct sock *sk)
76187 {
76188 if (sk->sk_state != TCP_CLOSE) {
76189 @@ -1650,6 +1654,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
76190 return 0;
76191
76192 reset:
76193 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76194 + if (!grsec_enable_blackhole)
76195 +#endif
76196 tcp_v6_send_reset(sk, skb);
76197 discard:
76198 if (opt_skb)
76199 @@ -1729,12 +1736,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
76200 TCP_SKB_CB(skb)->sacked = 0;
76201
76202 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76203 - if (!sk)
76204 + if (!sk) {
76205 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76206 + ret = 1;
76207 +#endif
76208 goto no_tcp_socket;
76209 + }
76210
76211 process:
76212 - if (sk->sk_state == TCP_TIME_WAIT)
76213 + if (sk->sk_state == TCP_TIME_WAIT) {
76214 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76215 + ret = 2;
76216 +#endif
76217 goto do_time_wait;
76218 + }
76219
76220 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
76221 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
76222 @@ -1782,6 +1797,10 @@ no_tcp_socket:
76223 bad_packet:
76224 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76225 } else {
76226 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76227 + if (!grsec_enable_blackhole || (ret == 1 &&
76228 + (skb->dev->flags & IFF_LOOPBACK)))
76229 +#endif
76230 tcp_v6_send_reset(NULL, skb);
76231 }
76232
76233 @@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
76234 uid,
76235 0, /* non standard timer */
76236 0, /* open_requests have no inode */
76237 - 0, req);
76238 + 0,
76239 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76240 + NULL
76241 +#else
76242 + req
76243 +#endif
76244 + );
76245 }
76246
76247 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76248 @@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76249 sock_i_uid(sp),
76250 icsk->icsk_probes_out,
76251 sock_i_ino(sp),
76252 - atomic_read(&sp->sk_refcnt), sp,
76253 + atomic_read(&sp->sk_refcnt),
76254 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76255 + NULL,
76256 +#else
76257 + sp,
76258 +#endif
76259 jiffies_to_clock_t(icsk->icsk_rto),
76260 jiffies_to_clock_t(icsk->icsk_ack.ato),
76261 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
76262 @@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
76263 dest->s6_addr32[2], dest->s6_addr32[3], destp,
76264 tw->tw_substate, 0, 0,
76265 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76266 - atomic_read(&tw->tw_refcnt), tw);
76267 + atomic_read(&tw->tw_refcnt),
76268 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76269 + NULL
76270 +#else
76271 + tw
76272 +#endif
76273 + );
76274 }
76275
76276 static int tcp6_seq_show(struct seq_file *seq, void *v)
76277 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
76278 index 4f96b5c..75543d7 100644
76279 --- a/net/ipv6/udp.c
76280 +++ b/net/ipv6/udp.c
76281 @@ -50,6 +50,10 @@
76282 #include <linux/seq_file.h>
76283 #include "udp_impl.h"
76284
76285 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76286 +extern int grsec_enable_blackhole;
76287 +#endif
76288 +
76289 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
76290 {
76291 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
76292 @@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
76293
76294 return 0;
76295 drop:
76296 - atomic_inc(&sk->sk_drops);
76297 + atomic_inc_unchecked(&sk->sk_drops);
76298 drop_no_sk_drops_inc:
76299 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76300 kfree_skb(skb);
76301 @@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
76302 continue;
76303 }
76304 drop:
76305 - atomic_inc(&sk->sk_drops);
76306 + atomic_inc_unchecked(&sk->sk_drops);
76307 UDP6_INC_STATS_BH(sock_net(sk),
76308 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
76309 UDP6_INC_STATS_BH(sock_net(sk),
76310 @@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76311 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
76312 proto == IPPROTO_UDPLITE);
76313
76314 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76315 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76316 +#endif
76317 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
76318
76319 kfree_skb(skb);
76320 @@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76321 if (!sock_owned_by_user(sk))
76322 udpv6_queue_rcv_skb(sk, skb);
76323 else if (sk_add_backlog(sk, skb)) {
76324 - atomic_inc(&sk->sk_drops);
76325 + atomic_inc_unchecked(&sk->sk_drops);
76326 bh_unlock_sock(sk);
76327 sock_put(sk);
76328 goto discard;
76329 @@ -1410,8 +1417,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
76330 0, 0L, 0,
76331 sock_i_uid(sp), 0,
76332 sock_i_ino(sp),
76333 - atomic_read(&sp->sk_refcnt), sp,
76334 - atomic_read(&sp->sk_drops));
76335 + atomic_read(&sp->sk_refcnt),
76336 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76337 + NULL,
76338 +#else
76339 + sp,
76340 +#endif
76341 + atomic_read_unchecked(&sp->sk_drops));
76342 }
76343
76344 int udp6_seq_show(struct seq_file *seq, void *v)
76345 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
76346 index 253695d..9481ce8 100644
76347 --- a/net/irda/ircomm/ircomm_tty.c
76348 +++ b/net/irda/ircomm/ircomm_tty.c
76349 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76350 add_wait_queue(&self->open_wait, &wait);
76351
76352 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
76353 - __FILE__,__LINE__, tty->driver->name, self->open_count );
76354 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76355
76356 /* As far as I can see, we protect open_count - Jean II */
76357 spin_lock_irqsave(&self->spinlock, flags);
76358 if (!tty_hung_up_p(filp)) {
76359 extra_count = 1;
76360 - self->open_count--;
76361 + local_dec(&self->open_count);
76362 }
76363 spin_unlock_irqrestore(&self->spinlock, flags);
76364 - self->blocked_open++;
76365 + local_inc(&self->blocked_open);
76366
76367 while (1) {
76368 if (tty->termios->c_cflag & CBAUD) {
76369 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76370 }
76371
76372 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
76373 - __FILE__,__LINE__, tty->driver->name, self->open_count );
76374 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76375
76376 schedule();
76377 }
76378 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76379 if (extra_count) {
76380 /* ++ is not atomic, so this should be protected - Jean II */
76381 spin_lock_irqsave(&self->spinlock, flags);
76382 - self->open_count++;
76383 + local_inc(&self->open_count);
76384 spin_unlock_irqrestore(&self->spinlock, flags);
76385 }
76386 - self->blocked_open--;
76387 + local_dec(&self->blocked_open);
76388
76389 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
76390 - __FILE__,__LINE__, tty->driver->name, self->open_count);
76391 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
76392
76393 if (!retval)
76394 self->flags |= ASYNC_NORMAL_ACTIVE;
76395 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
76396 }
76397 /* ++ is not atomic, so this should be protected - Jean II */
76398 spin_lock_irqsave(&self->spinlock, flags);
76399 - self->open_count++;
76400 + local_inc(&self->open_count);
76401
76402 tty->driver_data = self;
76403 self->tty = tty;
76404 spin_unlock_irqrestore(&self->spinlock, flags);
76405
76406 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
76407 - self->line, self->open_count);
76408 + self->line, local_read(&self->open_count));
76409
76410 /* Not really used by us, but lets do it anyway */
76411 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
76412 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76413 return;
76414 }
76415
76416 - if ((tty->count == 1) && (self->open_count != 1)) {
76417 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
76418 /*
76419 * Uh, oh. tty->count is 1, which means that the tty
76420 * structure will be freed. state->count should always
76421 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76422 */
76423 IRDA_DEBUG(0, "%s(), bad serial port count; "
76424 "tty->count is 1, state->count is %d\n", __func__ ,
76425 - self->open_count);
76426 - self->open_count = 1;
76427 + local_read(&self->open_count));
76428 + local_set(&self->open_count, 1);
76429 }
76430
76431 - if (--self->open_count < 0) {
76432 + if (local_dec_return(&self->open_count) < 0) {
76433 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
76434 - __func__, self->line, self->open_count);
76435 - self->open_count = 0;
76436 + __func__, self->line, local_read(&self->open_count));
76437 + local_set(&self->open_count, 0);
76438 }
76439 - if (self->open_count) {
76440 + if (local_read(&self->open_count)) {
76441 spin_unlock_irqrestore(&self->spinlock, flags);
76442
76443 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
76444 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76445 tty->closing = 0;
76446 self->tty = NULL;
76447
76448 - if (self->blocked_open) {
76449 + if (local_read(&self->blocked_open)) {
76450 if (self->close_delay)
76451 schedule_timeout_interruptible(self->close_delay);
76452 wake_up_interruptible(&self->open_wait);
76453 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
76454 spin_lock_irqsave(&self->spinlock, flags);
76455 self->flags &= ~ASYNC_NORMAL_ACTIVE;
76456 self->tty = NULL;
76457 - self->open_count = 0;
76458 + local_set(&self->open_count, 0);
76459 spin_unlock_irqrestore(&self->spinlock, flags);
76460
76461 wake_up_interruptible(&self->open_wait);
76462 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
76463 seq_putc(m, '\n');
76464
76465 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
76466 - seq_printf(m, "Open count: %d\n", self->open_count);
76467 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
76468 seq_printf(m, "Max data size: %d\n", self->max_data_size);
76469 seq_printf(m, "Max header size: %d\n", self->max_header_size);
76470
76471 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
76472 index d5c5b8f..33beff0 100644
76473 --- a/net/iucv/af_iucv.c
76474 +++ b/net/iucv/af_iucv.c
76475 @@ -764,10 +764,10 @@ static int iucv_sock_autobind(struct sock *sk)
76476
76477 write_lock_bh(&iucv_sk_list.lock);
76478
76479 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
76480 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76481 while (__iucv_get_sock_by_name(name)) {
76482 sprintf(name, "%08x",
76483 - atomic_inc_return(&iucv_sk_list.autobind_name));
76484 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76485 }
76486
76487 write_unlock_bh(&iucv_sk_list.lock);
76488 diff --git a/net/key/af_key.c b/net/key/af_key.c
76489 index 11dbb22..c20f667 100644
76490 --- a/net/key/af_key.c
76491 +++ b/net/key/af_key.c
76492 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
76493 static u32 get_acqseq(void)
76494 {
76495 u32 res;
76496 - static atomic_t acqseq;
76497 + static atomic_unchecked_t acqseq;
76498
76499 do {
76500 - res = atomic_inc_return(&acqseq);
76501 + res = atomic_inc_return_unchecked(&acqseq);
76502 } while (!res);
76503 return res;
76504 }
76505 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
76506 index 2f0642d..e5c6fba 100644
76507 --- a/net/mac80211/ieee80211_i.h
76508 +++ b/net/mac80211/ieee80211_i.h
76509 @@ -28,6 +28,7 @@
76510 #include <net/ieee80211_radiotap.h>
76511 #include <net/cfg80211.h>
76512 #include <net/mac80211.h>
76513 +#include <asm/local.h>
76514 #include "key.h"
76515 #include "sta_info.h"
76516
76517 @@ -781,7 +782,7 @@ struct ieee80211_local {
76518 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
76519 spinlock_t queue_stop_reason_lock;
76520
76521 - int open_count;
76522 + local_t open_count;
76523 int monitors, cooked_mntrs;
76524 /* number of interfaces with corresponding FIF_ flags */
76525 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
76526 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
76527 index 8e2137b..2974283 100644
76528 --- a/net/mac80211/iface.c
76529 +++ b/net/mac80211/iface.c
76530 @@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76531 break;
76532 }
76533
76534 - if (local->open_count == 0) {
76535 + if (local_read(&local->open_count) == 0) {
76536 res = drv_start(local);
76537 if (res)
76538 goto err_del_bss;
76539 @@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76540 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
76541
76542 if (!is_valid_ether_addr(dev->dev_addr)) {
76543 - if (!local->open_count)
76544 + if (!local_read(&local->open_count))
76545 drv_stop(local);
76546 return -EADDRNOTAVAIL;
76547 }
76548 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76549 mutex_unlock(&local->mtx);
76550
76551 if (coming_up)
76552 - local->open_count++;
76553 + local_inc(&local->open_count);
76554
76555 if (hw_reconf_flags)
76556 ieee80211_hw_config(local, hw_reconf_flags);
76557 @@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76558 err_del_interface:
76559 drv_remove_interface(local, sdata);
76560 err_stop:
76561 - if (!local->open_count)
76562 + if (!local_read(&local->open_count))
76563 drv_stop(local);
76564 err_del_bss:
76565 sdata->bss = NULL;
76566 @@ -489,7 +489,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76567 }
76568
76569 if (going_down)
76570 - local->open_count--;
76571 + local_dec(&local->open_count);
76572
76573 switch (sdata->vif.type) {
76574 case NL80211_IFTYPE_AP_VLAN:
76575 @@ -548,7 +548,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76576
76577 ieee80211_recalc_ps(local, -1);
76578
76579 - if (local->open_count == 0) {
76580 + if (local_read(&local->open_count) == 0) {
76581 if (local->ops->napi_poll)
76582 napi_disable(&local->napi);
76583 ieee80211_clear_tx_pending(local);
76584 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
76585 index b142bd4..a651749 100644
76586 --- a/net/mac80211/main.c
76587 +++ b/net/mac80211/main.c
76588 @@ -166,7 +166,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
76589 local->hw.conf.power_level = power;
76590 }
76591
76592 - if (changed && local->open_count) {
76593 + if (changed && local_read(&local->open_count)) {
76594 ret = drv_config(local, changed);
76595 /*
76596 * Goal:
76597 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
76598 index 596efaf..8f1911f 100644
76599 --- a/net/mac80211/pm.c
76600 +++ b/net/mac80211/pm.c
76601 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76602 struct ieee80211_sub_if_data *sdata;
76603 struct sta_info *sta;
76604
76605 - if (!local->open_count)
76606 + if (!local_read(&local->open_count))
76607 goto suspend;
76608
76609 ieee80211_scan_cancel(local);
76610 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76611 cancel_work_sync(&local->dynamic_ps_enable_work);
76612 del_timer_sync(&local->dynamic_ps_timer);
76613
76614 - local->wowlan = wowlan && local->open_count;
76615 + local->wowlan = wowlan && local_read(&local->open_count);
76616 if (local->wowlan) {
76617 int err = drv_suspend(local, wowlan);
76618 if (err < 0) {
76619 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76620 }
76621
76622 /* stop hardware - this must stop RX */
76623 - if (local->open_count)
76624 + if (local_read(&local->open_count))
76625 ieee80211_stop_device(local);
76626
76627 suspend:
76628 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
76629 index f9b8e81..bb89b46 100644
76630 --- a/net/mac80211/rate.c
76631 +++ b/net/mac80211/rate.c
76632 @@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
76633
76634 ASSERT_RTNL();
76635
76636 - if (local->open_count)
76637 + if (local_read(&local->open_count))
76638 return -EBUSY;
76639
76640 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
76641 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
76642 index c97a065..ff61928 100644
76643 --- a/net/mac80211/rc80211_pid_debugfs.c
76644 +++ b/net/mac80211/rc80211_pid_debugfs.c
76645 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
76646
76647 spin_unlock_irqrestore(&events->lock, status);
76648
76649 - if (copy_to_user(buf, pb, p))
76650 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
76651 return -EFAULT;
76652
76653 return p;
76654 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
76655 index 9919892..8c49803 100644
76656 --- a/net/mac80211/util.c
76657 +++ b/net/mac80211/util.c
76658 @@ -1143,7 +1143,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
76659 }
76660 #endif
76661 /* everything else happens only if HW was up & running */
76662 - if (!local->open_count)
76663 + if (!local_read(&local->open_count))
76664 goto wake_up;
76665
76666 /*
76667 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
76668 index f8ac4ef..b02560b 100644
76669 --- a/net/netfilter/Kconfig
76670 +++ b/net/netfilter/Kconfig
76671 @@ -806,6 +806,16 @@ config NETFILTER_XT_MATCH_ESP
76672
76673 To compile it as a module, choose M here. If unsure, say N.
76674
76675 +config NETFILTER_XT_MATCH_GRADM
76676 + tristate '"gradm" match support'
76677 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
76678 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
76679 + ---help---
76680 + The gradm match allows to match on grsecurity RBAC being enabled.
76681 + It is useful when iptables rules are applied early on bootup to
76682 + prevent connections to the machine (except from a trusted host)
76683 + while the RBAC system is disabled.
76684 +
76685 config NETFILTER_XT_MATCH_HASHLIMIT
76686 tristate '"hashlimit" match support'
76687 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
76688 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
76689 index 40f4c3d..0d5dd6b 100644
76690 --- a/net/netfilter/Makefile
76691 +++ b/net/netfilter/Makefile
76692 @@ -83,6 +83,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
76693 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
76694 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
76695 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
76696 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
76697 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
76698 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
76699 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
76700 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
76701 index 29fa5ba..8debc79 100644
76702 --- a/net/netfilter/ipvs/ip_vs_conn.c
76703 +++ b/net/netfilter/ipvs/ip_vs_conn.c
76704 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
76705 /* Increase the refcnt counter of the dest */
76706 atomic_inc(&dest->refcnt);
76707
76708 - conn_flags = atomic_read(&dest->conn_flags);
76709 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
76710 if (cp->protocol != IPPROTO_UDP)
76711 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
76712 /* Bind with the destination and its corresponding transmitter */
76713 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
76714 atomic_set(&cp->refcnt, 1);
76715
76716 atomic_set(&cp->n_control, 0);
76717 - atomic_set(&cp->in_pkts, 0);
76718 + atomic_set_unchecked(&cp->in_pkts, 0);
76719
76720 atomic_inc(&ipvs->conn_count);
76721 if (flags & IP_VS_CONN_F_NO_CPORT)
76722 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
76723
76724 /* Don't drop the entry if its number of incoming packets is not
76725 located in [0, 8] */
76726 - i = atomic_read(&cp->in_pkts);
76727 + i = atomic_read_unchecked(&cp->in_pkts);
76728 if (i > 8 || i < 0) return 0;
76729
76730 if (!todrop_rate[i]) return 0;
76731 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
76732 index 2555816..31492d9 100644
76733 --- a/net/netfilter/ipvs/ip_vs_core.c
76734 +++ b/net/netfilter/ipvs/ip_vs_core.c
76735 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
76736 ret = cp->packet_xmit(skb, cp, pd->pp);
76737 /* do not touch skb anymore */
76738
76739 - atomic_inc(&cp->in_pkts);
76740 + atomic_inc_unchecked(&cp->in_pkts);
76741 ip_vs_conn_put(cp);
76742 return ret;
76743 }
76744 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
76745 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
76746 pkts = sysctl_sync_threshold(ipvs);
76747 else
76748 - pkts = atomic_add_return(1, &cp->in_pkts);
76749 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76750
76751 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
76752 cp->protocol == IPPROTO_SCTP) {
76753 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
76754 index b3afe18..08ec940 100644
76755 --- a/net/netfilter/ipvs/ip_vs_ctl.c
76756 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
76757 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
76758 ip_vs_rs_hash(ipvs, dest);
76759 write_unlock_bh(&ipvs->rs_lock);
76760 }
76761 - atomic_set(&dest->conn_flags, conn_flags);
76762 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
76763
76764 /* bind the service */
76765 if (!dest->svc) {
76766 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76767 " %-7s %-6d %-10d %-10d\n",
76768 &dest->addr.in6,
76769 ntohs(dest->port),
76770 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76771 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76772 atomic_read(&dest->weight),
76773 atomic_read(&dest->activeconns),
76774 atomic_read(&dest->inactconns));
76775 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76776 "%-7s %-6d %-10d %-10d\n",
76777 ntohl(dest->addr.ip),
76778 ntohs(dest->port),
76779 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76780 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76781 atomic_read(&dest->weight),
76782 atomic_read(&dest->activeconns),
76783 atomic_read(&dest->inactconns));
76784 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
76785
76786 entry.addr = dest->addr.ip;
76787 entry.port = dest->port;
76788 - entry.conn_flags = atomic_read(&dest->conn_flags);
76789 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
76790 entry.weight = atomic_read(&dest->weight);
76791 entry.u_threshold = dest->u_threshold;
76792 entry.l_threshold = dest->l_threshold;
76793 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
76794 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
76795
76796 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
76797 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76798 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76799 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
76800 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
76801 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
76802 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
76803 index 8a0d6d6..90ec197 100644
76804 --- a/net/netfilter/ipvs/ip_vs_sync.c
76805 +++ b/net/netfilter/ipvs/ip_vs_sync.c
76806 @@ -649,7 +649,7 @@ control:
76807 * i.e only increment in_pkts for Templates.
76808 */
76809 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
76810 - int pkts = atomic_add_return(1, &cp->in_pkts);
76811 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76812
76813 if (pkts % sysctl_sync_period(ipvs) != 1)
76814 return;
76815 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
76816
76817 if (opt)
76818 memcpy(&cp->in_seq, opt, sizeof(*opt));
76819 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76820 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76821 cp->state = state;
76822 cp->old_state = cp->state;
76823 /*
76824 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
76825 index 7fd66de..e6fb361 100644
76826 --- a/net/netfilter/ipvs/ip_vs_xmit.c
76827 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
76828 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
76829 else
76830 rc = NF_ACCEPT;
76831 /* do not touch skb anymore */
76832 - atomic_inc(&cp->in_pkts);
76833 + atomic_inc_unchecked(&cp->in_pkts);
76834 goto out;
76835 }
76836
76837 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
76838 else
76839 rc = NF_ACCEPT;
76840 /* do not touch skb anymore */
76841 - atomic_inc(&cp->in_pkts);
76842 + atomic_inc_unchecked(&cp->in_pkts);
76843 goto out;
76844 }
76845
76846 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
76847 index 66b2c54..c7884e3 100644
76848 --- a/net/netfilter/nfnetlink_log.c
76849 +++ b/net/netfilter/nfnetlink_log.c
76850 @@ -70,7 +70,7 @@ struct nfulnl_instance {
76851 };
76852
76853 static DEFINE_SPINLOCK(instances_lock);
76854 -static atomic_t global_seq;
76855 +static atomic_unchecked_t global_seq;
76856
76857 #define INSTANCE_BUCKETS 16
76858 static struct hlist_head instance_table[INSTANCE_BUCKETS];
76859 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
76860 /* global sequence number */
76861 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
76862 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
76863 - htonl(atomic_inc_return(&global_seq)));
76864 + htonl(atomic_inc_return_unchecked(&global_seq)));
76865
76866 if (data_len) {
76867 struct nlattr *nla;
76868 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
76869 new file mode 100644
76870 index 0000000..6905327
76871 --- /dev/null
76872 +++ b/net/netfilter/xt_gradm.c
76873 @@ -0,0 +1,51 @@
76874 +/*
76875 + * gradm match for netfilter
76876 + * Copyright © Zbigniew Krzystolik, 2010
76877 + *
76878 + * This program is free software; you can redistribute it and/or modify
76879 + * it under the terms of the GNU General Public License; either version
76880 + * 2 or 3 as published by the Free Software Foundation.
76881 + */
76882 +#include <linux/module.h>
76883 +#include <linux/moduleparam.h>
76884 +#include <linux/skbuff.h>
76885 +#include <linux/netfilter/x_tables.h>
76886 +#include <linux/grsecurity.h>
76887 +#include <linux/netfilter/xt_gradm.h>
76888 +
76889 +static bool
76890 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
76891 +{
76892 + const struct xt_gradm_mtinfo *info = par->matchinfo;
76893 + bool retval = false;
76894 + if (gr_acl_is_enabled())
76895 + retval = true;
76896 + return retval ^ info->invflags;
76897 +}
76898 +
76899 +static struct xt_match gradm_mt_reg __read_mostly = {
76900 + .name = "gradm",
76901 + .revision = 0,
76902 + .family = NFPROTO_UNSPEC,
76903 + .match = gradm_mt,
76904 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
76905 + .me = THIS_MODULE,
76906 +};
76907 +
76908 +static int __init gradm_mt_init(void)
76909 +{
76910 + return xt_register_match(&gradm_mt_reg);
76911 +}
76912 +
76913 +static void __exit gradm_mt_exit(void)
76914 +{
76915 + xt_unregister_match(&gradm_mt_reg);
76916 +}
76917 +
76918 +module_init(gradm_mt_init);
76919 +module_exit(gradm_mt_exit);
76920 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
76921 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
76922 +MODULE_LICENSE("GPL");
76923 +MODULE_ALIAS("ipt_gradm");
76924 +MODULE_ALIAS("ip6t_gradm");
76925 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
76926 index 4fe4fb4..87a89e5 100644
76927 --- a/net/netfilter/xt_statistic.c
76928 +++ b/net/netfilter/xt_statistic.c
76929 @@ -19,7 +19,7 @@
76930 #include <linux/module.h>
76931
76932 struct xt_statistic_priv {
76933 - atomic_t count;
76934 + atomic_unchecked_t count;
76935 } ____cacheline_aligned_in_smp;
76936
76937 MODULE_LICENSE("GPL");
76938 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
76939 break;
76940 case XT_STATISTIC_MODE_NTH:
76941 do {
76942 - oval = atomic_read(&info->master->count);
76943 + oval = atomic_read_unchecked(&info->master->count);
76944 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
76945 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
76946 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
76947 if (nval == 0)
76948 ret = !ret;
76949 break;
76950 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
76951 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
76952 if (info->master == NULL)
76953 return -ENOMEM;
76954 - atomic_set(&info->master->count, info->u.nth.count);
76955 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
76956
76957 return 0;
76958 }
76959 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
76960 index 629b061..8f415cc 100644
76961 --- a/net/netlink/af_netlink.c
76962 +++ b/net/netlink/af_netlink.c
76963 @@ -741,7 +741,7 @@ static void netlink_overrun(struct sock *sk)
76964 sk->sk_error_report(sk);
76965 }
76966 }
76967 - atomic_inc(&sk->sk_drops);
76968 + atomic_inc_unchecked(&sk->sk_drops);
76969 }
76970
76971 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
76972 @@ -829,12 +829,19 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
76973 return 0;
76974 }
76975
76976 -int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
76977 +static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
76978 {
76979 int len = skb->len;
76980
76981 skb_queue_tail(&sk->sk_receive_queue, skb);
76982 sk->sk_data_ready(sk, len);
76983 + return len;
76984 +}
76985 +
76986 +int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
76987 +{
76988 + int len = __netlink_sendskb(sk, skb);
76989 +
76990 sock_put(sk);
76991 return len;
76992 }
76993 @@ -957,8 +964,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
76994 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
76995 !test_bit(0, &nlk->state)) {
76996 skb_set_owner_r(skb, sk);
76997 - skb_queue_tail(&sk->sk_receive_queue, skb);
76998 - sk->sk_data_ready(sk, skb->len);
76999 + __netlink_sendskb(sk, skb);
77000 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
77001 }
77002 return -1;
77003 @@ -1680,10 +1686,8 @@ static int netlink_dump(struct sock *sk)
77004
77005 if (sk_filter(sk, skb))
77006 kfree_skb(skb);
77007 - else {
77008 - skb_queue_tail(&sk->sk_receive_queue, skb);
77009 - sk->sk_data_ready(sk, skb->len);
77010 - }
77011 + else
77012 + __netlink_sendskb(sk, skb);
77013 return 0;
77014 }
77015
77016 @@ -1697,10 +1701,8 @@ static int netlink_dump(struct sock *sk)
77017
77018 if (sk_filter(sk, skb))
77019 kfree_skb(skb);
77020 - else {
77021 - skb_queue_tail(&sk->sk_receive_queue, skb);
77022 - sk->sk_data_ready(sk, skb->len);
77023 - }
77024 + else
77025 + __netlink_sendskb(sk, skb);
77026
77027 if (cb->done)
77028 cb->done(cb);
77029 @@ -1995,7 +1997,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
77030 sk_wmem_alloc_get(s),
77031 nlk->cb,
77032 atomic_read(&s->sk_refcnt),
77033 - atomic_read(&s->sk_drops),
77034 + atomic_read_unchecked(&s->sk_drops),
77035 sock_i_ino(s)
77036 );
77037
77038 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
77039 index 7dab229..212156f 100644
77040 --- a/net/netrom/af_netrom.c
77041 +++ b/net/netrom/af_netrom.c
77042 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
77043 struct sock *sk = sock->sk;
77044 struct nr_sock *nr = nr_sk(sk);
77045
77046 + memset(sax, 0, sizeof(*sax));
77047 lock_sock(sk);
77048 if (peer != 0) {
77049 if (sk->sk_state != TCP_ESTABLISHED) {
77050 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
77051 *uaddr_len = sizeof(struct full_sockaddr_ax25);
77052 } else {
77053 sax->fsa_ax25.sax25_family = AF_NETROM;
77054 - sax->fsa_ax25.sax25_ndigis = 0;
77055 sax->fsa_ax25.sax25_call = nr->source_addr;
77056 *uaddr_len = sizeof(struct sockaddr_ax25);
77057 }
77058 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
77059 index 2dbb32b..a1b4722 100644
77060 --- a/net/packet/af_packet.c
77061 +++ b/net/packet/af_packet.c
77062 @@ -1676,7 +1676,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
77063
77064 spin_lock(&sk->sk_receive_queue.lock);
77065 po->stats.tp_packets++;
77066 - skb->dropcount = atomic_read(&sk->sk_drops);
77067 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
77068 __skb_queue_tail(&sk->sk_receive_queue, skb);
77069 spin_unlock(&sk->sk_receive_queue.lock);
77070 sk->sk_data_ready(sk, skb->len);
77071 @@ -1685,7 +1685,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
77072 drop_n_acct:
77073 spin_lock(&sk->sk_receive_queue.lock);
77074 po->stats.tp_drops++;
77075 - atomic_inc(&sk->sk_drops);
77076 + atomic_inc_unchecked(&sk->sk_drops);
77077 spin_unlock(&sk->sk_receive_queue.lock);
77078
77079 drop_n_restore:
77080 @@ -3271,7 +3271,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
77081 case PACKET_HDRLEN:
77082 if (len > sizeof(int))
77083 len = sizeof(int);
77084 - if (copy_from_user(&val, optval, len))
77085 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
77086 return -EFAULT;
77087 switch (val) {
77088 case TPACKET_V1:
77089 @@ -3321,7 +3321,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
77090
77091 if (put_user(len, optlen))
77092 return -EFAULT;
77093 - if (copy_to_user(optval, data, len))
77094 + if (len > sizeof(st) || copy_to_user(optval, data, len))
77095 return -EFAULT;
77096 return 0;
77097 }
77098 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
77099 index d65f699..05aa6ce 100644
77100 --- a/net/phonet/af_phonet.c
77101 +++ b/net/phonet/af_phonet.c
77102 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
77103 {
77104 struct phonet_protocol *pp;
77105
77106 - if (protocol >= PHONET_NPROTO)
77107 + if (protocol < 0 || protocol >= PHONET_NPROTO)
77108 return NULL;
77109
77110 rcu_read_lock();
77111 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
77112 {
77113 int err = 0;
77114
77115 - if (protocol >= PHONET_NPROTO)
77116 + if (protocol < 0 || protocol >= PHONET_NPROTO)
77117 return -EINVAL;
77118
77119 err = proto_register(pp->prot, 1);
77120 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
77121 index 9f60008..ae96f04 100644
77122 --- a/net/phonet/pep.c
77123 +++ b/net/phonet/pep.c
77124 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
77125
77126 case PNS_PEP_CTRL_REQ:
77127 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
77128 - atomic_inc(&sk->sk_drops);
77129 + atomic_inc_unchecked(&sk->sk_drops);
77130 break;
77131 }
77132 __skb_pull(skb, 4);
77133 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
77134 }
77135
77136 if (pn->rx_credits == 0) {
77137 - atomic_inc(&sk->sk_drops);
77138 + atomic_inc_unchecked(&sk->sk_drops);
77139 err = -ENOBUFS;
77140 break;
77141 }
77142 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
77143 }
77144
77145 if (pn->rx_credits == 0) {
77146 - atomic_inc(&sk->sk_drops);
77147 + atomic_inc_unchecked(&sk->sk_drops);
77148 err = NET_RX_DROP;
77149 break;
77150 }
77151 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
77152 index 4c7eff3..59c727f 100644
77153 --- a/net/phonet/socket.c
77154 +++ b/net/phonet/socket.c
77155 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
77156 pn->resource, sk->sk_state,
77157 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
77158 sock_i_uid(sk), sock_i_ino(sk),
77159 - atomic_read(&sk->sk_refcnt), sk,
77160 - atomic_read(&sk->sk_drops), &len);
77161 + atomic_read(&sk->sk_refcnt),
77162 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77163 + NULL,
77164 +#else
77165 + sk,
77166 +#endif
77167 + atomic_read_unchecked(&sk->sk_drops), &len);
77168 }
77169 seq_printf(seq, "%*s\n", 127 - len, "");
77170 return 0;
77171 diff --git a/net/rds/cong.c b/net/rds/cong.c
77172 index e5b65ac..f3b6fb7 100644
77173 --- a/net/rds/cong.c
77174 +++ b/net/rds/cong.c
77175 @@ -78,7 +78,7 @@
77176 * finds that the saved generation number is smaller than the global generation
77177 * number, it wakes up the process.
77178 */
77179 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
77180 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
77181
77182 /*
77183 * Congestion monitoring
77184 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
77185 rdsdebug("waking map %p for %pI4\n",
77186 map, &map->m_addr);
77187 rds_stats_inc(s_cong_update_received);
77188 - atomic_inc(&rds_cong_generation);
77189 + atomic_inc_unchecked(&rds_cong_generation);
77190 if (waitqueue_active(&map->m_waitq))
77191 wake_up(&map->m_waitq);
77192 if (waitqueue_active(&rds_poll_waitq))
77193 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
77194
77195 int rds_cong_updated_since(unsigned long *recent)
77196 {
77197 - unsigned long gen = atomic_read(&rds_cong_generation);
77198 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
77199
77200 if (likely(*recent == gen))
77201 return 0;
77202 diff --git a/net/rds/ib.h b/net/rds/ib.h
77203 index edfaaaf..8c89879 100644
77204 --- a/net/rds/ib.h
77205 +++ b/net/rds/ib.h
77206 @@ -128,7 +128,7 @@ struct rds_ib_connection {
77207 /* sending acks */
77208 unsigned long i_ack_flags;
77209 #ifdef KERNEL_HAS_ATOMIC64
77210 - atomic64_t i_ack_next; /* next ACK to send */
77211 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
77212 #else
77213 spinlock_t i_ack_lock; /* protect i_ack_next */
77214 u64 i_ack_next; /* next ACK to send */
77215 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
77216 index 51c8689..36c555f 100644
77217 --- a/net/rds/ib_cm.c
77218 +++ b/net/rds/ib_cm.c
77219 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
77220 /* Clear the ACK state */
77221 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
77222 #ifdef KERNEL_HAS_ATOMIC64
77223 - atomic64_set(&ic->i_ack_next, 0);
77224 + atomic64_set_unchecked(&ic->i_ack_next, 0);
77225 #else
77226 ic->i_ack_next = 0;
77227 #endif
77228 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
77229 index e29e0ca..fa3a6a3 100644
77230 --- a/net/rds/ib_recv.c
77231 +++ b/net/rds/ib_recv.c
77232 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
77233 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
77234 int ack_required)
77235 {
77236 - atomic64_set(&ic->i_ack_next, seq);
77237 + atomic64_set_unchecked(&ic->i_ack_next, seq);
77238 if (ack_required) {
77239 smp_mb__before_clear_bit();
77240 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77241 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
77242 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77243 smp_mb__after_clear_bit();
77244
77245 - return atomic64_read(&ic->i_ack_next);
77246 + return atomic64_read_unchecked(&ic->i_ack_next);
77247 }
77248 #endif
77249
77250 diff --git a/net/rds/iw.h b/net/rds/iw.h
77251 index 04ce3b1..48119a6 100644
77252 --- a/net/rds/iw.h
77253 +++ b/net/rds/iw.h
77254 @@ -134,7 +134,7 @@ struct rds_iw_connection {
77255 /* sending acks */
77256 unsigned long i_ack_flags;
77257 #ifdef KERNEL_HAS_ATOMIC64
77258 - atomic64_t i_ack_next; /* next ACK to send */
77259 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
77260 #else
77261 spinlock_t i_ack_lock; /* protect i_ack_next */
77262 u64 i_ack_next; /* next ACK to send */
77263 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
77264 index 9556d28..f046d0e 100644
77265 --- a/net/rds/iw_cm.c
77266 +++ b/net/rds/iw_cm.c
77267 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
77268 /* Clear the ACK state */
77269 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
77270 #ifdef KERNEL_HAS_ATOMIC64
77271 - atomic64_set(&ic->i_ack_next, 0);
77272 + atomic64_set_unchecked(&ic->i_ack_next, 0);
77273 #else
77274 ic->i_ack_next = 0;
77275 #endif
77276 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
77277 index 5e57347..3916042 100644
77278 --- a/net/rds/iw_recv.c
77279 +++ b/net/rds/iw_recv.c
77280 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
77281 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
77282 int ack_required)
77283 {
77284 - atomic64_set(&ic->i_ack_next, seq);
77285 + atomic64_set_unchecked(&ic->i_ack_next, seq);
77286 if (ack_required) {
77287 smp_mb__before_clear_bit();
77288 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77289 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
77290 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77291 smp_mb__after_clear_bit();
77292
77293 - return atomic64_read(&ic->i_ack_next);
77294 + return atomic64_read_unchecked(&ic->i_ack_next);
77295 }
77296 #endif
77297
77298 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
77299 index edac9ef..16bcb98 100644
77300 --- a/net/rds/tcp.c
77301 +++ b/net/rds/tcp.c
77302 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
77303 int val = 1;
77304
77305 set_fs(KERNEL_DS);
77306 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
77307 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
77308 sizeof(val));
77309 set_fs(oldfs);
77310 }
77311 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
77312 index 1b4fd68..2234175 100644
77313 --- a/net/rds/tcp_send.c
77314 +++ b/net/rds/tcp_send.c
77315 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
77316
77317 oldfs = get_fs();
77318 set_fs(KERNEL_DS);
77319 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
77320 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
77321 sizeof(val));
77322 set_fs(oldfs);
77323 }
77324 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
77325 index 74c064c..fdec26f 100644
77326 --- a/net/rxrpc/af_rxrpc.c
77327 +++ b/net/rxrpc/af_rxrpc.c
77328 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
77329 __be32 rxrpc_epoch;
77330
77331 /* current debugging ID */
77332 -atomic_t rxrpc_debug_id;
77333 +atomic_unchecked_t rxrpc_debug_id;
77334
77335 /* count of skbs currently in use */
77336 atomic_t rxrpc_n_skbs;
77337 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
77338 index c3126e8..21facc7 100644
77339 --- a/net/rxrpc/ar-ack.c
77340 +++ b/net/rxrpc/ar-ack.c
77341 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
77342
77343 _enter("{%d,%d,%d,%d},",
77344 call->acks_hard, call->acks_unacked,
77345 - atomic_read(&call->sequence),
77346 + atomic_read_unchecked(&call->sequence),
77347 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
77348
77349 stop = 0;
77350 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
77351
77352 /* each Tx packet has a new serial number */
77353 sp->hdr.serial =
77354 - htonl(atomic_inc_return(&call->conn->serial));
77355 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
77356
77357 hdr = (struct rxrpc_header *) txb->head;
77358 hdr->serial = sp->hdr.serial;
77359 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
77360 */
77361 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
77362 {
77363 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
77364 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
77365 }
77366
77367 /*
77368 @@ -629,7 +629,7 @@ process_further:
77369
77370 latest = ntohl(sp->hdr.serial);
77371 hard = ntohl(ack.firstPacket);
77372 - tx = atomic_read(&call->sequence);
77373 + tx = atomic_read_unchecked(&call->sequence);
77374
77375 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77376 latest,
77377 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
77378 goto maybe_reschedule;
77379
77380 send_ACK_with_skew:
77381 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
77382 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
77383 ntohl(ack.serial));
77384 send_ACK:
77385 mtu = call->conn->trans->peer->if_mtu;
77386 @@ -1173,7 +1173,7 @@ send_ACK:
77387 ackinfo.rxMTU = htonl(5692);
77388 ackinfo.jumbo_max = htonl(4);
77389
77390 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77391 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77392 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77393 ntohl(hdr.serial),
77394 ntohs(ack.maxSkew),
77395 @@ -1191,7 +1191,7 @@ send_ACK:
77396 send_message:
77397 _debug("send message");
77398
77399 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77400 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77401 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
77402 send_message_2:
77403
77404 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
77405 index bf656c2..48f9d27 100644
77406 --- a/net/rxrpc/ar-call.c
77407 +++ b/net/rxrpc/ar-call.c
77408 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
77409 spin_lock_init(&call->lock);
77410 rwlock_init(&call->state_lock);
77411 atomic_set(&call->usage, 1);
77412 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
77413 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77414 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
77415
77416 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
77417 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
77418 index 4106ca9..a338d7a 100644
77419 --- a/net/rxrpc/ar-connection.c
77420 +++ b/net/rxrpc/ar-connection.c
77421 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
77422 rwlock_init(&conn->lock);
77423 spin_lock_init(&conn->state_lock);
77424 atomic_set(&conn->usage, 1);
77425 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
77426 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77427 conn->avail_calls = RXRPC_MAXCALLS;
77428 conn->size_align = 4;
77429 conn->header_size = sizeof(struct rxrpc_header);
77430 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
77431 index e7ed43a..6afa140 100644
77432 --- a/net/rxrpc/ar-connevent.c
77433 +++ b/net/rxrpc/ar-connevent.c
77434 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
77435
77436 len = iov[0].iov_len + iov[1].iov_len;
77437
77438 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77439 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77440 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
77441
77442 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77443 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
77444 index 1a2b0633..e8d1382 100644
77445 --- a/net/rxrpc/ar-input.c
77446 +++ b/net/rxrpc/ar-input.c
77447 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
77448 /* track the latest serial number on this connection for ACK packet
77449 * information */
77450 serial = ntohl(sp->hdr.serial);
77451 - hi_serial = atomic_read(&call->conn->hi_serial);
77452 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
77453 while (serial > hi_serial)
77454 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
77455 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
77456 serial);
77457
77458 /* request ACK generation for any ACK or DATA packet that requests
77459 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
77460 index 8e22bd3..f66d1c0 100644
77461 --- a/net/rxrpc/ar-internal.h
77462 +++ b/net/rxrpc/ar-internal.h
77463 @@ -272,8 +272,8 @@ struct rxrpc_connection {
77464 int error; /* error code for local abort */
77465 int debug_id; /* debug ID for printks */
77466 unsigned call_counter; /* call ID counter */
77467 - atomic_t serial; /* packet serial number counter */
77468 - atomic_t hi_serial; /* highest serial number received */
77469 + atomic_unchecked_t serial; /* packet serial number counter */
77470 + atomic_unchecked_t hi_serial; /* highest serial number received */
77471 u8 avail_calls; /* number of calls available */
77472 u8 size_align; /* data size alignment (for security) */
77473 u8 header_size; /* rxrpc + security header size */
77474 @@ -346,7 +346,7 @@ struct rxrpc_call {
77475 spinlock_t lock;
77476 rwlock_t state_lock; /* lock for state transition */
77477 atomic_t usage;
77478 - atomic_t sequence; /* Tx data packet sequence counter */
77479 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
77480 u32 abort_code; /* local/remote abort code */
77481 enum { /* current state of call */
77482 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
77483 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
77484 */
77485 extern atomic_t rxrpc_n_skbs;
77486 extern __be32 rxrpc_epoch;
77487 -extern atomic_t rxrpc_debug_id;
77488 +extern atomic_unchecked_t rxrpc_debug_id;
77489 extern struct workqueue_struct *rxrpc_workqueue;
77490
77491 /*
77492 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
77493 index 87f7135..74d3703 100644
77494 --- a/net/rxrpc/ar-local.c
77495 +++ b/net/rxrpc/ar-local.c
77496 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
77497 spin_lock_init(&local->lock);
77498 rwlock_init(&local->services_lock);
77499 atomic_set(&local->usage, 1);
77500 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
77501 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77502 memcpy(&local->srx, srx, sizeof(*srx));
77503 }
77504
77505 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
77506 index 16ae887..d24f12b 100644
77507 --- a/net/rxrpc/ar-output.c
77508 +++ b/net/rxrpc/ar-output.c
77509 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
77510 sp->hdr.cid = call->cid;
77511 sp->hdr.callNumber = call->call_id;
77512 sp->hdr.seq =
77513 - htonl(atomic_inc_return(&call->sequence));
77514 + htonl(atomic_inc_return_unchecked(&call->sequence));
77515 sp->hdr.serial =
77516 - htonl(atomic_inc_return(&conn->serial));
77517 + htonl(atomic_inc_return_unchecked(&conn->serial));
77518 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
77519 sp->hdr.userStatus = 0;
77520 sp->hdr.securityIndex = conn->security_ix;
77521 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
77522 index 2754f09..b20e38f 100644
77523 --- a/net/rxrpc/ar-peer.c
77524 +++ b/net/rxrpc/ar-peer.c
77525 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
77526 INIT_LIST_HEAD(&peer->error_targets);
77527 spin_lock_init(&peer->lock);
77528 atomic_set(&peer->usage, 1);
77529 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
77530 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77531 memcpy(&peer->srx, srx, sizeof(*srx));
77532
77533 rxrpc_assess_MTU_size(peer);
77534 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
77535 index 38047f7..9f48511 100644
77536 --- a/net/rxrpc/ar-proc.c
77537 +++ b/net/rxrpc/ar-proc.c
77538 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
77539 atomic_read(&conn->usage),
77540 rxrpc_conn_states[conn->state],
77541 key_serial(conn->key),
77542 - atomic_read(&conn->serial),
77543 - atomic_read(&conn->hi_serial));
77544 + atomic_read_unchecked(&conn->serial),
77545 + atomic_read_unchecked(&conn->hi_serial));
77546
77547 return 0;
77548 }
77549 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
77550 index 92df566..87ec1bf 100644
77551 --- a/net/rxrpc/ar-transport.c
77552 +++ b/net/rxrpc/ar-transport.c
77553 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
77554 spin_lock_init(&trans->client_lock);
77555 rwlock_init(&trans->conn_lock);
77556 atomic_set(&trans->usage, 1);
77557 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
77558 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77559
77560 if (peer->srx.transport.family == AF_INET) {
77561 switch (peer->srx.transport_type) {
77562 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
77563 index 7635107..4670276 100644
77564 --- a/net/rxrpc/rxkad.c
77565 +++ b/net/rxrpc/rxkad.c
77566 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
77567
77568 len = iov[0].iov_len + iov[1].iov_len;
77569
77570 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77571 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77572 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
77573
77574 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77575 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
77576
77577 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
77578
77579 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
77580 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77581 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
77582
77583 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
77584 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
77585 index 1e2eee8..ce3967e 100644
77586 --- a/net/sctp/proc.c
77587 +++ b/net/sctp/proc.c
77588 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
77589 seq_printf(seq,
77590 "%8pK %8pK %-3d %-3d %-2d %-4d "
77591 "%4d %8d %8d %7d %5lu %-5d %5d ",
77592 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
77593 + assoc, sk,
77594 + sctp_sk(sk)->type, sk->sk_state,
77595 assoc->state, hash,
77596 assoc->assoc_id,
77597 assoc->sndbuf_used,
77598 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
77599 index 408ebd0..202aa85 100644
77600 --- a/net/sctp/socket.c
77601 +++ b/net/sctp/socket.c
77602 @@ -4574,7 +4574,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
77603 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
77604 if (space_left < addrlen)
77605 return -ENOMEM;
77606 - if (copy_to_user(to, &temp, addrlen))
77607 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
77608 return -EFAULT;
77609 to += addrlen;
77610 cnt++;
77611 diff --git a/net/socket.c b/net/socket.c
77612 index 28a96af..61a7a06 100644
77613 --- a/net/socket.c
77614 +++ b/net/socket.c
77615 @@ -88,6 +88,7 @@
77616 #include <linux/nsproxy.h>
77617 #include <linux/magic.h>
77618 #include <linux/slab.h>
77619 +#include <linux/in.h>
77620
77621 #include <asm/uaccess.h>
77622 #include <asm/unistd.h>
77623 @@ -105,6 +106,8 @@
77624 #include <linux/sockios.h>
77625 #include <linux/atalk.h>
77626
77627 +#include <linux/grsock.h>
77628 +
77629 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
77630 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
77631 unsigned long nr_segs, loff_t pos);
77632 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
77633 &sockfs_dentry_operations, SOCKFS_MAGIC);
77634 }
77635
77636 -static struct vfsmount *sock_mnt __read_mostly;
77637 +struct vfsmount *sock_mnt __read_mostly;
77638
77639 static struct file_system_type sock_fs_type = {
77640 .name = "sockfs",
77641 @@ -1207,6 +1210,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
77642 return -EAFNOSUPPORT;
77643 if (type < 0 || type >= SOCK_MAX)
77644 return -EINVAL;
77645 + if (protocol < 0)
77646 + return -EINVAL;
77647
77648 /* Compatibility.
77649
77650 @@ -1339,6 +1344,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
77651 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
77652 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
77653
77654 + if(!gr_search_socket(family, type, protocol)) {
77655 + retval = -EACCES;
77656 + goto out;
77657 + }
77658 +
77659 + if (gr_handle_sock_all(family, type, protocol)) {
77660 + retval = -EACCES;
77661 + goto out;
77662 + }
77663 +
77664 retval = sock_create(family, type, protocol, &sock);
77665 if (retval < 0)
77666 goto out;
77667 @@ -1451,6 +1466,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
77668 if (sock) {
77669 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
77670 if (err >= 0) {
77671 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
77672 + err = -EACCES;
77673 + goto error;
77674 + }
77675 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
77676 + if (err)
77677 + goto error;
77678 +
77679 err = security_socket_bind(sock,
77680 (struct sockaddr *)&address,
77681 addrlen);
77682 @@ -1459,6 +1482,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
77683 (struct sockaddr *)
77684 &address, addrlen);
77685 }
77686 +error:
77687 fput_light(sock->file, fput_needed);
77688 }
77689 return err;
77690 @@ -1482,10 +1506,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
77691 if ((unsigned)backlog > somaxconn)
77692 backlog = somaxconn;
77693
77694 + if (gr_handle_sock_server_other(sock->sk)) {
77695 + err = -EPERM;
77696 + goto error;
77697 + }
77698 +
77699 + err = gr_search_listen(sock);
77700 + if (err)
77701 + goto error;
77702 +
77703 err = security_socket_listen(sock, backlog);
77704 if (!err)
77705 err = sock->ops->listen(sock, backlog);
77706
77707 +error:
77708 fput_light(sock->file, fput_needed);
77709 }
77710 return err;
77711 @@ -1529,6 +1563,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
77712 newsock->type = sock->type;
77713 newsock->ops = sock->ops;
77714
77715 + if (gr_handle_sock_server_other(sock->sk)) {
77716 + err = -EPERM;
77717 + sock_release(newsock);
77718 + goto out_put;
77719 + }
77720 +
77721 + err = gr_search_accept(sock);
77722 + if (err) {
77723 + sock_release(newsock);
77724 + goto out_put;
77725 + }
77726 +
77727 /*
77728 * We don't need try_module_get here, as the listening socket (sock)
77729 * has the protocol module (sock->ops->owner) held.
77730 @@ -1567,6 +1613,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
77731 fd_install(newfd, newfile);
77732 err = newfd;
77733
77734 + gr_attach_curr_ip(newsock->sk);
77735 +
77736 out_put:
77737 fput_light(sock->file, fput_needed);
77738 out:
77739 @@ -1599,6 +1647,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
77740 int, addrlen)
77741 {
77742 struct socket *sock;
77743 + struct sockaddr *sck;
77744 struct sockaddr_storage address;
77745 int err, fput_needed;
77746
77747 @@ -1609,6 +1658,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
77748 if (err < 0)
77749 goto out_put;
77750
77751 + sck = (struct sockaddr *)&address;
77752 +
77753 + if (gr_handle_sock_client(sck)) {
77754 + err = -EACCES;
77755 + goto out_put;
77756 + }
77757 +
77758 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
77759 + if (err)
77760 + goto out_put;
77761 +
77762 err =
77763 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
77764 if (err)
77765 @@ -1970,7 +2030,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
77766 * checking falls down on this.
77767 */
77768 if (copy_from_user(ctl_buf,
77769 - (void __user __force *)msg_sys->msg_control,
77770 + (void __force_user *)msg_sys->msg_control,
77771 ctl_len))
77772 goto out_freectl;
77773 msg_sys->msg_control = ctl_buf;
77774 @@ -2140,7 +2200,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
77775 * kernel msghdr to use the kernel address space)
77776 */
77777
77778 - uaddr = (__force void __user *)msg_sys->msg_name;
77779 + uaddr = (void __force_user *)msg_sys->msg_name;
77780 uaddr_len = COMPAT_NAMELEN(msg);
77781 if (MSG_CMSG_COMPAT & flags) {
77782 err = verify_compat_iovec(msg_sys, iov,
77783 @@ -2768,7 +2828,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77784 }
77785
77786 ifr = compat_alloc_user_space(buf_size);
77787 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
77788 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
77789
77790 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
77791 return -EFAULT;
77792 @@ -2792,12 +2852,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77793 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
77794
77795 if (copy_in_user(rxnfc, compat_rxnfc,
77796 - (void *)(&rxnfc->fs.m_ext + 1) -
77797 - (void *)rxnfc) ||
77798 + (void __user *)(&rxnfc->fs.m_ext + 1) -
77799 + (void __user *)rxnfc) ||
77800 copy_in_user(&rxnfc->fs.ring_cookie,
77801 &compat_rxnfc->fs.ring_cookie,
77802 - (void *)(&rxnfc->fs.location + 1) -
77803 - (void *)&rxnfc->fs.ring_cookie) ||
77804 + (void __user *)(&rxnfc->fs.location + 1) -
77805 + (void __user *)&rxnfc->fs.ring_cookie) ||
77806 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
77807 sizeof(rxnfc->rule_cnt)))
77808 return -EFAULT;
77809 @@ -2809,12 +2869,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77810
77811 if (convert_out) {
77812 if (copy_in_user(compat_rxnfc, rxnfc,
77813 - (const void *)(&rxnfc->fs.m_ext + 1) -
77814 - (const void *)rxnfc) ||
77815 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
77816 + (const void __user *)rxnfc) ||
77817 copy_in_user(&compat_rxnfc->fs.ring_cookie,
77818 &rxnfc->fs.ring_cookie,
77819 - (const void *)(&rxnfc->fs.location + 1) -
77820 - (const void *)&rxnfc->fs.ring_cookie) ||
77821 + (const void __user *)(&rxnfc->fs.location + 1) -
77822 + (const void __user *)&rxnfc->fs.ring_cookie) ||
77823 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
77824 sizeof(rxnfc->rule_cnt)))
77825 return -EFAULT;
77826 @@ -2884,7 +2944,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
77827 old_fs = get_fs();
77828 set_fs(KERNEL_DS);
77829 err = dev_ioctl(net, cmd,
77830 - (struct ifreq __user __force *) &kifr);
77831 + (struct ifreq __force_user *) &kifr);
77832 set_fs(old_fs);
77833
77834 return err;
77835 @@ -2993,7 +3053,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
77836
77837 old_fs = get_fs();
77838 set_fs(KERNEL_DS);
77839 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
77840 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
77841 set_fs(old_fs);
77842
77843 if (cmd == SIOCGIFMAP && !err) {
77844 @@ -3098,7 +3158,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
77845 ret |= __get_user(rtdev, &(ur4->rt_dev));
77846 if (rtdev) {
77847 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
77848 - r4.rt_dev = (char __user __force *)devname;
77849 + r4.rt_dev = (char __force_user *)devname;
77850 devname[15] = 0;
77851 } else
77852 r4.rt_dev = NULL;
77853 @@ -3324,8 +3384,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
77854 int __user *uoptlen;
77855 int err;
77856
77857 - uoptval = (char __user __force *) optval;
77858 - uoptlen = (int __user __force *) optlen;
77859 + uoptval = (char __force_user *) optval;
77860 + uoptlen = (int __force_user *) optlen;
77861
77862 set_fs(KERNEL_DS);
77863 if (level == SOL_SOCKET)
77864 @@ -3345,7 +3405,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
77865 char __user *uoptval;
77866 int err;
77867
77868 - uoptval = (char __user __force *) optval;
77869 + uoptval = (char __force_user *) optval;
77870
77871 set_fs(KERNEL_DS);
77872 if (level == SOL_SOCKET)
77873 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
77874 index 8efd96c..b492ab2 100644
77875 --- a/net/sunrpc/sched.c
77876 +++ b/net/sunrpc/sched.c
77877 @@ -239,9 +239,9 @@ static int rpc_wait_bit_killable(void *word)
77878 #ifdef RPC_DEBUG
77879 static void rpc_task_set_debuginfo(struct rpc_task *task)
77880 {
77881 - static atomic_t rpc_pid;
77882 + static atomic_unchecked_t rpc_pid;
77883
77884 - task->tk_pid = atomic_inc_return(&rpc_pid);
77885 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
77886 }
77887 #else
77888 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
77889 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
77890 index 4645709..d41d668 100644
77891 --- a/net/sunrpc/svcsock.c
77892 +++ b/net/sunrpc/svcsock.c
77893 @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
77894 int buflen, unsigned int base)
77895 {
77896 size_t save_iovlen;
77897 - void __user *save_iovbase;
77898 + void *save_iovbase;
77899 unsigned int i;
77900 int ret;
77901
77902 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
77903 index 09af4fa..77110a9 100644
77904 --- a/net/sunrpc/xprtrdma/svc_rdma.c
77905 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
77906 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
77907 static unsigned int min_max_inline = 4096;
77908 static unsigned int max_max_inline = 65536;
77909
77910 -atomic_t rdma_stat_recv;
77911 -atomic_t rdma_stat_read;
77912 -atomic_t rdma_stat_write;
77913 -atomic_t rdma_stat_sq_starve;
77914 -atomic_t rdma_stat_rq_starve;
77915 -atomic_t rdma_stat_rq_poll;
77916 -atomic_t rdma_stat_rq_prod;
77917 -atomic_t rdma_stat_sq_poll;
77918 -atomic_t rdma_stat_sq_prod;
77919 +atomic_unchecked_t rdma_stat_recv;
77920 +atomic_unchecked_t rdma_stat_read;
77921 +atomic_unchecked_t rdma_stat_write;
77922 +atomic_unchecked_t rdma_stat_sq_starve;
77923 +atomic_unchecked_t rdma_stat_rq_starve;
77924 +atomic_unchecked_t rdma_stat_rq_poll;
77925 +atomic_unchecked_t rdma_stat_rq_prod;
77926 +atomic_unchecked_t rdma_stat_sq_poll;
77927 +atomic_unchecked_t rdma_stat_sq_prod;
77928
77929 /* Temporary NFS request map and context caches */
77930 struct kmem_cache *svc_rdma_map_cachep;
77931 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
77932 len -= *ppos;
77933 if (len > *lenp)
77934 len = *lenp;
77935 - if (len && copy_to_user(buffer, str_buf, len))
77936 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
77937 return -EFAULT;
77938 *lenp = len;
77939 *ppos += len;
77940 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
77941 {
77942 .procname = "rdma_stat_read",
77943 .data = &rdma_stat_read,
77944 - .maxlen = sizeof(atomic_t),
77945 + .maxlen = sizeof(atomic_unchecked_t),
77946 .mode = 0644,
77947 .proc_handler = read_reset_stat,
77948 },
77949 {
77950 .procname = "rdma_stat_recv",
77951 .data = &rdma_stat_recv,
77952 - .maxlen = sizeof(atomic_t),
77953 + .maxlen = sizeof(atomic_unchecked_t),
77954 .mode = 0644,
77955 .proc_handler = read_reset_stat,
77956 },
77957 {
77958 .procname = "rdma_stat_write",
77959 .data = &rdma_stat_write,
77960 - .maxlen = sizeof(atomic_t),
77961 + .maxlen = sizeof(atomic_unchecked_t),
77962 .mode = 0644,
77963 .proc_handler = read_reset_stat,
77964 },
77965 {
77966 .procname = "rdma_stat_sq_starve",
77967 .data = &rdma_stat_sq_starve,
77968 - .maxlen = sizeof(atomic_t),
77969 + .maxlen = sizeof(atomic_unchecked_t),
77970 .mode = 0644,
77971 .proc_handler = read_reset_stat,
77972 },
77973 {
77974 .procname = "rdma_stat_rq_starve",
77975 .data = &rdma_stat_rq_starve,
77976 - .maxlen = sizeof(atomic_t),
77977 + .maxlen = sizeof(atomic_unchecked_t),
77978 .mode = 0644,
77979 .proc_handler = read_reset_stat,
77980 },
77981 {
77982 .procname = "rdma_stat_rq_poll",
77983 .data = &rdma_stat_rq_poll,
77984 - .maxlen = sizeof(atomic_t),
77985 + .maxlen = sizeof(atomic_unchecked_t),
77986 .mode = 0644,
77987 .proc_handler = read_reset_stat,
77988 },
77989 {
77990 .procname = "rdma_stat_rq_prod",
77991 .data = &rdma_stat_rq_prod,
77992 - .maxlen = sizeof(atomic_t),
77993 + .maxlen = sizeof(atomic_unchecked_t),
77994 .mode = 0644,
77995 .proc_handler = read_reset_stat,
77996 },
77997 {
77998 .procname = "rdma_stat_sq_poll",
77999 .data = &rdma_stat_sq_poll,
78000 - .maxlen = sizeof(atomic_t),
78001 + .maxlen = sizeof(atomic_unchecked_t),
78002 .mode = 0644,
78003 .proc_handler = read_reset_stat,
78004 },
78005 {
78006 .procname = "rdma_stat_sq_prod",
78007 .data = &rdma_stat_sq_prod,
78008 - .maxlen = sizeof(atomic_t),
78009 + .maxlen = sizeof(atomic_unchecked_t),
78010 .mode = 0644,
78011 .proc_handler = read_reset_stat,
78012 },
78013 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
78014 index df67211..c354b13 100644
78015 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
78016 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
78017 @@ -499,7 +499,7 @@ next_sge:
78018 svc_rdma_put_context(ctxt, 0);
78019 goto out;
78020 }
78021 - atomic_inc(&rdma_stat_read);
78022 + atomic_inc_unchecked(&rdma_stat_read);
78023
78024 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
78025 chl_map->ch[ch_no].count -= read_wr.num_sge;
78026 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
78027 dto_q);
78028 list_del_init(&ctxt->dto_q);
78029 } else {
78030 - atomic_inc(&rdma_stat_rq_starve);
78031 + atomic_inc_unchecked(&rdma_stat_rq_starve);
78032 clear_bit(XPT_DATA, &xprt->xpt_flags);
78033 ctxt = NULL;
78034 }
78035 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
78036 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
78037 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
78038 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
78039 - atomic_inc(&rdma_stat_recv);
78040 + atomic_inc_unchecked(&rdma_stat_recv);
78041
78042 /* Build up the XDR from the receive buffers. */
78043 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
78044 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
78045 index 249a835..fb2794b 100644
78046 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
78047 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
78048 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
78049 write_wr.wr.rdma.remote_addr = to;
78050
78051 /* Post It */
78052 - atomic_inc(&rdma_stat_write);
78053 + atomic_inc_unchecked(&rdma_stat_write);
78054 if (svc_rdma_send(xprt, &write_wr))
78055 goto err;
78056 return 0;
78057 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
78058 index 894cb42..cf5bafb 100644
78059 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
78060 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
78061 @@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
78062 return;
78063
78064 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
78065 - atomic_inc(&rdma_stat_rq_poll);
78066 + atomic_inc_unchecked(&rdma_stat_rq_poll);
78067
78068 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
78069 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
78070 @@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
78071 }
78072
78073 if (ctxt)
78074 - atomic_inc(&rdma_stat_rq_prod);
78075 + atomic_inc_unchecked(&rdma_stat_rq_prod);
78076
78077 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
78078 /*
78079 @@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
78080 return;
78081
78082 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
78083 - atomic_inc(&rdma_stat_sq_poll);
78084 + atomic_inc_unchecked(&rdma_stat_sq_poll);
78085 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
78086 if (wc.status != IB_WC_SUCCESS)
78087 /* Close the transport */
78088 @@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
78089 }
78090
78091 if (ctxt)
78092 - atomic_inc(&rdma_stat_sq_prod);
78093 + atomic_inc_unchecked(&rdma_stat_sq_prod);
78094 }
78095
78096 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
78097 @@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
78098 spin_lock_bh(&xprt->sc_lock);
78099 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
78100 spin_unlock_bh(&xprt->sc_lock);
78101 - atomic_inc(&rdma_stat_sq_starve);
78102 + atomic_inc_unchecked(&rdma_stat_sq_starve);
78103
78104 /* See if we can opportunistically reap SQ WR to make room */
78105 sq_cq_reap(xprt);
78106 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
78107 index e758139..d29ea47 100644
78108 --- a/net/sysctl_net.c
78109 +++ b/net/sysctl_net.c
78110 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
78111 struct ctl_table *table)
78112 {
78113 /* Allow network administrator to have same access as root. */
78114 - if (capable(CAP_NET_ADMIN)) {
78115 + if (capable_nolog(CAP_NET_ADMIN)) {
78116 int mode = (table->mode >> 6) & 7;
78117 return (mode << 6) | (mode << 3) | mode;
78118 }
78119 diff --git a/net/tipc/link.c b/net/tipc/link.c
78120 index ac1832a..533ed97 100644
78121 --- a/net/tipc/link.c
78122 +++ b/net/tipc/link.c
78123 @@ -1205,7 +1205,7 @@ static int link_send_sections_long(struct tipc_port *sender,
78124 struct tipc_msg fragm_hdr;
78125 struct sk_buff *buf, *buf_chain, *prev;
78126 u32 fragm_crs, fragm_rest, hsz, sect_rest;
78127 - const unchar *sect_crs;
78128 + const unchar __user *sect_crs;
78129 int curr_sect;
78130 u32 fragm_no;
78131
78132 @@ -1249,7 +1249,7 @@ again:
78133
78134 if (!sect_rest) {
78135 sect_rest = msg_sect[++curr_sect].iov_len;
78136 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
78137 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
78138 }
78139
78140 if (sect_rest < fragm_rest)
78141 @@ -1268,7 +1268,7 @@ error:
78142 }
78143 } else
78144 skb_copy_to_linear_data_offset(buf, fragm_crs,
78145 - sect_crs, sz);
78146 + (const void __force_kernel *)sect_crs, sz);
78147 sect_crs += sz;
78148 sect_rest -= sz;
78149 fragm_crs += sz;
78150 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
78151 index 3e4d3e2..27b55dc 100644
78152 --- a/net/tipc/msg.c
78153 +++ b/net/tipc/msg.c
78154 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
78155 msg_sect[cnt].iov_len);
78156 else
78157 skb_copy_to_linear_data_offset(*buf, pos,
78158 - msg_sect[cnt].iov_base,
78159 + (const void __force_kernel *)msg_sect[cnt].iov_base,
78160 msg_sect[cnt].iov_len);
78161 pos += msg_sect[cnt].iov_len;
78162 }
78163 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
78164 index 8c49566..14510cb 100644
78165 --- a/net/tipc/subscr.c
78166 +++ b/net/tipc/subscr.c
78167 @@ -101,7 +101,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
78168 {
78169 struct iovec msg_sect;
78170
78171 - msg_sect.iov_base = (void *)&sub->evt;
78172 + msg_sect.iov_base = (void __force_user *)&sub->evt;
78173 msg_sect.iov_len = sizeof(struct tipc_event);
78174
78175 sub->evt.event = htohl(event, sub->swap);
78176 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
78177 index 85d3bb7..79f4487 100644
78178 --- a/net/unix/af_unix.c
78179 +++ b/net/unix/af_unix.c
78180 @@ -770,6 +770,12 @@ static struct sock *unix_find_other(struct net *net,
78181 err = -ECONNREFUSED;
78182 if (!S_ISSOCK(inode->i_mode))
78183 goto put_fail;
78184 +
78185 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
78186 + err = -EACCES;
78187 + goto put_fail;
78188 + }
78189 +
78190 u = unix_find_socket_byinode(inode);
78191 if (!u)
78192 goto put_fail;
78193 @@ -790,6 +796,13 @@ static struct sock *unix_find_other(struct net *net,
78194 if (u) {
78195 struct dentry *dentry;
78196 dentry = unix_sk(u)->dentry;
78197 +
78198 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
78199 + err = -EPERM;
78200 + sock_put(u);
78201 + goto fail;
78202 + }
78203 +
78204 if (dentry)
78205 touch_atime(unix_sk(u)->mnt, dentry);
78206 } else
78207 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
78208 err = security_path_mknod(&path, dentry, mode, 0);
78209 if (err)
78210 goto out_mknod_drop_write;
78211 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
78212 + err = -EACCES;
78213 + goto out_mknod_drop_write;
78214 + }
78215 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
78216 out_mknod_drop_write:
78217 mnt_drop_write(path.mnt);
78218 if (err)
78219 goto out_mknod_dput;
78220 +
78221 + gr_handle_create(dentry, path.mnt);
78222 +
78223 mutex_unlock(&path.dentry->d_inode->i_mutex);
78224 dput(path.dentry);
78225 path.dentry = dentry;
78226 diff --git a/net/wireless/core.h b/net/wireless/core.h
78227 index 43ad9c8..ab5127c 100644
78228 --- a/net/wireless/core.h
78229 +++ b/net/wireless/core.h
78230 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
78231 struct mutex mtx;
78232
78233 /* rfkill support */
78234 - struct rfkill_ops rfkill_ops;
78235 + rfkill_ops_no_const rfkill_ops;
78236 struct rfkill *rfkill;
78237 struct work_struct rfkill_sync;
78238
78239 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
78240 index 0af7f54..c916d2f 100644
78241 --- a/net/wireless/wext-core.c
78242 +++ b/net/wireless/wext-core.c
78243 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
78244 */
78245
78246 /* Support for very large requests */
78247 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
78248 - (user_length > descr->max_tokens)) {
78249 + if (user_length > descr->max_tokens) {
78250 /* Allow userspace to GET more than max so
78251 * we can support any size GET requests.
78252 * There is still a limit : -ENOMEM.
78253 @@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
78254 }
78255 }
78256
78257 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
78258 - /*
78259 - * If this is a GET, but not NOMAX, it means that the extra
78260 - * data is not bounded by userspace, but by max_tokens. Thus
78261 - * set the length to max_tokens. This matches the extra data
78262 - * allocation.
78263 - * The driver should fill it with the number of tokens it
78264 - * provided, and it may check iwp->length rather than having
78265 - * knowledge of max_tokens. If the driver doesn't change the
78266 - * iwp->length, this ioctl just copies back max_token tokens
78267 - * filled with zeroes. Hopefully the driver isn't claiming
78268 - * them to be valid data.
78269 - */
78270 - iwp->length = descr->max_tokens;
78271 - }
78272 -
78273 err = handler(dev, info, (union iwreq_data *) iwp, extra);
78274
78275 iwp->length += essid_compat;
78276 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
78277 index 7661576..80f7627 100644
78278 --- a/net/xfrm/xfrm_policy.c
78279 +++ b/net/xfrm/xfrm_policy.c
78280 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
78281 {
78282 policy->walk.dead = 1;
78283
78284 - atomic_inc(&policy->genid);
78285 + atomic_inc_unchecked(&policy->genid);
78286
78287 if (del_timer(&policy->timer))
78288 xfrm_pol_put(policy);
78289 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
78290 hlist_add_head(&policy->bydst, chain);
78291 xfrm_pol_hold(policy);
78292 net->xfrm.policy_count[dir]++;
78293 - atomic_inc(&flow_cache_genid);
78294 + atomic_inc_unchecked(&flow_cache_genid);
78295 if (delpol)
78296 __xfrm_policy_unlink(delpol, dir);
78297 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
78298 @@ -1530,7 +1530,7 @@ free_dst:
78299 goto out;
78300 }
78301
78302 -static int inline
78303 +static inline int
78304 xfrm_dst_alloc_copy(void **target, const void *src, int size)
78305 {
78306 if (!*target) {
78307 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
78308 return 0;
78309 }
78310
78311 -static int inline
78312 +static inline int
78313 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78314 {
78315 #ifdef CONFIG_XFRM_SUB_POLICY
78316 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78317 #endif
78318 }
78319
78320 -static int inline
78321 +static inline int
78322 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
78323 {
78324 #ifdef CONFIG_XFRM_SUB_POLICY
78325 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
78326
78327 xdst->num_pols = num_pols;
78328 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
78329 - xdst->policy_genid = atomic_read(&pols[0]->genid);
78330 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
78331
78332 return xdst;
78333 }
78334 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
78335 if (xdst->xfrm_genid != dst->xfrm->genid)
78336 return 0;
78337 if (xdst->num_pols > 0 &&
78338 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
78339 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
78340 return 0;
78341
78342 mtu = dst_mtu(dst->child);
78343 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
78344 sizeof(pol->xfrm_vec[i].saddr));
78345 pol->xfrm_vec[i].encap_family = mp->new_family;
78346 /* flush bundles */
78347 - atomic_inc(&pol->genid);
78348 + atomic_inc_unchecked(&pol->genid);
78349 }
78350 }
78351
78352 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
78353 index d2b366c..51ff91ebc 100644
78354 --- a/scripts/Makefile.build
78355 +++ b/scripts/Makefile.build
78356 @@ -109,7 +109,7 @@ endif
78357 endif
78358
78359 # Do not include host rules unless needed
78360 -ifneq ($(hostprogs-y)$(hostprogs-m),)
78361 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
78362 include scripts/Makefile.host
78363 endif
78364
78365 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
78366 index 686cb0d..9d653bf 100644
78367 --- a/scripts/Makefile.clean
78368 +++ b/scripts/Makefile.clean
78369 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
78370 __clean-files := $(extra-y) $(always) \
78371 $(targets) $(clean-files) \
78372 $(host-progs) \
78373 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
78374 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
78375 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
78376
78377 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
78378
78379 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
78380 index 1ac414f..a1c1451 100644
78381 --- a/scripts/Makefile.host
78382 +++ b/scripts/Makefile.host
78383 @@ -31,6 +31,7 @@
78384 # Note: Shared libraries consisting of C++ files are not supported
78385
78386 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
78387 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
78388
78389 # C code
78390 # Executables compiled from a single .c file
78391 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
78392 # Shared libaries (only .c supported)
78393 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
78394 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
78395 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
78396 # Remove .so files from "xxx-objs"
78397 host-cobjs := $(filter-out %.so,$(host-cobjs))
78398
78399 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
78400 index cb1f50c..cef2a7c 100644
78401 --- a/scripts/basic/fixdep.c
78402 +++ b/scripts/basic/fixdep.c
78403 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
78404 /*
78405 * Lookup a value in the configuration string.
78406 */
78407 -static int is_defined_config(const char *name, int len, unsigned int hash)
78408 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
78409 {
78410 struct item *aux;
78411
78412 @@ -211,10 +211,10 @@ static void clear_config(void)
78413 /*
78414 * Record the use of a CONFIG_* word.
78415 */
78416 -static void use_config(const char *m, int slen)
78417 +static void use_config(const char *m, unsigned int slen)
78418 {
78419 unsigned int hash = strhash(m, slen);
78420 - int c, i;
78421 + unsigned int c, i;
78422
78423 if (is_defined_config(m, slen, hash))
78424 return;
78425 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
78426
78427 static void parse_config_file(const char *map, size_t len)
78428 {
78429 - const int *end = (const int *) (map + len);
78430 + const unsigned int *end = (const unsigned int *) (map + len);
78431 /* start at +1, so that p can never be < map */
78432 - const int *m = (const int *) map + 1;
78433 + const unsigned int *m = (const unsigned int *) map + 1;
78434 const char *p, *q;
78435
78436 for (; m < end; m++) {
78437 @@ -406,7 +406,7 @@ static void print_deps(void)
78438 static void traps(void)
78439 {
78440 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
78441 - int *p = (int *)test;
78442 + unsigned int *p = (unsigned int *)test;
78443
78444 if (*p != INT_CONF) {
78445 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
78446 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
78447 new file mode 100644
78448 index 0000000..8729101
78449 --- /dev/null
78450 +++ b/scripts/gcc-plugin.sh
78451 @@ -0,0 +1,2 @@
78452 +#!/bin/sh
78453 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
78454 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
78455 index b89efe6..2c30808 100644
78456 --- a/scripts/mod/file2alias.c
78457 +++ b/scripts/mod/file2alias.c
78458 @@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
78459 unsigned long size, unsigned long id_size,
78460 void *symval)
78461 {
78462 - int i;
78463 + unsigned int i;
78464
78465 if (size % id_size || size < id_size) {
78466 if (cross_build != 0)
78467 @@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
78468 /* USB is special because the bcdDevice can be matched against a numeric range */
78469 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
78470 static void do_usb_entry(struct usb_device_id *id,
78471 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
78472 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
78473 unsigned char range_lo, unsigned char range_hi,
78474 unsigned char max, struct module *mod)
78475 {
78476 @@ -259,7 +259,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
78477 {
78478 unsigned int devlo, devhi;
78479 unsigned char chi, clo, max;
78480 - int ndigits;
78481 + unsigned int ndigits;
78482
78483 id->match_flags = TO_NATIVE(id->match_flags);
78484 id->idVendor = TO_NATIVE(id->idVendor);
78485 @@ -501,7 +501,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
78486 for (i = 0; i < count; i++) {
78487 const char *id = (char *)devs[i].id;
78488 char acpi_id[sizeof(devs[0].id)];
78489 - int j;
78490 + unsigned int j;
78491
78492 buf_printf(&mod->dev_table_buf,
78493 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78494 @@ -531,7 +531,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78495
78496 for (j = 0; j < PNP_MAX_DEVICES; j++) {
78497 const char *id = (char *)card->devs[j].id;
78498 - int i2, j2;
78499 + unsigned int i2, j2;
78500 int dup = 0;
78501
78502 if (!id[0])
78503 @@ -557,7 +557,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78504 /* add an individual alias for every device entry */
78505 if (!dup) {
78506 char acpi_id[sizeof(card->devs[0].id)];
78507 - int k;
78508 + unsigned int k;
78509
78510 buf_printf(&mod->dev_table_buf,
78511 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78512 @@ -882,7 +882,7 @@ static void dmi_ascii_filter(char *d, const char *s)
78513 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
78514 char *alias)
78515 {
78516 - int i, j;
78517 + unsigned int i, j;
78518
78519 sprintf(alias, "dmi*");
78520
78521 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
78522 index c4e7d15..4241aef 100644
78523 --- a/scripts/mod/modpost.c
78524 +++ b/scripts/mod/modpost.c
78525 @@ -922,6 +922,7 @@ enum mismatch {
78526 ANY_INIT_TO_ANY_EXIT,
78527 ANY_EXIT_TO_ANY_INIT,
78528 EXPORT_TO_INIT_EXIT,
78529 + DATA_TO_TEXT
78530 };
78531
78532 struct sectioncheck {
78533 @@ -1030,6 +1031,12 @@ const struct sectioncheck sectioncheck[] = {
78534 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
78535 .mismatch = EXPORT_TO_INIT_EXIT,
78536 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
78537 +},
78538 +/* Do not reference code from writable data */
78539 +{
78540 + .fromsec = { DATA_SECTIONS, NULL },
78541 + .tosec = { TEXT_SECTIONS, NULL },
78542 + .mismatch = DATA_TO_TEXT
78543 }
78544 };
78545
78546 @@ -1152,10 +1159,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
78547 continue;
78548 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
78549 continue;
78550 - if (sym->st_value == addr)
78551 - return sym;
78552 /* Find a symbol nearby - addr are maybe negative */
78553 d = sym->st_value - addr;
78554 + if (d == 0)
78555 + return sym;
78556 if (d < 0)
78557 d = addr - sym->st_value;
78558 if (d < distance) {
78559 @@ -1434,6 +1441,14 @@ static void report_sec_mismatch(const char *modname,
78560 tosym, prl_to, prl_to, tosym);
78561 free(prl_to);
78562 break;
78563 + case DATA_TO_TEXT:
78564 +/*
78565 + fprintf(stderr,
78566 + "The variable %s references\n"
78567 + "the %s %s%s%s\n",
78568 + fromsym, to, sec2annotation(tosec), tosym, to_p);
78569 +*/
78570 + break;
78571 }
78572 fprintf(stderr, "\n");
78573 }
78574 @@ -1668,7 +1683,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
78575 static void check_sec_ref(struct module *mod, const char *modname,
78576 struct elf_info *elf)
78577 {
78578 - int i;
78579 + unsigned int i;
78580 Elf_Shdr *sechdrs = elf->sechdrs;
78581
78582 /* Walk through all sections */
78583 @@ -1766,7 +1781,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
78584 va_end(ap);
78585 }
78586
78587 -void buf_write(struct buffer *buf, const char *s, int len)
78588 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
78589 {
78590 if (buf->size - buf->pos < len) {
78591 buf->size += len + SZ;
78592 @@ -1984,7 +1999,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
78593 if (fstat(fileno(file), &st) < 0)
78594 goto close_write;
78595
78596 - if (st.st_size != b->pos)
78597 + if (st.st_size != (off_t)b->pos)
78598 goto close_write;
78599
78600 tmp = NOFAIL(malloc(b->pos));
78601 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
78602 index 51207e4..f7d603d 100644
78603 --- a/scripts/mod/modpost.h
78604 +++ b/scripts/mod/modpost.h
78605 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
78606
78607 struct buffer {
78608 char *p;
78609 - int pos;
78610 - int size;
78611 + unsigned int pos;
78612 + unsigned int size;
78613 };
78614
78615 void __attribute__((format(printf, 2, 3)))
78616 buf_printf(struct buffer *buf, const char *fmt, ...);
78617
78618 void
78619 -buf_write(struct buffer *buf, const char *s, int len);
78620 +buf_write(struct buffer *buf, const char *s, unsigned int len);
78621
78622 struct module {
78623 struct module *next;
78624 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
78625 index 9dfcd6d..099068e 100644
78626 --- a/scripts/mod/sumversion.c
78627 +++ b/scripts/mod/sumversion.c
78628 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
78629 goto out;
78630 }
78631
78632 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
78633 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
78634 warn("writing sum in %s failed: %s\n",
78635 filename, strerror(errno));
78636 goto out;
78637 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
78638 index 5c11312..72742b5 100644
78639 --- a/scripts/pnmtologo.c
78640 +++ b/scripts/pnmtologo.c
78641 @@ -237,14 +237,14 @@ static void write_header(void)
78642 fprintf(out, " * Linux logo %s\n", logoname);
78643 fputs(" */\n\n", out);
78644 fputs("#include <linux/linux_logo.h>\n\n", out);
78645 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
78646 + fprintf(out, "static unsigned char %s_data[] = {\n",
78647 logoname);
78648 }
78649
78650 static void write_footer(void)
78651 {
78652 fputs("\n};\n\n", out);
78653 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
78654 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
78655 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
78656 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
78657 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
78658 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
78659 fputs("\n};\n\n", out);
78660
78661 /* write logo clut */
78662 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
78663 + fprintf(out, "static unsigned char %s_clut[] = {\n",
78664 logoname);
78665 write_hex_cnt = 0;
78666 for (i = 0; i < logo_clutsize; i++) {
78667 diff --git a/scripts/tags.sh b/scripts/tags.sh
78668 index 833813a..0bc8588 100755
78669 --- a/scripts/tags.sh
78670 +++ b/scripts/tags.sh
78671 @@ -116,7 +116,7 @@ docscope()
78672
78673 dogtags()
78674 {
78675 - all_sources | gtags -f -
78676 + all_sources | gtags -i -f -
78677 }
78678
78679 exuberant()
78680 diff --git a/security/Kconfig b/security/Kconfig
78681 index 51bd5a0..c37f5e6 100644
78682 --- a/security/Kconfig
78683 +++ b/security/Kconfig
78684 @@ -4,6 +4,640 @@
78685
78686 menu "Security options"
78687
78688 +source grsecurity/Kconfig
78689 +
78690 +menu "PaX"
78691 +
78692 + config ARCH_TRACK_EXEC_LIMIT
78693 + bool
78694 +
78695 + config PAX_KERNEXEC_PLUGIN
78696 + bool
78697 +
78698 + config PAX_PER_CPU_PGD
78699 + bool
78700 +
78701 + config TASK_SIZE_MAX_SHIFT
78702 + int
78703 + depends on X86_64
78704 + default 47 if !PAX_PER_CPU_PGD
78705 + default 42 if PAX_PER_CPU_PGD
78706 +
78707 + config PAX_ENABLE_PAE
78708 + bool
78709 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
78710 +
78711 +config PAX
78712 + bool "Enable various PaX features"
78713 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
78714 + help
78715 + This allows you to enable various PaX features. PaX adds
78716 + intrusion prevention mechanisms to the kernel that reduce
78717 + the risks posed by exploitable memory corruption bugs.
78718 +
78719 +menu "PaX Control"
78720 + depends on PAX
78721 +
78722 +config PAX_SOFTMODE
78723 + bool 'Support soft mode'
78724 + help
78725 + Enabling this option will allow you to run PaX in soft mode, that
78726 + is, PaX features will not be enforced by default, only on executables
78727 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
78728 + support as they are the only way to mark executables for soft mode use.
78729 +
78730 + Soft mode can be activated by using the "pax_softmode=1" kernel command
78731 + line option on boot. Furthermore you can control various PaX features
78732 + at runtime via the entries in /proc/sys/kernel/pax.
78733 +
78734 +config PAX_EI_PAX
78735 + bool 'Use legacy ELF header marking'
78736 + help
78737 + Enabling this option will allow you to control PaX features on
78738 + a per executable basis via the 'chpax' utility available at
78739 + http://pax.grsecurity.net/. The control flags will be read from
78740 + an otherwise reserved part of the ELF header. This marking has
78741 + numerous drawbacks (no support for soft-mode, toolchain does not
78742 + know about the non-standard use of the ELF header) therefore it
78743 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
78744 + support.
78745 +
78746 + If you have applications not marked by the PT_PAX_FLAGS ELF program
78747 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
78748 + option otherwise they will not get any protection.
78749 +
78750 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
78751 + support as well, they will override the legacy EI_PAX marks.
78752 +
78753 +config PAX_PT_PAX_FLAGS
78754 + bool 'Use ELF program header marking'
78755 + help
78756 + Enabling this option will allow you to control PaX features on
78757 + a per executable basis via the 'paxctl' utility available at
78758 + http://pax.grsecurity.net/. The control flags will be read from
78759 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
78760 + has the benefits of supporting both soft mode and being fully
78761 + integrated into the toolchain (the binutils patch is available
78762 + from http://pax.grsecurity.net).
78763 +
78764 + If you have applications not marked by the PT_PAX_FLAGS ELF program
78765 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
78766 + support otherwise they will not get any protection.
78767 +
78768 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
78769 + must make sure that the marks are the same if a binary has both marks.
78770 +
78771 + Note that if you enable the legacy EI_PAX marking support as well,
78772 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
78773 +
78774 +config PAX_XATTR_PAX_FLAGS
78775 + bool 'Use filesystem extended attributes marking'
78776 + select CIFS_XATTR if CIFS
78777 + select EXT2_FS_XATTR if EXT2_FS
78778 + select EXT3_FS_XATTR if EXT3_FS
78779 + select EXT4_FS_XATTR if EXT4_FS
78780 + select JFFS2_FS_XATTR if JFFS2_FS
78781 + select REISERFS_FS_XATTR if REISERFS_FS
78782 + select SQUASHFS_XATTR if SQUASHFS
78783 + select TMPFS_XATTR if TMPFS
78784 + select UBIFS_FS_XATTR if UBIFS_FS
78785 + help
78786 + Enabling this option will allow you to control PaX features on
78787 + a per executable basis via the 'setfattr' utility. The control
78788 + flags will be read from the user.pax.flags extended attribute of
78789 + the file. This marking has the benefit of supporting binary-only
78790 + applications that self-check themselves (e.g., skype) and would
78791 + not tolerate chpax/paxctl changes. The main drawback is that
78792 + extended attributes are not supported by some filesystems (e.g.,
78793 + isofs, udf, vfat) so copying files through such filesystems will
78794 + lose the extended attributes and these PaX markings.
78795 +
78796 + If you have applications not marked by the PT_PAX_FLAGS ELF program
78797 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
78798 + support otherwise they will not get any protection.
78799 +
78800 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
78801 + must make sure that the marks are the same if a binary has both marks.
78802 +
78803 + Note that if you enable the legacy EI_PAX marking support as well,
78804 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
78805 +
78806 +choice
78807 + prompt 'MAC system integration'
78808 + default PAX_HAVE_ACL_FLAGS
78809 + help
78810 + Mandatory Access Control systems have the option of controlling
78811 + PaX flags on a per executable basis, choose the method supported
78812 + by your particular system.
78813 +
78814 + - "none": if your MAC system does not interact with PaX,
78815 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
78816 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
78817 +
78818 + NOTE: this option is for developers/integrators only.
78819 +
78820 + config PAX_NO_ACL_FLAGS
78821 + bool 'none'
78822 +
78823 + config PAX_HAVE_ACL_FLAGS
78824 + bool 'direct'
78825 +
78826 + config PAX_HOOK_ACL_FLAGS
78827 + bool 'hook'
78828 +endchoice
78829 +
78830 +endmenu
78831 +
78832 +menu "Non-executable pages"
78833 + depends on PAX
78834 +
78835 +config PAX_NOEXEC
78836 + bool "Enforce non-executable pages"
78837 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
78838 + help
78839 + By design some architectures do not allow for protecting memory
78840 + pages against execution or even if they do, Linux does not make
78841 + use of this feature. In practice this means that if a page is
78842 + readable (such as the stack or heap) it is also executable.
78843 +
78844 + There is a well known exploit technique that makes use of this
78845 + fact and a common programming mistake where an attacker can
78846 + introduce code of his choice somewhere in the attacked program's
78847 + memory (typically the stack or the heap) and then execute it.
78848 +
78849 + If the attacked program was running with different (typically
78850 + higher) privileges than that of the attacker, then he can elevate
78851 + his own privilege level (e.g. get a root shell, write to files for
78852 + which he does not have write access to, etc).
78853 +
78854 + Enabling this option will let you choose from various features
78855 + that prevent the injection and execution of 'foreign' code in
78856 + a program.
78857 +
78858 + This will also break programs that rely on the old behaviour and
78859 + expect that dynamically allocated memory via the malloc() family
78860 + of functions is executable (which it is not). Notable examples
78861 + are the XFree86 4.x server, the java runtime and wine.
78862 +
78863 +config PAX_PAGEEXEC
78864 + bool "Paging based non-executable pages"
78865 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
78866 + select S390_SWITCH_AMODE if S390
78867 + select S390_EXEC_PROTECT if S390
78868 + select ARCH_TRACK_EXEC_LIMIT if X86_32
78869 + help
78870 + This implementation is based on the paging feature of the CPU.
78871 + On i386 without hardware non-executable bit support there is a
78872 + variable but usually low performance impact, however on Intel's
78873 + P4 core based CPUs it is very high so you should not enable this
78874 + for kernels meant to be used on such CPUs.
78875 +
78876 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
78877 + with hardware non-executable bit support there is no performance
78878 + impact, on ppc the impact is negligible.
78879 +
78880 + Note that several architectures require various emulations due to
78881 + badly designed userland ABIs, this will cause a performance impact
78882 + but will disappear as soon as userland is fixed. For example, ppc
78883 + userland MUST have been built with secure-plt by a recent toolchain.
78884 +
78885 +config PAX_SEGMEXEC
78886 + bool "Segmentation based non-executable pages"
78887 + depends on PAX_NOEXEC && X86_32
78888 + help
78889 + This implementation is based on the segmentation feature of the
78890 + CPU and has a very small performance impact, however applications
78891 + will be limited to a 1.5 GB address space instead of the normal
78892 + 3 GB.
78893 +
78894 +config PAX_EMUTRAMP
78895 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
78896 + default y if PARISC
78897 + help
78898 + There are some programs and libraries that for one reason or
78899 + another attempt to execute special small code snippets from
78900 + non-executable memory pages. Most notable examples are the
78901 + signal handler return code generated by the kernel itself and
78902 + the GCC trampolines.
78903 +
78904 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
78905 + such programs will no longer work under your kernel.
78906 +
78907 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
78908 + utilities to enable trampoline emulation for the affected programs
78909 + yet still have the protection provided by the non-executable pages.
78910 +
78911 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
78912 + your system will not even boot.
78913 +
78914 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
78915 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
78916 + for the affected files.
78917 +
78918 + NOTE: enabling this feature *may* open up a loophole in the
78919 + protection provided by non-executable pages that an attacker
78920 + could abuse. Therefore the best solution is to not have any
78921 + files on your system that would require this option. This can
78922 + be achieved by not using libc5 (which relies on the kernel
78923 + signal handler return code) and not using or rewriting programs
78924 + that make use of the nested function implementation of GCC.
78925 + Skilled users can just fix GCC itself so that it implements
78926 + nested function calls in a way that does not interfere with PaX.
78927 +
78928 +config PAX_EMUSIGRT
78929 + bool "Automatically emulate sigreturn trampolines"
78930 + depends on PAX_EMUTRAMP && PARISC
78931 + default y
78932 + help
78933 + Enabling this option will have the kernel automatically detect
78934 + and emulate signal return trampolines executing on the stack
78935 + that would otherwise lead to task termination.
78936 +
78937 + This solution is intended as a temporary one for users with
78938 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
78939 + Modula-3 runtime, etc) or executables linked to such, basically
78940 + everything that does not specify its own SA_RESTORER function in
78941 + normal executable memory like glibc 2.1+ does.
78942 +
78943 + On parisc you MUST enable this option, otherwise your system will
78944 + not even boot.
78945 +
78946 + NOTE: this feature cannot be disabled on a per executable basis
78947 + and since it *does* open up a loophole in the protection provided
78948 + by non-executable pages, the best solution is to not have any
78949 + files on your system that would require this option.
78950 +
78951 +config PAX_MPROTECT
78952 + bool "Restrict mprotect()"
78953 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
78954 + help
78955 + Enabling this option will prevent programs from
78956 + - changing the executable status of memory pages that were
78957 + not originally created as executable,
78958 + - making read-only executable pages writable again,
78959 + - creating executable pages from anonymous memory,
78960 + - making read-only-after-relocations (RELRO) data pages writable again.
78961 +
78962 + You should say Y here to complete the protection provided by
78963 + the enforcement of non-executable pages.
78964 +
78965 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
78966 + this feature on a per file basis.
78967 +
78968 +config PAX_MPROTECT_COMPAT
78969 + bool "Use legacy/compat protection demoting (read help)"
78970 + depends on PAX_MPROTECT
78971 + default n
78972 + help
78973 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
78974 + by sending the proper error code to the application. For some broken
78975 + userland, this can cause problems with Python or other applications. The
78976 + current implementation however allows for applications like clamav to
78977 + detect if JIT compilation/execution is allowed and to fall back gracefully
78978 + to an interpreter-based mode if it does not. While we encourage everyone
78979 + to use the current implementation as-is and push upstream to fix broken
78980 + userland (note that the RWX logging option can assist with this), in some
78981 + environments this may not be possible. Having to disable MPROTECT
78982 + completely on certain binaries reduces the security benefit of PaX,
78983 + so this option is provided for those environments to revert to the old
78984 + behavior.
78985 +
78986 +config PAX_ELFRELOCS
78987 + bool "Allow ELF text relocations (read help)"
78988 + depends on PAX_MPROTECT
78989 + default n
78990 + help
78991 + Non-executable pages and mprotect() restrictions are effective
78992 + in preventing the introduction of new executable code into an
78993 + attacked task's address space. There remain only two venues
78994 + for this kind of attack: if the attacker can execute already
78995 + existing code in the attacked task then he can either have it
78996 + create and mmap() a file containing his code or have it mmap()
78997 + an already existing ELF library that does not have position
78998 + independent code in it and use mprotect() on it to make it
78999 + writable and copy his code there. While protecting against
79000 + the former approach is beyond PaX, the latter can be prevented
79001 + by having only PIC ELF libraries on one's system (which do not
79002 + need to relocate their code). If you are sure this is your case,
79003 + as is the case with all modern Linux distributions, then leave
79004 + this option disabled. You should say 'n' here.
79005 +
79006 +config PAX_ETEXECRELOCS
79007 + bool "Allow ELF ET_EXEC text relocations"
79008 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
79009 + select PAX_ELFRELOCS
79010 + default y
79011 + help
79012 + On some architectures there are incorrectly created applications
79013 + that require text relocations and would not work without enabling
79014 + this option. If you are an alpha, ia64 or parisc user, you should
79015 + enable this option and disable it once you have made sure that
79016 + none of your applications need it.
79017 +
79018 +config PAX_EMUPLT
79019 + bool "Automatically emulate ELF PLT"
79020 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
79021 + default y
79022 + help
79023 + Enabling this option will have the kernel automatically detect
79024 + and emulate the Procedure Linkage Table entries in ELF files.
79025 + On some architectures such entries are in writable memory, and
79026 + become non-executable leading to task termination. Therefore
79027 + it is mandatory that you enable this option on alpha, parisc,
79028 + sparc and sparc64, otherwise your system would not even boot.
79029 +
79030 + NOTE: this feature *does* open up a loophole in the protection
79031 + provided by the non-executable pages, therefore the proper
79032 + solution is to modify the toolchain to produce a PLT that does
79033 + not need to be writable.
79034 +
79035 +config PAX_DLRESOLVE
79036 + bool 'Emulate old glibc resolver stub'
79037 + depends on PAX_EMUPLT && SPARC
79038 + default n
79039 + help
79040 + This option is needed if userland has an old glibc (before 2.4)
79041 + that puts a 'save' instruction into the runtime generated resolver
79042 + stub that needs special emulation.
79043 +
79044 +config PAX_KERNEXEC
79045 + bool "Enforce non-executable kernel pages"
79046 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
79047 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
79048 + select PAX_KERNEXEC_PLUGIN if X86_64
79049 + help
79050 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
79051 + that is, enabling this option will make it harder to inject
79052 + and execute 'foreign' code in kernel memory itself.
79053 +
79054 + Note that on x86_64 kernels there is a known regression when
79055 + this feature and KVM/VMX are both enabled in the host kernel.
79056 +
79057 +choice
79058 + prompt "Return Address Instrumentation Method"
79059 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
79060 + depends on PAX_KERNEXEC_PLUGIN
79061 + help
79062 + Select the method used to instrument function pointer dereferences.
79063 + Note that binary modules cannot be instrumented by this approach.
79064 +
79065 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
79066 + bool "bts"
79067 + help
79068 + This method is compatible with binary only modules but has
79069 + a higher runtime overhead.
79070 +
79071 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
79072 + bool "or"
79073 + depends on !PARAVIRT
79074 + help
79075 + This method is incompatible with binary only modules but has
79076 + a lower runtime overhead.
79077 +endchoice
79078 +
79079 +config PAX_KERNEXEC_PLUGIN_METHOD
79080 + string
79081 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
79082 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
79083 + default ""
79084 +
79085 +config PAX_KERNEXEC_MODULE_TEXT
79086 + int "Minimum amount of memory reserved for module code"
79087 + default "4"
79088 + depends on PAX_KERNEXEC && X86_32 && MODULES
79089 + help
79090 + Due to implementation details the kernel must reserve a fixed
79091 + amount of memory for module code at compile time that cannot be
79092 + changed at runtime. Here you can specify the minimum amount
79093 + in MB that will be reserved. Due to the same implementation
79094 + details this size will always be rounded up to the next 2/4 MB
79095 + boundary (depends on PAE) so the actually available memory for
79096 + module code will usually be more than this minimum.
79097 +
79098 + The default 4 MB should be enough for most users but if you have
79099 + an excessive number of modules (e.g., most distribution configs
79100 + compile many drivers as modules) or use huge modules such as
79101 + nvidia's kernel driver, you will need to adjust this amount.
79102 + A good rule of thumb is to look at your currently loaded kernel
79103 + modules and add up their sizes.
79104 +
79105 +endmenu
79106 +
79107 +menu "Address Space Layout Randomization"
79108 + depends on PAX
79109 +
79110 +config PAX_ASLR
79111 + bool "Address Space Layout Randomization"
79112 + help
79113 + Many if not most exploit techniques rely on the knowledge of
79114 + certain addresses in the attacked program. The following options
79115 + will allow the kernel to apply a certain amount of randomization
79116 + to specific parts of the program thereby forcing an attacker to
79117 + guess them in most cases. Any failed guess will most likely crash
79118 + the attacked program which allows the kernel to detect such attempts
79119 + and react on them. PaX itself provides no reaction mechanisms,
79120 + instead it is strongly encouraged that you make use of Nergal's
79121 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
79122 + (http://www.grsecurity.net/) built-in crash detection features or
79123 + develop one yourself.
79124 +
79125 + By saying Y here you can choose to randomize the following areas:
79126 + - top of the task's kernel stack
79127 + - top of the task's userland stack
79128 + - base address for mmap() requests that do not specify one
79129 + (this includes all libraries)
79130 + - base address of the main executable
79131 +
79132 + It is strongly recommended to say Y here as address space layout
79133 + randomization has negligible impact on performance yet it provides
79134 + a very effective protection.
79135 +
79136 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
79137 + this feature on a per file basis.
79138 +
79139 +config PAX_RANDKSTACK
79140 + bool "Randomize kernel stack base"
79141 + depends on X86_TSC && X86
79142 + help
79143 + By saying Y here the kernel will randomize every task's kernel
79144 + stack on every system call. This will not only force an attacker
79145 + to guess it but also prevent him from making use of possible
79146 + leaked information about it.
79147 +
79148 + Since the kernel stack is a rather scarce resource, randomization
79149 + may cause unexpected stack overflows, therefore you should very
79150 + carefully test your system. Note that once enabled in the kernel
79151 + configuration, this feature cannot be disabled on a per file basis.
79152 +
79153 +config PAX_RANDUSTACK
79154 + bool "Randomize user stack base"
79155 + depends on PAX_ASLR
79156 + help
79157 + By saying Y here the kernel will randomize every task's userland
79158 + stack. The randomization is done in two steps where the second
79159 + one may apply a big amount of shift to the top of the stack and
79160 + cause problems for programs that want to use lots of memory (more
79161 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
79162 + For this reason the second step can be controlled by 'chpax' or
79163 + 'paxctl' on a per file basis.
79164 +
79165 +config PAX_RANDMMAP
79166 + bool "Randomize mmap() base"
79167 + depends on PAX_ASLR
79168 + help
79169 + By saying Y here the kernel will use a randomized base address for
79170 + mmap() requests that do not specify one themselves. As a result
79171 + all dynamically loaded libraries will appear at random addresses
79172 + and therefore be harder to exploit by a technique where an attacker
79173 + attempts to execute library code for his purposes (e.g. spawn a
79174 + shell from an exploited program that is running at an elevated
79175 + privilege level).
79176 +
79177 + Furthermore, if a program is relinked as a dynamic ELF file, its
79178 + base address will be randomized as well, completing the full
79179 + randomization of the address space layout. Attacking such programs
79180 + becomes a guess game. You can find an example of doing this at
79181 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
79182 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
79183 +
79184 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
79185 + feature on a per file basis.
79186 +
79187 +endmenu
79188 +
79189 +menu "Miscellaneous hardening features"
79190 +
79191 +config PAX_MEMORY_SANITIZE
79192 + bool "Sanitize all freed memory"
79193 + depends on !HIBERNATION
79194 + help
79195 + By saying Y here the kernel will erase memory pages as soon as they
79196 + are freed. This in turn reduces the lifetime of data stored in the
79197 + pages, making it less likely that sensitive information such as
79198 + passwords, cryptographic secrets, etc stay in memory for too long.
79199 +
79200 + This is especially useful for programs whose runtime is short, long
79201 + lived processes and the kernel itself benefit from this as long as
79202 + they operate on whole memory pages and ensure timely freeing of pages
79203 + that may hold sensitive information.
79204 +
79205 + The tradeoff is performance impact, on a single CPU system kernel
79206 + compilation sees a 3% slowdown, other systems and workloads may vary
79207 + and you are advised to test this feature on your expected workload
79208 + before deploying it.
79209 +
79210 + Note that this feature does not protect data stored in live pages,
79211 + e.g., process memory swapped to disk may stay there for a long time.
79212 +
79213 +config PAX_MEMORY_STACKLEAK
79214 + bool "Sanitize kernel stack"
79215 + depends on X86
79216 + help
79217 + By saying Y here the kernel will erase the kernel stack before it
79218 + returns from a system call. This in turn reduces the information
79219 + that a kernel stack leak bug can reveal.
79220 +
79221 + Note that such a bug can still leak information that was put on
79222 + the stack by the current system call (the one eventually triggering
79223 + the bug) but traces of earlier system calls on the kernel stack
79224 + cannot leak anymore.
79225 +
79226 + The tradeoff is performance impact: on a single CPU system kernel
79227 + compilation sees a 1% slowdown, other systems and workloads may vary
79228 + and you are advised to test this feature on your expected workload
79229 + before deploying it.
79230 +
79231 + Note: full support for this feature requires gcc with plugin support
79232 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
79233 + versions means that functions with large enough stack frames may
79234 + leave uninitialized memory behind that may be exposed to a later
79235 + syscall leaking the stack.
79236 +
79237 +config PAX_MEMORY_UDEREF
79238 + bool "Prevent invalid userland pointer dereference"
79239 + depends on X86 && !UML_X86 && !XEN
79240 + select PAX_PER_CPU_PGD if X86_64
79241 + help
79242 + By saying Y here the kernel will be prevented from dereferencing
79243 + userland pointers in contexts where the kernel expects only kernel
79244 + pointers. This is both a useful runtime debugging feature and a
79245 + security measure that prevents exploiting a class of kernel bugs.
79246 +
79247 + The tradeoff is that some virtualization solutions may experience
79248 + a huge slowdown and therefore you should not enable this feature
79249 + for kernels meant to run in such environments. Whether a given VM
79250 + solution is affected or not is best determined by simply trying it
79251 + out, the performance impact will be obvious right on boot as this
79252 + mechanism engages from very early on. A good rule of thumb is that
79253 + VMs running on CPUs without hardware virtualization support (i.e.,
79254 + the majority of IA-32 CPUs) will likely experience the slowdown.
79255 +
79256 +config PAX_REFCOUNT
79257 + bool "Prevent various kernel object reference counter overflows"
79258 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
79259 + help
79260 + By saying Y here the kernel will detect and prevent overflowing
79261 + various (but not all) kinds of object reference counters. Such
79262 + overflows can normally occur due to bugs only and are often, if
79263 + not always, exploitable.
79264 +
79265 + The tradeoff is that data structures protected by an overflowed
79266 + refcount will never be freed and therefore will leak memory. Note
79267 + that this leak also happens even without this protection but in
79268 + that case the overflow can eventually trigger the freeing of the
79269 + data structure while it is still being used elsewhere, resulting
79270 + in the exploitable situation that this feature prevents.
79271 +
79272 + Since this has a negligible performance impact, you should enable
79273 + this feature.
79274 +
79275 +config PAX_USERCOPY
79276 + bool "Harden heap object copies between kernel and userland"
79277 + depends on X86 || PPC || SPARC || ARM
79278 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
79279 + help
79280 + By saying Y here the kernel will enforce the size of heap objects
79281 + when they are copied in either direction between the kernel and
79282 + userland, even if only a part of the heap object is copied.
79283 +
79284 + Specifically, this checking prevents information leaking from the
79285 + kernel heap during kernel to userland copies (if the kernel heap
79286 + object is otherwise fully initialized) and prevents kernel heap
79287 + overflows during userland to kernel copies.
79288 +
79289 + Note that the current implementation provides the strictest bounds
79290 + checks for the SLUB allocator.
79291 +
79292 + Enabling this option also enables per-slab cache protection against
79293 + data in a given cache being copied into/out of via userland
79294 + accessors. Though the whitelist of regions will be reduced over
79295 + time, it notably protects important data structures like task structs.
79296 +
79297 + If frame pointers are enabled on x86, this option will also restrict
79298 + copies into and out of the kernel stack to local variables within a
79299 + single frame.
79300 +
79301 + Since this has a negligible performance impact, you should enable
79302 + this feature.
79303 +
79304 +config PAX_SIZE_OVERFLOW
79305 + bool "Prevent various integer overflows in function size parameters"
79306 + depends on X86
79307 + help
79308 + By saying Y here the kernel recomputes expressions of function
79309 + arguments marked by a size_overflow attribute with double integer
79310 + precision (DImode/TImode for 32/64 bit integer types).
79311 +
79312 + The recomputed argument is checked against INT_MAX and an event
79313 + is logged on overflow and the triggering process is killed.
79314 +
79315 + Homepage:
79316 + http://www.grsecurity.net/~ephox/overflow_plugin/
79317 +
79318 +endmenu
79319 +
79320 +endmenu
79321 +
79322 config KEYS
79323 bool "Enable access key retention support"
79324 help
79325 @@ -169,7 +803,7 @@ config INTEL_TXT
79326 config LSM_MMAP_MIN_ADDR
79327 int "Low address space for LSM to protect from user allocation"
79328 depends on SECURITY && SECURITY_SELINUX
79329 - default 32768 if ARM
79330 + default 32768 if ALPHA || ARM || PARISC || SPARC32
79331 default 65536
79332 help
79333 This is the portion of low virtual memory which should be protected
79334 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
79335 index 97ce8fa..23dad96 100644
79336 --- a/security/apparmor/lsm.c
79337 +++ b/security/apparmor/lsm.c
79338 @@ -620,7 +620,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
79339 return error;
79340 }
79341
79342 -static struct security_operations apparmor_ops = {
79343 +static struct security_operations apparmor_ops __read_only = {
79344 .name = "apparmor",
79345
79346 .ptrace_access_check = apparmor_ptrace_access_check,
79347 diff --git a/security/commoncap.c b/security/commoncap.c
79348 index b8d2bb9..980069e 100644
79349 --- a/security/commoncap.c
79350 +++ b/security/commoncap.c
79351 @@ -29,6 +29,7 @@
79352 #include <linux/securebits.h>
79353 #include <linux/user_namespace.h>
79354 #include <linux/personality.h>
79355 +#include <net/sock.h>
79356
79357 /*
79358 * If a non-root user executes a setuid-root binary in
79359 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
79360 {
79361 const struct cred *cred = current_cred();
79362
79363 + if (gr_acl_enable_at_secure())
79364 + return 1;
79365 +
79366 if (cred->uid != 0) {
79367 if (bprm->cap_effective)
79368 return 1;
79369 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
79370 index 3ccf7ac..d73ad64 100644
79371 --- a/security/integrity/ima/ima.h
79372 +++ b/security/integrity/ima/ima.h
79373 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79374 extern spinlock_t ima_queue_lock;
79375
79376 struct ima_h_table {
79377 - atomic_long_t len; /* number of stored measurements in the list */
79378 - atomic_long_t violations;
79379 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
79380 + atomic_long_unchecked_t violations;
79381 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
79382 };
79383 extern struct ima_h_table ima_htable;
79384 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
79385 index 88a2788..581ab92 100644
79386 --- a/security/integrity/ima/ima_api.c
79387 +++ b/security/integrity/ima/ima_api.c
79388 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79389 int result;
79390
79391 /* can overflow, only indicator */
79392 - atomic_long_inc(&ima_htable.violations);
79393 + atomic_long_inc_unchecked(&ima_htable.violations);
79394
79395 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
79396 if (!entry) {
79397 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
79398 index e1aa2b4..52027bf 100644
79399 --- a/security/integrity/ima/ima_fs.c
79400 +++ b/security/integrity/ima/ima_fs.c
79401 @@ -28,12 +28,12 @@
79402 static int valid_policy = 1;
79403 #define TMPBUFLEN 12
79404 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
79405 - loff_t *ppos, atomic_long_t *val)
79406 + loff_t *ppos, atomic_long_unchecked_t *val)
79407 {
79408 char tmpbuf[TMPBUFLEN];
79409 ssize_t len;
79410
79411 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
79412 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
79413 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
79414 }
79415
79416 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
79417 index 55a6271..ad829c3 100644
79418 --- a/security/integrity/ima/ima_queue.c
79419 +++ b/security/integrity/ima/ima_queue.c
79420 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
79421 INIT_LIST_HEAD(&qe->later);
79422 list_add_tail_rcu(&qe->later, &ima_measurements);
79423
79424 - atomic_long_inc(&ima_htable.len);
79425 + atomic_long_inc_unchecked(&ima_htable.len);
79426 key = ima_hash_key(entry->digest);
79427 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
79428 return 0;
79429 diff --git a/security/keys/compat.c b/security/keys/compat.c
79430 index 4c48e13..7abdac9 100644
79431 --- a/security/keys/compat.c
79432 +++ b/security/keys/compat.c
79433 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
79434 if (ret == 0)
79435 goto no_payload_free;
79436
79437 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79438 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79439
79440 if (iov != iovstack)
79441 kfree(iov);
79442 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
79443 index 0b3f5d7..892c8a6 100644
79444 --- a/security/keys/keyctl.c
79445 +++ b/security/keys/keyctl.c
79446 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
79447 /*
79448 * Copy the iovec data from userspace
79449 */
79450 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79451 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
79452 unsigned ioc)
79453 {
79454 for (; ioc > 0; ioc--) {
79455 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79456 * If successful, 0 will be returned.
79457 */
79458 long keyctl_instantiate_key_common(key_serial_t id,
79459 - const struct iovec *payload_iov,
79460 + const struct iovec __user *payload_iov,
79461 unsigned ioc,
79462 size_t plen,
79463 key_serial_t ringid)
79464 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
79465 [0].iov_len = plen
79466 };
79467
79468 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
79469 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
79470 }
79471
79472 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
79473 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
79474 if (ret == 0)
79475 goto no_payload_free;
79476
79477 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79478 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79479
79480 if (iov != iovstack)
79481 kfree(iov);
79482 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
79483 index d605f75..2bc6be9 100644
79484 --- a/security/keys/keyring.c
79485 +++ b/security/keys/keyring.c
79486 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
79487 ret = -EFAULT;
79488
79489 for (loop = 0; loop < klist->nkeys; loop++) {
79490 + key_serial_t serial;
79491 key = klist->keys[loop];
79492 + serial = key->serial;
79493
79494 tmp = sizeof(key_serial_t);
79495 if (tmp > buflen)
79496 tmp = buflen;
79497
79498 - if (copy_to_user(buffer,
79499 - &key->serial,
79500 - tmp) != 0)
79501 + if (copy_to_user(buffer, &serial, tmp))
79502 goto error;
79503
79504 buflen -= tmp;
79505 diff --git a/security/min_addr.c b/security/min_addr.c
79506 index f728728..6457a0c 100644
79507 --- a/security/min_addr.c
79508 +++ b/security/min_addr.c
79509 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
79510 */
79511 static void update_mmap_min_addr(void)
79512 {
79513 +#ifndef SPARC
79514 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
79515 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
79516 mmap_min_addr = dac_mmap_min_addr;
79517 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
79518 #else
79519 mmap_min_addr = dac_mmap_min_addr;
79520 #endif
79521 +#endif
79522 }
79523
79524 /*
79525 diff --git a/security/security.c b/security/security.c
79526 index d754249..8bf426e 100644
79527 --- a/security/security.c
79528 +++ b/security/security.c
79529 @@ -26,8 +26,8 @@
79530 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
79531 CONFIG_DEFAULT_SECURITY;
79532
79533 -static struct security_operations *security_ops;
79534 -static struct security_operations default_security_ops = {
79535 +static struct security_operations *security_ops __read_only;
79536 +static struct security_operations default_security_ops __read_only = {
79537 .name = "default",
79538 };
79539
79540 @@ -68,7 +68,9 @@ int __init security_init(void)
79541
79542 void reset_security_ops(void)
79543 {
79544 + pax_open_kernel();
79545 security_ops = &default_security_ops;
79546 + pax_close_kernel();
79547 }
79548
79549 /* Save user chosen LSM */
79550 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
79551 index 6a3683e..f52f4c0 100644
79552 --- a/security/selinux/hooks.c
79553 +++ b/security/selinux/hooks.c
79554 @@ -94,8 +94,6 @@
79555
79556 #define NUM_SEL_MNT_OPTS 5
79557
79558 -extern struct security_operations *security_ops;
79559 -
79560 /* SECMARK reference count */
79561 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
79562
79563 @@ -5429,7 +5427,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
79564
79565 #endif
79566
79567 -static struct security_operations selinux_ops = {
79568 +static struct security_operations selinux_ops __read_only = {
79569 .name = "selinux",
79570
79571 .ptrace_access_check = selinux_ptrace_access_check,
79572 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
79573 index b43813c..74be837 100644
79574 --- a/security/selinux/include/xfrm.h
79575 +++ b/security/selinux/include/xfrm.h
79576 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
79577
79578 static inline void selinux_xfrm_notify_policyload(void)
79579 {
79580 - atomic_inc(&flow_cache_genid);
79581 + atomic_inc_unchecked(&flow_cache_genid);
79582 }
79583 #else
79584 static inline int selinux_xfrm_enabled(void)
79585 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
79586 index e8af5b0b..78527ef 100644
79587 --- a/security/smack/smack_lsm.c
79588 +++ b/security/smack/smack_lsm.c
79589 @@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
79590 return 0;
79591 }
79592
79593 -struct security_operations smack_ops = {
79594 +struct security_operations smack_ops __read_only = {
79595 .name = "smack",
79596
79597 .ptrace_access_check = smack_ptrace_access_check,
79598 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
79599 index 620d37c..e2ad89b 100644
79600 --- a/security/tomoyo/tomoyo.c
79601 +++ b/security/tomoyo/tomoyo.c
79602 @@ -501,7 +501,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
79603 * tomoyo_security_ops is a "struct security_operations" which is used for
79604 * registering TOMOYO.
79605 */
79606 -static struct security_operations tomoyo_security_ops = {
79607 +static struct security_operations tomoyo_security_ops __read_only = {
79608 .name = "tomoyo",
79609 .cred_alloc_blank = tomoyo_cred_alloc_blank,
79610 .cred_prepare = tomoyo_cred_prepare,
79611 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
79612 index 762af68..7103453 100644
79613 --- a/sound/aoa/codecs/onyx.c
79614 +++ b/sound/aoa/codecs/onyx.c
79615 @@ -54,7 +54,7 @@ struct onyx {
79616 spdif_locked:1,
79617 analog_locked:1,
79618 original_mute:2;
79619 - int open_count;
79620 + local_t open_count;
79621 struct codec_info *codec_info;
79622
79623 /* mutex serializes concurrent access to the device
79624 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
79625 struct onyx *onyx = cii->codec_data;
79626
79627 mutex_lock(&onyx->mutex);
79628 - onyx->open_count++;
79629 + local_inc(&onyx->open_count);
79630 mutex_unlock(&onyx->mutex);
79631
79632 return 0;
79633 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
79634 struct onyx *onyx = cii->codec_data;
79635
79636 mutex_lock(&onyx->mutex);
79637 - onyx->open_count--;
79638 - if (!onyx->open_count)
79639 + if (local_dec_and_test(&onyx->open_count))
79640 onyx->spdif_locked = onyx->analog_locked = 0;
79641 mutex_unlock(&onyx->mutex);
79642
79643 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
79644 index ffd2025..df062c9 100644
79645 --- a/sound/aoa/codecs/onyx.h
79646 +++ b/sound/aoa/codecs/onyx.h
79647 @@ -11,6 +11,7 @@
79648 #include <linux/i2c.h>
79649 #include <asm/pmac_low_i2c.h>
79650 #include <asm/prom.h>
79651 +#include <asm/local.h>
79652
79653 /* PCM3052 register definitions */
79654
79655 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
79656 index 08fde00..0bf641a 100644
79657 --- a/sound/core/oss/pcm_oss.c
79658 +++ b/sound/core/oss/pcm_oss.c
79659 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
79660 if (in_kernel) {
79661 mm_segment_t fs;
79662 fs = snd_enter_user();
79663 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
79664 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
79665 snd_leave_user(fs);
79666 } else {
79667 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
79668 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
79669 }
79670 if (ret != -EPIPE && ret != -ESTRPIPE)
79671 break;
79672 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
79673 if (in_kernel) {
79674 mm_segment_t fs;
79675 fs = snd_enter_user();
79676 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
79677 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
79678 snd_leave_user(fs);
79679 } else {
79680 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
79681 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
79682 }
79683 if (ret == -EPIPE) {
79684 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
79685 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
79686 struct snd_pcm_plugin_channel *channels;
79687 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
79688 if (!in_kernel) {
79689 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
79690 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
79691 return -EFAULT;
79692 buf = runtime->oss.buffer;
79693 }
79694 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
79695 }
79696 } else {
79697 tmp = snd_pcm_oss_write2(substream,
79698 - (const char __force *)buf,
79699 + (const char __force_kernel *)buf,
79700 runtime->oss.period_bytes, 0);
79701 if (tmp <= 0)
79702 goto err;
79703 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
79704 struct snd_pcm_runtime *runtime = substream->runtime;
79705 snd_pcm_sframes_t frames, frames1;
79706 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
79707 - char __user *final_dst = (char __force __user *)buf;
79708 + char __user *final_dst = (char __force_user *)buf;
79709 if (runtime->oss.plugin_first) {
79710 struct snd_pcm_plugin_channel *channels;
79711 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
79712 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
79713 xfer += tmp;
79714 runtime->oss.buffer_used -= tmp;
79715 } else {
79716 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
79717 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
79718 runtime->oss.period_bytes, 0);
79719 if (tmp <= 0)
79720 goto err;
79721 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
79722 size1);
79723 size1 /= runtime->channels; /* frames */
79724 fs = snd_enter_user();
79725 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
79726 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
79727 snd_leave_user(fs);
79728 }
79729 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
79730 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
79731 index 91cdf94..4085161 100644
79732 --- a/sound/core/pcm_compat.c
79733 +++ b/sound/core/pcm_compat.c
79734 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
79735 int err;
79736
79737 fs = snd_enter_user();
79738 - err = snd_pcm_delay(substream, &delay);
79739 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
79740 snd_leave_user(fs);
79741 if (err < 0)
79742 return err;
79743 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
79744 index 25ed9fe..24c46e9 100644
79745 --- a/sound/core/pcm_native.c
79746 +++ b/sound/core/pcm_native.c
79747 @@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
79748 switch (substream->stream) {
79749 case SNDRV_PCM_STREAM_PLAYBACK:
79750 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
79751 - (void __user *)arg);
79752 + (void __force_user *)arg);
79753 break;
79754 case SNDRV_PCM_STREAM_CAPTURE:
79755 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
79756 - (void __user *)arg);
79757 + (void __force_user *)arg);
79758 break;
79759 default:
79760 result = -EINVAL;
79761 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
79762 index 5cf8d65..912a79c 100644
79763 --- a/sound/core/seq/seq_device.c
79764 +++ b/sound/core/seq/seq_device.c
79765 @@ -64,7 +64,7 @@ struct ops_list {
79766 int argsize; /* argument size */
79767
79768 /* operators */
79769 - struct snd_seq_dev_ops ops;
79770 + struct snd_seq_dev_ops *ops;
79771
79772 /* registred devices */
79773 struct list_head dev_list; /* list of devices */
79774 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
79775
79776 mutex_lock(&ops->reg_mutex);
79777 /* copy driver operators */
79778 - ops->ops = *entry;
79779 + ops->ops = entry;
79780 ops->driver |= DRIVER_LOADED;
79781 ops->argsize = argsize;
79782
79783 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
79784 dev->name, ops->id, ops->argsize, dev->argsize);
79785 return -EINVAL;
79786 }
79787 - if (ops->ops.init_device(dev) >= 0) {
79788 + if (ops->ops->init_device(dev) >= 0) {
79789 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
79790 ops->num_init_devices++;
79791 } else {
79792 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
79793 dev->name, ops->id, ops->argsize, dev->argsize);
79794 return -EINVAL;
79795 }
79796 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
79797 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
79798 dev->status = SNDRV_SEQ_DEVICE_FREE;
79799 dev->driver_data = NULL;
79800 ops->num_init_devices--;
79801 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
79802 index 621e60e..f4543f5 100644
79803 --- a/sound/drivers/mts64.c
79804 +++ b/sound/drivers/mts64.c
79805 @@ -29,6 +29,7 @@
79806 #include <sound/initval.h>
79807 #include <sound/rawmidi.h>
79808 #include <sound/control.h>
79809 +#include <asm/local.h>
79810
79811 #define CARD_NAME "Miditerminal 4140"
79812 #define DRIVER_NAME "MTS64"
79813 @@ -67,7 +68,7 @@ struct mts64 {
79814 struct pardevice *pardev;
79815 int pardev_claimed;
79816
79817 - int open_count;
79818 + local_t open_count;
79819 int current_midi_output_port;
79820 int current_midi_input_port;
79821 u8 mode[MTS64_NUM_INPUT_PORTS];
79822 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
79823 {
79824 struct mts64 *mts = substream->rmidi->private_data;
79825
79826 - if (mts->open_count == 0) {
79827 + if (local_read(&mts->open_count) == 0) {
79828 /* We don't need a spinlock here, because this is just called
79829 if the device has not been opened before.
79830 So there aren't any IRQs from the device */
79831 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
79832
79833 msleep(50);
79834 }
79835 - ++(mts->open_count);
79836 + local_inc(&mts->open_count);
79837
79838 return 0;
79839 }
79840 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
79841 struct mts64 *mts = substream->rmidi->private_data;
79842 unsigned long flags;
79843
79844 - --(mts->open_count);
79845 - if (mts->open_count == 0) {
79846 + if (local_dec_return(&mts->open_count) == 0) {
79847 /* We need the spinlock_irqsave here because we can still
79848 have IRQs at this point */
79849 spin_lock_irqsave(&mts->lock, flags);
79850 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
79851
79852 msleep(500);
79853
79854 - } else if (mts->open_count < 0)
79855 - mts->open_count = 0;
79856 + } else if (local_read(&mts->open_count) < 0)
79857 + local_set(&mts->open_count, 0);
79858
79859 return 0;
79860 }
79861 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
79862 index b953fb4..1999c01 100644
79863 --- a/sound/drivers/opl4/opl4_lib.c
79864 +++ b/sound/drivers/opl4/opl4_lib.c
79865 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
79866 MODULE_DESCRIPTION("OPL4 driver");
79867 MODULE_LICENSE("GPL");
79868
79869 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
79870 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
79871 {
79872 int timeout = 10;
79873 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
79874 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
79875 index 3e32bd3..46fc152 100644
79876 --- a/sound/drivers/portman2x4.c
79877 +++ b/sound/drivers/portman2x4.c
79878 @@ -48,6 +48,7 @@
79879 #include <sound/initval.h>
79880 #include <sound/rawmidi.h>
79881 #include <sound/control.h>
79882 +#include <asm/local.h>
79883
79884 #define CARD_NAME "Portman 2x4"
79885 #define DRIVER_NAME "portman"
79886 @@ -85,7 +86,7 @@ struct portman {
79887 struct pardevice *pardev;
79888 int pardev_claimed;
79889
79890 - int open_count;
79891 + local_t open_count;
79892 int mode[PORTMAN_NUM_INPUT_PORTS];
79893 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
79894 };
79895 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
79896 index 87657dd..a8268d4 100644
79897 --- a/sound/firewire/amdtp.c
79898 +++ b/sound/firewire/amdtp.c
79899 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
79900 ptr = s->pcm_buffer_pointer + data_blocks;
79901 if (ptr >= pcm->runtime->buffer_size)
79902 ptr -= pcm->runtime->buffer_size;
79903 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
79904 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
79905
79906 s->pcm_period_pointer += data_blocks;
79907 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
79908 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
79909 */
79910 void amdtp_out_stream_update(struct amdtp_out_stream *s)
79911 {
79912 - ACCESS_ONCE(s->source_node_id_field) =
79913 + ACCESS_ONCE_RW(s->source_node_id_field) =
79914 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
79915 }
79916 EXPORT_SYMBOL(amdtp_out_stream_update);
79917 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
79918 index 537a9cb..8e8c8e9 100644
79919 --- a/sound/firewire/amdtp.h
79920 +++ b/sound/firewire/amdtp.h
79921 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
79922 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
79923 struct snd_pcm_substream *pcm)
79924 {
79925 - ACCESS_ONCE(s->pcm) = pcm;
79926 + ACCESS_ONCE_RW(s->pcm) = pcm;
79927 }
79928
79929 /**
79930 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
79931 index cd094ec..eca1277 100644
79932 --- a/sound/firewire/isight.c
79933 +++ b/sound/firewire/isight.c
79934 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
79935 ptr += count;
79936 if (ptr >= runtime->buffer_size)
79937 ptr -= runtime->buffer_size;
79938 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
79939 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
79940
79941 isight->period_counter += count;
79942 if (isight->period_counter >= runtime->period_size) {
79943 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
79944 if (err < 0)
79945 return err;
79946
79947 - ACCESS_ONCE(isight->pcm_active) = true;
79948 + ACCESS_ONCE_RW(isight->pcm_active) = true;
79949
79950 return 0;
79951 }
79952 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
79953 {
79954 struct isight *isight = substream->private_data;
79955
79956 - ACCESS_ONCE(isight->pcm_active) = false;
79957 + ACCESS_ONCE_RW(isight->pcm_active) = false;
79958
79959 mutex_lock(&isight->mutex);
79960 isight_stop_streaming(isight);
79961 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
79962
79963 switch (cmd) {
79964 case SNDRV_PCM_TRIGGER_START:
79965 - ACCESS_ONCE(isight->pcm_running) = true;
79966 + ACCESS_ONCE_RW(isight->pcm_running) = true;
79967 break;
79968 case SNDRV_PCM_TRIGGER_STOP:
79969 - ACCESS_ONCE(isight->pcm_running) = false;
79970 + ACCESS_ONCE_RW(isight->pcm_running) = false;
79971 break;
79972 default:
79973 return -EINVAL;
79974 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
79975 index 7bd5e33..1fcab12 100644
79976 --- a/sound/isa/cmi8330.c
79977 +++ b/sound/isa/cmi8330.c
79978 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
79979
79980 struct snd_pcm *pcm;
79981 struct snd_cmi8330_stream {
79982 - struct snd_pcm_ops ops;
79983 + snd_pcm_ops_no_const ops;
79984 snd_pcm_open_callback_t open;
79985 void *private_data; /* sb or wss */
79986 } streams[2];
79987 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
79988 index 733b014..56ce96f 100644
79989 --- a/sound/oss/sb_audio.c
79990 +++ b/sound/oss/sb_audio.c
79991 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
79992 buf16 = (signed short *)(localbuf + localoffs);
79993 while (c)
79994 {
79995 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
79996 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
79997 if (copy_from_user(lbuf8,
79998 userbuf+useroffs + p,
79999 locallen))
80000 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
80001 index 09d4648..cf234c7 100644
80002 --- a/sound/oss/swarm_cs4297a.c
80003 +++ b/sound/oss/swarm_cs4297a.c
80004 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
80005 {
80006 struct cs4297a_state *s;
80007 u32 pwr, id;
80008 - mm_segment_t fs;
80009 int rval;
80010 #ifndef CONFIG_BCM_CS4297A_CSWARM
80011 u64 cfg;
80012 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
80013 if (!rval) {
80014 char *sb1250_duart_present;
80015
80016 +#if 0
80017 + mm_segment_t fs;
80018 fs = get_fs();
80019 set_fs(KERNEL_DS);
80020 -#if 0
80021 val = SOUND_MASK_LINE;
80022 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
80023 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
80024 val = initvol[i].vol;
80025 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
80026 }
80027 + set_fs(fs);
80028 // cs4297a_write_ac97(s, 0x18, 0x0808);
80029 #else
80030 // cs4297a_write_ac97(s, 0x5e, 0x180);
80031 cs4297a_write_ac97(s, 0x02, 0x0808);
80032 cs4297a_write_ac97(s, 0x18, 0x0808);
80033 #endif
80034 - set_fs(fs);
80035
80036 list_add(&s->list, &cs4297a_devs);
80037
80038 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
80039 index f0f1943..8e1f96c 100644
80040 --- a/sound/pci/hda/hda_codec.h
80041 +++ b/sound/pci/hda/hda_codec.h
80042 @@ -611,7 +611,7 @@ struct hda_bus_ops {
80043 /* notify power-up/down from codec to controller */
80044 void (*pm_notify)(struct hda_bus *bus);
80045 #endif
80046 -};
80047 +} __no_const;
80048
80049 /* template to pass to the bus constructor */
80050 struct hda_bus_template {
80051 @@ -713,6 +713,7 @@ struct hda_codec_ops {
80052 #endif
80053 void (*reboot_notify)(struct hda_codec *codec);
80054 };
80055 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
80056
80057 /* record for amp information cache */
80058 struct hda_cache_head {
80059 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
80060 struct snd_pcm_substream *substream);
80061 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
80062 struct snd_pcm_substream *substream);
80063 -};
80064 +} __no_const;
80065
80066 /* PCM information for each substream */
80067 struct hda_pcm_stream {
80068 @@ -801,7 +802,7 @@ struct hda_codec {
80069 const char *modelname; /* model name for preset */
80070
80071 /* set by patch */
80072 - struct hda_codec_ops patch_ops;
80073 + hda_codec_ops_no_const patch_ops;
80074
80075 /* PCM to create, set by patch_ops.build_pcms callback */
80076 unsigned int num_pcms;
80077 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
80078 index 0da778a..bc38b84 100644
80079 --- a/sound/pci/ice1712/ice1712.h
80080 +++ b/sound/pci/ice1712/ice1712.h
80081 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
80082 unsigned int mask_flags; /* total mask bits */
80083 struct snd_akm4xxx_ops {
80084 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
80085 - } ops;
80086 + } __no_const ops;
80087 };
80088
80089 struct snd_ice1712_spdif {
80090 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
80091 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80092 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80093 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80094 - } ops;
80095 + } __no_const ops;
80096 };
80097
80098
80099 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
80100 index 12a9a2b..2b6138f 100644
80101 --- a/sound/pci/ymfpci/ymfpci_main.c
80102 +++ b/sound/pci/ymfpci/ymfpci_main.c
80103 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
80104 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
80105 break;
80106 }
80107 - if (atomic_read(&chip->interrupt_sleep_count)) {
80108 - atomic_set(&chip->interrupt_sleep_count, 0);
80109 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
80110 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80111 wake_up(&chip->interrupt_sleep);
80112 }
80113 __end:
80114 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
80115 continue;
80116 init_waitqueue_entry(&wait, current);
80117 add_wait_queue(&chip->interrupt_sleep, &wait);
80118 - atomic_inc(&chip->interrupt_sleep_count);
80119 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
80120 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
80121 remove_wait_queue(&chip->interrupt_sleep, &wait);
80122 }
80123 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
80124 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
80125 spin_unlock(&chip->reg_lock);
80126
80127 - if (atomic_read(&chip->interrupt_sleep_count)) {
80128 - atomic_set(&chip->interrupt_sleep_count, 0);
80129 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
80130 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80131 wake_up(&chip->interrupt_sleep);
80132 }
80133 }
80134 @@ -2389,7 +2389,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
80135 spin_lock_init(&chip->reg_lock);
80136 spin_lock_init(&chip->voice_lock);
80137 init_waitqueue_head(&chip->interrupt_sleep);
80138 - atomic_set(&chip->interrupt_sleep_count, 0);
80139 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80140 chip->card = card;
80141 chip->pci = pci;
80142 chip->irq = -1;
80143 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
80144 index cdc860a..db34a93 100644
80145 --- a/sound/soc/soc-pcm.c
80146 +++ b/sound/soc/soc-pcm.c
80147 @@ -605,7 +605,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
80148 struct snd_soc_platform *platform = rtd->platform;
80149 struct snd_soc_dai *codec_dai = rtd->codec_dai;
80150 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
80151 - struct snd_pcm_ops *soc_pcm_ops = &rtd->ops;
80152 + snd_pcm_ops_no_const *soc_pcm_ops = &rtd->ops;
80153 struct snd_pcm *pcm;
80154 char new_name[64];
80155 int ret = 0, playback = 0, capture = 0;
80156 diff --git a/sound/usb/card.h b/sound/usb/card.h
80157 index da5fa1a..113cd02 100644
80158 --- a/sound/usb/card.h
80159 +++ b/sound/usb/card.h
80160 @@ -45,6 +45,7 @@ struct snd_urb_ops {
80161 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
80162 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
80163 };
80164 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
80165
80166 struct snd_usb_substream {
80167 struct snd_usb_stream *stream;
80168 @@ -94,7 +95,7 @@ struct snd_usb_substream {
80169 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
80170 spinlock_t lock;
80171
80172 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
80173 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
80174 int last_frame_number; /* stored frame number */
80175 int last_delay; /* stored delay */
80176 };
80177 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
80178 new file mode 100644
80179 index 0000000..ca64170
80180 --- /dev/null
80181 +++ b/tools/gcc/Makefile
80182 @@ -0,0 +1,26 @@
80183 +#CC := gcc
80184 +#PLUGIN_SOURCE_FILES := pax_plugin.c
80185 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
80186 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
80187 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
80188 +
80189 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
80190 +CFLAGS_size_overflow_plugin.o := -Wno-missing-initializer
80191 +
80192 +hostlibs-y := constify_plugin.so
80193 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
80194 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
80195 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
80196 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
80197 +hostlibs-y += colorize_plugin.so
80198 +hostlibs-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
80199 +
80200 +always := $(hostlibs-y)
80201 +
80202 +constify_plugin-objs := constify_plugin.o
80203 +stackleak_plugin-objs := stackleak_plugin.o
80204 +kallocstat_plugin-objs := kallocstat_plugin.o
80205 +kernexec_plugin-objs := kernexec_plugin.o
80206 +checker_plugin-objs := checker_plugin.o
80207 +colorize_plugin-objs := colorize_plugin.o
80208 +size_overflow_plugin-objs := size_overflow_plugin.o
80209 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
80210 new file mode 100644
80211 index 0000000..d41b5af
80212 --- /dev/null
80213 +++ b/tools/gcc/checker_plugin.c
80214 @@ -0,0 +1,171 @@
80215 +/*
80216 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80217 + * Licensed under the GPL v2
80218 + *
80219 + * Note: the choice of the license means that the compilation process is
80220 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80221 + * but for the kernel it doesn't matter since it doesn't link against
80222 + * any of the gcc libraries
80223 + *
80224 + * gcc plugin to implement various sparse (source code checker) features
80225 + *
80226 + * TODO:
80227 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
80228 + *
80229 + * BUGS:
80230 + * - none known
80231 + */
80232 +#include "gcc-plugin.h"
80233 +#include "config.h"
80234 +#include "system.h"
80235 +#include "coretypes.h"
80236 +#include "tree.h"
80237 +#include "tree-pass.h"
80238 +#include "flags.h"
80239 +#include "intl.h"
80240 +#include "toplev.h"
80241 +#include "plugin.h"
80242 +//#include "expr.h" where are you...
80243 +#include "diagnostic.h"
80244 +#include "plugin-version.h"
80245 +#include "tm.h"
80246 +#include "function.h"
80247 +#include "basic-block.h"
80248 +#include "gimple.h"
80249 +#include "rtl.h"
80250 +#include "emit-rtl.h"
80251 +#include "tree-flow.h"
80252 +#include "target.h"
80253 +
80254 +extern void c_register_addr_space (const char *str, addr_space_t as);
80255 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
80256 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
80257 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
80258 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
80259 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
80260 +
80261 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80262 +extern rtx emit_move_insn(rtx x, rtx y);
80263 +
80264 +int plugin_is_GPL_compatible;
80265 +
80266 +static struct plugin_info checker_plugin_info = {
80267 + .version = "201111150100",
80268 +};
80269 +
80270 +#define ADDR_SPACE_KERNEL 0
80271 +#define ADDR_SPACE_FORCE_KERNEL 1
80272 +#define ADDR_SPACE_USER 2
80273 +#define ADDR_SPACE_FORCE_USER 3
80274 +#define ADDR_SPACE_IOMEM 0
80275 +#define ADDR_SPACE_FORCE_IOMEM 0
80276 +#define ADDR_SPACE_PERCPU 0
80277 +#define ADDR_SPACE_FORCE_PERCPU 0
80278 +#define ADDR_SPACE_RCU 0
80279 +#define ADDR_SPACE_FORCE_RCU 0
80280 +
80281 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
80282 +{
80283 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
80284 +}
80285 +
80286 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
80287 +{
80288 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
80289 +}
80290 +
80291 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
80292 +{
80293 + return default_addr_space_valid_pointer_mode(mode, as);
80294 +}
80295 +
80296 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
80297 +{
80298 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
80299 +}
80300 +
80301 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
80302 +{
80303 + return default_addr_space_legitimize_address(x, oldx, mode, as);
80304 +}
80305 +
80306 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
80307 +{
80308 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
80309 + return true;
80310 +
80311 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
80312 + return true;
80313 +
80314 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
80315 + return true;
80316 +
80317 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
80318 + return true;
80319 +
80320 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
80321 + return true;
80322 +
80323 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
80324 + return true;
80325 +
80326 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
80327 + return true;
80328 +
80329 + return subset == superset;
80330 +}
80331 +
80332 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
80333 +{
80334 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
80335 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
80336 +
80337 + return op;
80338 +}
80339 +
80340 +static void register_checker_address_spaces(void *event_data, void *data)
80341 +{
80342 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
80343 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
80344 + c_register_addr_space("__user", ADDR_SPACE_USER);
80345 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
80346 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
80347 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
80348 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
80349 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
80350 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
80351 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
80352 +
80353 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
80354 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
80355 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
80356 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
80357 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
80358 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
80359 + targetm.addr_space.convert = checker_addr_space_convert;
80360 +}
80361 +
80362 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80363 +{
80364 + const char * const plugin_name = plugin_info->base_name;
80365 + const int argc = plugin_info->argc;
80366 + const struct plugin_argument * const argv = plugin_info->argv;
80367 + int i;
80368 +
80369 + if (!plugin_default_version_check(version, &gcc_version)) {
80370 + error(G_("incompatible gcc/plugin versions"));
80371 + return 1;
80372 + }
80373 +
80374 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
80375 +
80376 + for (i = 0; i < argc; ++i)
80377 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80378 +
80379 + if (TARGET_64BIT == 0)
80380 + return 0;
80381 +
80382 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
80383 +
80384 + return 0;
80385 +}
80386 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
80387 new file mode 100644
80388 index 0000000..ee950d0
80389 --- /dev/null
80390 +++ b/tools/gcc/colorize_plugin.c
80391 @@ -0,0 +1,147 @@
80392 +/*
80393 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
80394 + * Licensed under the GPL v2
80395 + *
80396 + * Note: the choice of the license means that the compilation process is
80397 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80398 + * but for the kernel it doesn't matter since it doesn't link against
80399 + * any of the gcc libraries
80400 + *
80401 + * gcc plugin to colorize diagnostic output
80402 + *
80403 + */
80404 +
80405 +#include "gcc-plugin.h"
80406 +#include "config.h"
80407 +#include "system.h"
80408 +#include "coretypes.h"
80409 +#include "tree.h"
80410 +#include "tree-pass.h"
80411 +#include "flags.h"
80412 +#include "intl.h"
80413 +#include "toplev.h"
80414 +#include "plugin.h"
80415 +#include "diagnostic.h"
80416 +#include "plugin-version.h"
80417 +#include "tm.h"
80418 +
80419 +int plugin_is_GPL_compatible;
80420 +
80421 +static struct plugin_info colorize_plugin_info = {
80422 + .version = "201203092200",
80423 +};
80424 +
80425 +#define GREEN "\033[32m\033[2m"
80426 +#define LIGHTGREEN "\033[32m\033[1m"
80427 +#define YELLOW "\033[33m\033[2m"
80428 +#define LIGHTYELLOW "\033[33m\033[1m"
80429 +#define RED "\033[31m\033[2m"
80430 +#define LIGHTRED "\033[31m\033[1m"
80431 +#define BLUE "\033[34m\033[2m"
80432 +#define LIGHTBLUE "\033[34m\033[1m"
80433 +#define BRIGHT "\033[m\033[1m"
80434 +#define NORMAL "\033[m"
80435 +
80436 +static diagnostic_starter_fn old_starter;
80437 +static diagnostic_finalizer_fn old_finalizer;
80438 +
80439 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
80440 +{
80441 + const char *color;
80442 + char *newprefix;
80443 +
80444 + switch (diagnostic->kind) {
80445 + case DK_NOTE:
80446 + color = LIGHTBLUE;
80447 + break;
80448 +
80449 + case DK_PEDWARN:
80450 + case DK_WARNING:
80451 + color = LIGHTYELLOW;
80452 + break;
80453 +
80454 + case DK_ERROR:
80455 + case DK_FATAL:
80456 + case DK_ICE:
80457 + case DK_PERMERROR:
80458 + case DK_SORRY:
80459 + color = LIGHTRED;
80460 + break;
80461 +
80462 + default:
80463 + color = NORMAL;
80464 + }
80465 +
80466 + old_starter(context, diagnostic);
80467 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
80468 + return;
80469 + pp_destroy_prefix(context->printer);
80470 + pp_set_prefix(context->printer, newprefix);
80471 +}
80472 +
80473 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
80474 +{
80475 + old_finalizer(context, diagnostic);
80476 +}
80477 +
80478 +static void colorize_arm(void)
80479 +{
80480 + old_starter = diagnostic_starter(global_dc);
80481 + old_finalizer = diagnostic_finalizer(global_dc);
80482 +
80483 + diagnostic_starter(global_dc) = start_colorize;
80484 + diagnostic_finalizer(global_dc) = finalize_colorize;
80485 +}
80486 +
80487 +static unsigned int execute_colorize_rearm(void)
80488 +{
80489 + if (diagnostic_starter(global_dc) == start_colorize)
80490 + return 0;
80491 +
80492 + colorize_arm();
80493 + return 0;
80494 +}
80495 +
80496 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
80497 + .pass = {
80498 + .type = SIMPLE_IPA_PASS,
80499 + .name = "colorize_rearm",
80500 + .gate = NULL,
80501 + .execute = execute_colorize_rearm,
80502 + .sub = NULL,
80503 + .next = NULL,
80504 + .static_pass_number = 0,
80505 + .tv_id = TV_NONE,
80506 + .properties_required = 0,
80507 + .properties_provided = 0,
80508 + .properties_destroyed = 0,
80509 + .todo_flags_start = 0,
80510 + .todo_flags_finish = 0
80511 + }
80512 +};
80513 +
80514 +static void colorize_start_unit(void *gcc_data, void *user_data)
80515 +{
80516 + colorize_arm();
80517 +}
80518 +
80519 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80520 +{
80521 + const char * const plugin_name = plugin_info->base_name;
80522 + struct register_pass_info colorize_rearm_pass_info = {
80523 + .pass = &pass_ipa_colorize_rearm.pass,
80524 + .reference_pass_name = "*free_lang_data",
80525 + .ref_pass_instance_number = 0,
80526 + .pos_op = PASS_POS_INSERT_AFTER
80527 + };
80528 +
80529 + if (!plugin_default_version_check(version, &gcc_version)) {
80530 + error(G_("incompatible gcc/plugin versions"));
80531 + return 1;
80532 + }
80533 +
80534 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
80535 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
80536 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
80537 + return 0;
80538 +}
80539 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
80540 new file mode 100644
80541 index 0000000..88a7438
80542 --- /dev/null
80543 +++ b/tools/gcc/constify_plugin.c
80544 @@ -0,0 +1,303 @@
80545 +/*
80546 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
80547 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
80548 + * Licensed under the GPL v2, or (at your option) v3
80549 + *
80550 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
80551 + *
80552 + * Homepage:
80553 + * http://www.grsecurity.net/~ephox/const_plugin/
80554 + *
80555 + * Usage:
80556 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
80557 + * $ gcc -fplugin=constify_plugin.so test.c -O2
80558 + */
80559 +
80560 +#include "gcc-plugin.h"
80561 +#include "config.h"
80562 +#include "system.h"
80563 +#include "coretypes.h"
80564 +#include "tree.h"
80565 +#include "tree-pass.h"
80566 +#include "flags.h"
80567 +#include "intl.h"
80568 +#include "toplev.h"
80569 +#include "plugin.h"
80570 +#include "diagnostic.h"
80571 +#include "plugin-version.h"
80572 +#include "tm.h"
80573 +#include "function.h"
80574 +#include "basic-block.h"
80575 +#include "gimple.h"
80576 +#include "rtl.h"
80577 +#include "emit-rtl.h"
80578 +#include "tree-flow.h"
80579 +
80580 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
80581 +
80582 +int plugin_is_GPL_compatible;
80583 +
80584 +static struct plugin_info const_plugin_info = {
80585 + .version = "201111150100",
80586 + .help = "no-constify\tturn off constification\n",
80587 +};
80588 +
80589 +static void constify_type(tree type);
80590 +static bool walk_struct(tree node);
80591 +
80592 +static tree deconstify_type(tree old_type)
80593 +{
80594 + tree new_type, field;
80595 +
80596 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
80597 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
80598 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
80599 + DECL_FIELD_CONTEXT(field) = new_type;
80600 + TYPE_READONLY(new_type) = 0;
80601 + C_TYPE_FIELDS_READONLY(new_type) = 0;
80602 + return new_type;
80603 +}
80604 +
80605 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
80606 +{
80607 + tree type;
80608 +
80609 + *no_add_attrs = true;
80610 + if (TREE_CODE(*node) == FUNCTION_DECL) {
80611 + error("%qE attribute does not apply to functions", name);
80612 + return NULL_TREE;
80613 + }
80614 +
80615 + if (TREE_CODE(*node) == VAR_DECL) {
80616 + error("%qE attribute does not apply to variables", name);
80617 + return NULL_TREE;
80618 + }
80619 +
80620 + if (TYPE_P(*node)) {
80621 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
80622 + *no_add_attrs = false;
80623 + else
80624 + error("%qE attribute applies to struct and union types only", name);
80625 + return NULL_TREE;
80626 + }
80627 +
80628 + type = TREE_TYPE(*node);
80629 +
80630 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
80631 + error("%qE attribute applies to struct and union types only", name);
80632 + return NULL_TREE;
80633 + }
80634 +
80635 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
80636 + error("%qE attribute is already applied to the type", name);
80637 + return NULL_TREE;
80638 + }
80639 +
80640 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
80641 + error("%qE attribute used on type that is not constified", name);
80642 + return NULL_TREE;
80643 + }
80644 +
80645 + if (TREE_CODE(*node) == TYPE_DECL) {
80646 + TREE_TYPE(*node) = deconstify_type(type);
80647 + TREE_READONLY(*node) = 0;
80648 + return NULL_TREE;
80649 + }
80650 +
80651 + return NULL_TREE;
80652 +}
80653 +
80654 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
80655 +{
80656 + *no_add_attrs = true;
80657 + if (!TYPE_P(*node)) {
80658 + error("%qE attribute applies to types only", name);
80659 + return NULL_TREE;
80660 + }
80661 +
80662 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
80663 + error("%qE attribute applies to struct and union types only", name);
80664 + return NULL_TREE;
80665 + }
80666 +
80667 + *no_add_attrs = false;
80668 + constify_type(*node);
80669 + return NULL_TREE;
80670 +}
80671 +
80672 +static struct attribute_spec no_const_attr = {
80673 + .name = "no_const",
80674 + .min_length = 0,
80675 + .max_length = 0,
80676 + .decl_required = false,
80677 + .type_required = false,
80678 + .function_type_required = false,
80679 + .handler = handle_no_const_attribute,
80680 +#if BUILDING_GCC_VERSION >= 4007
80681 + .affects_type_identity = true
80682 +#endif
80683 +};
80684 +
80685 +static struct attribute_spec do_const_attr = {
80686 + .name = "do_const",
80687 + .min_length = 0,
80688 + .max_length = 0,
80689 + .decl_required = false,
80690 + .type_required = false,
80691 + .function_type_required = false,
80692 + .handler = handle_do_const_attribute,
80693 +#if BUILDING_GCC_VERSION >= 4007
80694 + .affects_type_identity = true
80695 +#endif
80696 +};
80697 +
80698 +static void register_attributes(void *event_data, void *data)
80699 +{
80700 + register_attribute(&no_const_attr);
80701 + register_attribute(&do_const_attr);
80702 +}
80703 +
80704 +static void constify_type(tree type)
80705 +{
80706 + TYPE_READONLY(type) = 1;
80707 + C_TYPE_FIELDS_READONLY(type) = 1;
80708 +}
80709 +
80710 +static bool is_fptr(tree field)
80711 +{
80712 + tree ptr = TREE_TYPE(field);
80713 +
80714 + if (TREE_CODE(ptr) != POINTER_TYPE)
80715 + return false;
80716 +
80717 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
80718 +}
80719 +
80720 +static bool walk_struct(tree node)
80721 +{
80722 + tree field;
80723 +
80724 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
80725 + return false;
80726 +
80727 + if (TYPE_FIELDS(node) == NULL_TREE)
80728 + return false;
80729 +
80730 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
80731 + tree type = TREE_TYPE(field);
80732 + enum tree_code code = TREE_CODE(type);
80733 + if (code == RECORD_TYPE || code == UNION_TYPE) {
80734 + if (!(walk_struct(type)))
80735 + return false;
80736 + } else if (!is_fptr(field) && !TREE_READONLY(field))
80737 + return false;
80738 + }
80739 + return true;
80740 +}
80741 +
80742 +static void finish_type(void *event_data, void *data)
80743 +{
80744 + tree type = (tree)event_data;
80745 +
80746 + if (type == NULL_TREE)
80747 + return;
80748 +
80749 + if (TYPE_READONLY(type))
80750 + return;
80751 +
80752 + if (walk_struct(type))
80753 + constify_type(type);
80754 +}
80755 +
80756 +static unsigned int check_local_variables(void);
80757 +
80758 +struct gimple_opt_pass pass_local_variable = {
80759 + {
80760 + .type = GIMPLE_PASS,
80761 + .name = "check_local_variables",
80762 + .gate = NULL,
80763 + .execute = check_local_variables,
80764 + .sub = NULL,
80765 + .next = NULL,
80766 + .static_pass_number = 0,
80767 + .tv_id = TV_NONE,
80768 + .properties_required = 0,
80769 + .properties_provided = 0,
80770 + .properties_destroyed = 0,
80771 + .todo_flags_start = 0,
80772 + .todo_flags_finish = 0
80773 + }
80774 +};
80775 +
80776 +static unsigned int check_local_variables(void)
80777 +{
80778 + tree var;
80779 + referenced_var_iterator rvi;
80780 +
80781 +#if BUILDING_GCC_VERSION == 4005
80782 + FOR_EACH_REFERENCED_VAR(var, rvi) {
80783 +#else
80784 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
80785 +#endif
80786 + tree type = TREE_TYPE(var);
80787 +
80788 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
80789 + continue;
80790 +
80791 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
80792 + continue;
80793 +
80794 + if (!TYPE_READONLY(type))
80795 + continue;
80796 +
80797 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
80798 +// continue;
80799 +
80800 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
80801 +// continue;
80802 +
80803 + if (walk_struct(type)) {
80804 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
80805 + return 1;
80806 + }
80807 + }
80808 + return 0;
80809 +}
80810 +
80811 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80812 +{
80813 + const char * const plugin_name = plugin_info->base_name;
80814 + const int argc = plugin_info->argc;
80815 + const struct plugin_argument * const argv = plugin_info->argv;
80816 + int i;
80817 + bool constify = true;
80818 +
80819 + struct register_pass_info local_variable_pass_info = {
80820 + .pass = &pass_local_variable.pass,
80821 + .reference_pass_name = "*referenced_vars",
80822 + .ref_pass_instance_number = 0,
80823 + .pos_op = PASS_POS_INSERT_AFTER
80824 + };
80825 +
80826 + if (!plugin_default_version_check(version, &gcc_version)) {
80827 + error(G_("incompatible gcc/plugin versions"));
80828 + return 1;
80829 + }
80830 +
80831 + for (i = 0; i < argc; ++i) {
80832 + if (!(strcmp(argv[i].key, "no-constify"))) {
80833 + constify = false;
80834 + continue;
80835 + }
80836 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80837 + }
80838 +
80839 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
80840 + if (constify) {
80841 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
80842 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
80843 + }
80844 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
80845 +
80846 + return 0;
80847 +}
80848 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
80849 new file mode 100644
80850 index 0000000..a5eabce
80851 --- /dev/null
80852 +++ b/tools/gcc/kallocstat_plugin.c
80853 @@ -0,0 +1,167 @@
80854 +/*
80855 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80856 + * Licensed under the GPL v2
80857 + *
80858 + * Note: the choice of the license means that the compilation process is
80859 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80860 + * but for the kernel it doesn't matter since it doesn't link against
80861 + * any of the gcc libraries
80862 + *
80863 + * gcc plugin to find the distribution of k*alloc sizes
80864 + *
80865 + * TODO:
80866 + *
80867 + * BUGS:
80868 + * - none known
80869 + */
80870 +#include "gcc-plugin.h"
80871 +#include "config.h"
80872 +#include "system.h"
80873 +#include "coretypes.h"
80874 +#include "tree.h"
80875 +#include "tree-pass.h"
80876 +#include "flags.h"
80877 +#include "intl.h"
80878 +#include "toplev.h"
80879 +#include "plugin.h"
80880 +//#include "expr.h" where are you...
80881 +#include "diagnostic.h"
80882 +#include "plugin-version.h"
80883 +#include "tm.h"
80884 +#include "function.h"
80885 +#include "basic-block.h"
80886 +#include "gimple.h"
80887 +#include "rtl.h"
80888 +#include "emit-rtl.h"
80889 +
80890 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80891 +
80892 +int plugin_is_GPL_compatible;
80893 +
80894 +static const char * const kalloc_functions[] = {
80895 + "__kmalloc",
80896 + "kmalloc",
80897 + "kmalloc_large",
80898 + "kmalloc_node",
80899 + "kmalloc_order",
80900 + "kmalloc_order_trace",
80901 + "kmalloc_slab",
80902 + "kzalloc",
80903 + "kzalloc_node",
80904 +};
80905 +
80906 +static struct plugin_info kallocstat_plugin_info = {
80907 + .version = "201111150100",
80908 +};
80909 +
80910 +static unsigned int execute_kallocstat(void);
80911 +
80912 +static struct gimple_opt_pass kallocstat_pass = {
80913 + .pass = {
80914 + .type = GIMPLE_PASS,
80915 + .name = "kallocstat",
80916 + .gate = NULL,
80917 + .execute = execute_kallocstat,
80918 + .sub = NULL,
80919 + .next = NULL,
80920 + .static_pass_number = 0,
80921 + .tv_id = TV_NONE,
80922 + .properties_required = 0,
80923 + .properties_provided = 0,
80924 + .properties_destroyed = 0,
80925 + .todo_flags_start = 0,
80926 + .todo_flags_finish = 0
80927 + }
80928 +};
80929 +
80930 +static bool is_kalloc(const char *fnname)
80931 +{
80932 + size_t i;
80933 +
80934 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
80935 + if (!strcmp(fnname, kalloc_functions[i]))
80936 + return true;
80937 + return false;
80938 +}
80939 +
80940 +static unsigned int execute_kallocstat(void)
80941 +{
80942 + basic_block bb;
80943 +
80944 + // 1. loop through BBs and GIMPLE statements
80945 + FOR_EACH_BB(bb) {
80946 + gimple_stmt_iterator gsi;
80947 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80948 + // gimple match:
80949 + tree fndecl, size;
80950 + gimple call_stmt;
80951 + const char *fnname;
80952 +
80953 + // is it a call
80954 + call_stmt = gsi_stmt(gsi);
80955 + if (!is_gimple_call(call_stmt))
80956 + continue;
80957 + fndecl = gimple_call_fndecl(call_stmt);
80958 + if (fndecl == NULL_TREE)
80959 + continue;
80960 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
80961 + continue;
80962 +
80963 + // is it a call to k*alloc
80964 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
80965 + if (!is_kalloc(fnname))
80966 + continue;
80967 +
80968 + // is the size arg the result of a simple const assignment
80969 + size = gimple_call_arg(call_stmt, 0);
80970 + while (true) {
80971 + gimple def_stmt;
80972 + expanded_location xloc;
80973 + size_t size_val;
80974 +
80975 + if (TREE_CODE(size) != SSA_NAME)
80976 + break;
80977 + def_stmt = SSA_NAME_DEF_STMT(size);
80978 + if (!def_stmt || !is_gimple_assign(def_stmt))
80979 + break;
80980 + if (gimple_num_ops(def_stmt) != 2)
80981 + break;
80982 + size = gimple_assign_rhs1(def_stmt);
80983 + if (!TREE_CONSTANT(size))
80984 + continue;
80985 + xloc = expand_location(gimple_location(def_stmt));
80986 + if (!xloc.file)
80987 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
80988 + size_val = TREE_INT_CST_LOW(size);
80989 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
80990 + break;
80991 + }
80992 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
80993 +//debug_tree(gimple_call_fn(call_stmt));
80994 +//print_node(stderr, "pax", fndecl, 4);
80995 + }
80996 + }
80997 +
80998 + return 0;
80999 +}
81000 +
81001 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81002 +{
81003 + const char * const plugin_name = plugin_info->base_name;
81004 + struct register_pass_info kallocstat_pass_info = {
81005 + .pass = &kallocstat_pass.pass,
81006 + .reference_pass_name = "ssa",
81007 + .ref_pass_instance_number = 0,
81008 + .pos_op = PASS_POS_INSERT_AFTER
81009 + };
81010 +
81011 + if (!plugin_default_version_check(version, &gcc_version)) {
81012 + error(G_("incompatible gcc/plugin versions"));
81013 + return 1;
81014 + }
81015 +
81016 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
81017 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
81018 +
81019 + return 0;
81020 +}
81021 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
81022 new file mode 100644
81023 index 0000000..d8a8da2
81024 --- /dev/null
81025 +++ b/tools/gcc/kernexec_plugin.c
81026 @@ -0,0 +1,427 @@
81027 +/*
81028 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
81029 + * Licensed under the GPL v2
81030 + *
81031 + * Note: the choice of the license means that the compilation process is
81032 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
81033 + * but for the kernel it doesn't matter since it doesn't link against
81034 + * any of the gcc libraries
81035 + *
81036 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
81037 + *
81038 + * TODO:
81039 + *
81040 + * BUGS:
81041 + * - none known
81042 + */
81043 +#include "gcc-plugin.h"
81044 +#include "config.h"
81045 +#include "system.h"
81046 +#include "coretypes.h"
81047 +#include "tree.h"
81048 +#include "tree-pass.h"
81049 +#include "flags.h"
81050 +#include "intl.h"
81051 +#include "toplev.h"
81052 +#include "plugin.h"
81053 +//#include "expr.h" where are you...
81054 +#include "diagnostic.h"
81055 +#include "plugin-version.h"
81056 +#include "tm.h"
81057 +#include "function.h"
81058 +#include "basic-block.h"
81059 +#include "gimple.h"
81060 +#include "rtl.h"
81061 +#include "emit-rtl.h"
81062 +#include "tree-flow.h"
81063 +
81064 +extern void print_gimple_stmt(FILE *, gimple, int, int);
81065 +extern rtx emit_move_insn(rtx x, rtx y);
81066 +
81067 +int plugin_is_GPL_compatible;
81068 +
81069 +static struct plugin_info kernexec_plugin_info = {
81070 + .version = "201111291120",
81071 + .help = "method=[bts|or]\tinstrumentation method\n"
81072 +};
81073 +
81074 +static unsigned int execute_kernexec_reload(void);
81075 +static unsigned int execute_kernexec_fptr(void);
81076 +static unsigned int execute_kernexec_retaddr(void);
81077 +static bool kernexec_cmodel_check(void);
81078 +
81079 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
81080 +static void (*kernexec_instrument_retaddr)(rtx);
81081 +
81082 +static struct gimple_opt_pass kernexec_reload_pass = {
81083 + .pass = {
81084 + .type = GIMPLE_PASS,
81085 + .name = "kernexec_reload",
81086 + .gate = kernexec_cmodel_check,
81087 + .execute = execute_kernexec_reload,
81088 + .sub = NULL,
81089 + .next = NULL,
81090 + .static_pass_number = 0,
81091 + .tv_id = TV_NONE,
81092 + .properties_required = 0,
81093 + .properties_provided = 0,
81094 + .properties_destroyed = 0,
81095 + .todo_flags_start = 0,
81096 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
81097 + }
81098 +};
81099 +
81100 +static struct gimple_opt_pass kernexec_fptr_pass = {
81101 + .pass = {
81102 + .type = GIMPLE_PASS,
81103 + .name = "kernexec_fptr",
81104 + .gate = kernexec_cmodel_check,
81105 + .execute = execute_kernexec_fptr,
81106 + .sub = NULL,
81107 + .next = NULL,
81108 + .static_pass_number = 0,
81109 + .tv_id = TV_NONE,
81110 + .properties_required = 0,
81111 + .properties_provided = 0,
81112 + .properties_destroyed = 0,
81113 + .todo_flags_start = 0,
81114 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
81115 + }
81116 +};
81117 +
81118 +static struct rtl_opt_pass kernexec_retaddr_pass = {
81119 + .pass = {
81120 + .type = RTL_PASS,
81121 + .name = "kernexec_retaddr",
81122 + .gate = kernexec_cmodel_check,
81123 + .execute = execute_kernexec_retaddr,
81124 + .sub = NULL,
81125 + .next = NULL,
81126 + .static_pass_number = 0,
81127 + .tv_id = TV_NONE,
81128 + .properties_required = 0,
81129 + .properties_provided = 0,
81130 + .properties_destroyed = 0,
81131 + .todo_flags_start = 0,
81132 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
81133 + }
81134 +};
81135 +
81136 +static bool kernexec_cmodel_check(void)
81137 +{
81138 + tree section;
81139 +
81140 + if (ix86_cmodel != CM_KERNEL)
81141 + return false;
81142 +
81143 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
81144 + if (!section || !TREE_VALUE(section))
81145 + return true;
81146 +
81147 + section = TREE_VALUE(TREE_VALUE(section));
81148 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
81149 + return true;
81150 +
81151 + return false;
81152 +}
81153 +
81154 +/*
81155 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
81156 + */
81157 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
81158 +{
81159 + gimple asm_movabs_stmt;
81160 +
81161 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
81162 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
81163 + gimple_asm_set_volatile(asm_movabs_stmt, true);
81164 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
81165 + update_stmt(asm_movabs_stmt);
81166 +}
81167 +
81168 +/*
81169 + * find all asm() stmts that clobber r10 and add a reload of r10
81170 + */
81171 +static unsigned int execute_kernexec_reload(void)
81172 +{
81173 + basic_block bb;
81174 +
81175 + // 1. loop through BBs and GIMPLE statements
81176 + FOR_EACH_BB(bb) {
81177 + gimple_stmt_iterator gsi;
81178 +
81179 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81180 + // gimple match: __asm__ ("" : : : "r10");
81181 + gimple asm_stmt;
81182 + size_t nclobbers;
81183 +
81184 + // is it an asm ...
81185 + asm_stmt = gsi_stmt(gsi);
81186 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
81187 + continue;
81188 +
81189 + // ... clobbering r10
81190 + nclobbers = gimple_asm_nclobbers(asm_stmt);
81191 + while (nclobbers--) {
81192 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
81193 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
81194 + continue;
81195 + kernexec_reload_fptr_mask(&gsi);
81196 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
81197 + break;
81198 + }
81199 + }
81200 + }
81201 +
81202 + return 0;
81203 +}
81204 +
81205 +/*
81206 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
81207 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
81208 + */
81209 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
81210 +{
81211 + gimple assign_intptr, assign_new_fptr, call_stmt;
81212 + tree intptr, old_fptr, new_fptr, kernexec_mask;
81213 +
81214 + call_stmt = gsi_stmt(*gsi);
81215 + old_fptr = gimple_call_fn(call_stmt);
81216 +
81217 + // create temporary unsigned long variable used for bitops and cast fptr to it
81218 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
81219 + add_referenced_var(intptr);
81220 + mark_sym_for_renaming(intptr);
81221 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
81222 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
81223 + update_stmt(assign_intptr);
81224 +
81225 + // apply logical or to temporary unsigned long and bitmask
81226 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
81227 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
81228 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
81229 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
81230 + update_stmt(assign_intptr);
81231 +
81232 + // cast temporary unsigned long back to a temporary fptr variable
81233 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
81234 + add_referenced_var(new_fptr);
81235 + mark_sym_for_renaming(new_fptr);
81236 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
81237 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
81238 + update_stmt(assign_new_fptr);
81239 +
81240 + // replace call stmt fn with the new fptr
81241 + gimple_call_set_fn(call_stmt, new_fptr);
81242 + update_stmt(call_stmt);
81243 +}
81244 +
81245 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
81246 +{
81247 + gimple asm_or_stmt, call_stmt;
81248 + tree old_fptr, new_fptr, input, output;
81249 + VEC(tree, gc) *inputs = NULL;
81250 + VEC(tree, gc) *outputs = NULL;
81251 +
81252 + call_stmt = gsi_stmt(*gsi);
81253 + old_fptr = gimple_call_fn(call_stmt);
81254 +
81255 + // create temporary fptr variable
81256 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
81257 + add_referenced_var(new_fptr);
81258 + mark_sym_for_renaming(new_fptr);
81259 +
81260 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
81261 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
81262 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
81263 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
81264 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
81265 + VEC_safe_push(tree, gc, inputs, input);
81266 + VEC_safe_push(tree, gc, outputs, output);
81267 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
81268 + gimple_asm_set_volatile(asm_or_stmt, true);
81269 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
81270 + update_stmt(asm_or_stmt);
81271 +
81272 + // replace call stmt fn with the new fptr
81273 + gimple_call_set_fn(call_stmt, new_fptr);
81274 + update_stmt(call_stmt);
81275 +}
81276 +
81277 +/*
81278 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
81279 + */
81280 +static unsigned int execute_kernexec_fptr(void)
81281 +{
81282 + basic_block bb;
81283 +
81284 + // 1. loop through BBs and GIMPLE statements
81285 + FOR_EACH_BB(bb) {
81286 + gimple_stmt_iterator gsi;
81287 +
81288 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81289 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
81290 + tree fn;
81291 + gimple call_stmt;
81292 +
81293 + // is it a call ...
81294 + call_stmt = gsi_stmt(gsi);
81295 + if (!is_gimple_call(call_stmt))
81296 + continue;
81297 + fn = gimple_call_fn(call_stmt);
81298 + if (TREE_CODE(fn) == ADDR_EXPR)
81299 + continue;
81300 + if (TREE_CODE(fn) != SSA_NAME)
81301 + gcc_unreachable();
81302 +
81303 + // ... through a function pointer
81304 + fn = SSA_NAME_VAR(fn);
81305 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
81306 + continue;
81307 + fn = TREE_TYPE(fn);
81308 + if (TREE_CODE(fn) != POINTER_TYPE)
81309 + continue;
81310 + fn = TREE_TYPE(fn);
81311 + if (TREE_CODE(fn) != FUNCTION_TYPE)
81312 + continue;
81313 +
81314 + kernexec_instrument_fptr(&gsi);
81315 +
81316 +//debug_tree(gimple_call_fn(call_stmt));
81317 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
81318 + }
81319 + }
81320 +
81321 + return 0;
81322 +}
81323 +
81324 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
81325 +static void kernexec_instrument_retaddr_bts(rtx insn)
81326 +{
81327 + rtx btsq;
81328 + rtvec argvec, constraintvec, labelvec;
81329 + int line;
81330 +
81331 + // create asm volatile("btsq $63,(%%rsp)":::)
81332 + argvec = rtvec_alloc(0);
81333 + constraintvec = rtvec_alloc(0);
81334 + labelvec = rtvec_alloc(0);
81335 + line = expand_location(RTL_LOCATION(insn)).line;
81336 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
81337 + MEM_VOLATILE_P(btsq) = 1;
81338 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
81339 + emit_insn_before(btsq, insn);
81340 +}
81341 +
81342 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
81343 +static void kernexec_instrument_retaddr_or(rtx insn)
81344 +{
81345 + rtx orq;
81346 + rtvec argvec, constraintvec, labelvec;
81347 + int line;
81348 +
81349 + // create asm volatile("orq %%r10,(%%rsp)":::)
81350 + argvec = rtvec_alloc(0);
81351 + constraintvec = rtvec_alloc(0);
81352 + labelvec = rtvec_alloc(0);
81353 + line = expand_location(RTL_LOCATION(insn)).line;
81354 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
81355 + MEM_VOLATILE_P(orq) = 1;
81356 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
81357 + emit_insn_before(orq, insn);
81358 +}
81359 +
81360 +/*
81361 + * find all asm level function returns and forcibly set the highest bit of the return address
81362 + */
81363 +static unsigned int execute_kernexec_retaddr(void)
81364 +{
81365 + rtx insn;
81366 +
81367 + // 1. find function returns
81368 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
81369 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
81370 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
81371 + rtx body;
81372 +
81373 + // is it a retn
81374 + if (!JUMP_P(insn))
81375 + continue;
81376 + body = PATTERN(insn);
81377 + if (GET_CODE(body) == PARALLEL)
81378 + body = XVECEXP(body, 0, 0);
81379 + if (GET_CODE(body) != RETURN)
81380 + continue;
81381 + kernexec_instrument_retaddr(insn);
81382 + }
81383 +
81384 +// print_simple_rtl(stderr, get_insns());
81385 +// print_rtl(stderr, get_insns());
81386 +
81387 + return 0;
81388 +}
81389 +
81390 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81391 +{
81392 + const char * const plugin_name = plugin_info->base_name;
81393 + const int argc = plugin_info->argc;
81394 + const struct plugin_argument * const argv = plugin_info->argv;
81395 + int i;
81396 + struct register_pass_info kernexec_reload_pass_info = {
81397 + .pass = &kernexec_reload_pass.pass,
81398 + .reference_pass_name = "ssa",
81399 + .ref_pass_instance_number = 0,
81400 + .pos_op = PASS_POS_INSERT_AFTER
81401 + };
81402 + struct register_pass_info kernexec_fptr_pass_info = {
81403 + .pass = &kernexec_fptr_pass.pass,
81404 + .reference_pass_name = "ssa",
81405 + .ref_pass_instance_number = 0,
81406 + .pos_op = PASS_POS_INSERT_AFTER
81407 + };
81408 + struct register_pass_info kernexec_retaddr_pass_info = {
81409 + .pass = &kernexec_retaddr_pass.pass,
81410 + .reference_pass_name = "pro_and_epilogue",
81411 + .ref_pass_instance_number = 0,
81412 + .pos_op = PASS_POS_INSERT_AFTER
81413 + };
81414 +
81415 + if (!plugin_default_version_check(version, &gcc_version)) {
81416 + error(G_("incompatible gcc/plugin versions"));
81417 + return 1;
81418 + }
81419 +
81420 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
81421 +
81422 + if (TARGET_64BIT == 0)
81423 + return 0;
81424 +
81425 + for (i = 0; i < argc; ++i) {
81426 + if (!strcmp(argv[i].key, "method")) {
81427 + if (!argv[i].value) {
81428 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81429 + continue;
81430 + }
81431 + if (!strcmp(argv[i].value, "bts")) {
81432 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
81433 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
81434 + } else if (!strcmp(argv[i].value, "or")) {
81435 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
81436 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
81437 + fix_register("r10", 1, 1);
81438 + } else
81439 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
81440 + continue;
81441 + }
81442 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81443 + }
81444 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
81445 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
81446 +
81447 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
81448 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
81449 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
81450 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
81451 +
81452 + return 0;
81453 +}
81454 diff --git a/tools/gcc/size_overflow_hash1.h b/tools/gcc/size_overflow_hash1.h
81455 new file mode 100644
81456 index 0000000..16ccac1
81457 --- /dev/null
81458 +++ b/tools/gcc/size_overflow_hash1.h
81459 @@ -0,0 +1,3047 @@
81460 +struct size_overflow_hash size_overflow_hash1[65536] = {
81461 + [10013].file = "security/smack/smackfs.c",
81462 + [10013].name = "smk_write_direct",
81463 + [10013].param3 = 1,
81464 + [10167].file = "sound/core/oss/pcm_plugin.c",
81465 + [10167].name = "snd_pcm_plugin_build",
81466 + [10167].param5 = 1,
81467 + [1020].file = "drivers/usb/misc/usbtest.c",
81468 + [1020].name = "test_unaligned_bulk",
81469 + [1020].param3 = 1,
81470 + [1022].file = "sound/pci/rme9652/rme9652.c",
81471 + [1022].name = "snd_rme9652_playback_copy",
81472 + [1022].param5 = 1,
81473 + [10321].file = "drivers/platform/x86/thinkpad_acpi.c",
81474 + [10321].name = "create_attr_set",
81475 + [10321].param1 = 1,
81476 + [10341].file = "fs/nfsd/nfs4xdr.c",
81477 + [10341].name = "read_buf",
81478 + [10341].param2 = 1,
81479 + [10357].file = "net/sunrpc/cache.c",
81480 + [10357].name = "cache_read",
81481 + [10357].param3 = 1,
81482 + [10397].file = "drivers/gpu/drm/i915/i915_debugfs.c",
81483 + [10397].name = "i915_wedged_write",
81484 + [10397].param3 = 1,
81485 + [10399].file = "kernel/trace/trace.c",
81486 + [10399].name = "trace_seq_to_user",
81487 + [10399].param3 = 1,
81488 + [10414].file = "drivers/tty/vt/vt.c",
81489 + [10414].name = "vc_do_resize",
81490 + [10414].param3 = 1,
81491 + [10414].param4 = 1,
81492 + [10565].file = "drivers/input/touchscreen/ad7879-spi.c",
81493 + [10565].name = "ad7879_spi_multi_read",
81494 + [10565].param3 = 1,
81495 + [10623].file = "drivers/infiniband/core/user_mad.c",
81496 + [10623].name = "ib_umad_write",
81497 + [10623].param3 = 1,
81498 + [10707].file = "fs/nfs/idmap.c",
81499 + [10707].name = "nfs_idmap_request_key",
81500 + [10707].param2 = 1,
81501 + [1073].file = "drivers/block/aoe/aoecmd.c",
81502 + [1073].name = "addtgt",
81503 + [1073].param3 = 1,
81504 + [10745].file = "fs/cifs/connect.c",
81505 + [10745].name = "get_server_iovec",
81506 + [10745].param2 = 1,
81507 + [10750].file = "drivers/net/wireless/iwmc3200wifi/rx.c",
81508 + [10750].name = "iwm_ntf_calib_res",
81509 + [10750].param3 = 1,
81510 + [10773].file = "drivers/input/mousedev.c",
81511 + [10773].name = "mousedev_read",
81512 + [10773].param3 = 1,
81513 + [10777].file = "fs/ntfs/file.c",
81514 + [10777].name = "ntfs_file_buffered_write",
81515 + [10777].param6 = 1,
81516 + [10893].file = "drivers/misc/sgi-gru/gruprocfs.c",
81517 + [10893].name = "options_write",
81518 + [10893].param3 = 1,
81519 + [10919].file = "net/ipv4/netfilter/arp_tables.c",
81520 + [10919].name = "do_arpt_set_ctl",
81521 + [10919].param4 = 1,
81522 + [1107].file = "mm/process_vm_access.c",
81523 + [1107].name = "process_vm_rw_single_vec",
81524 + [1107].param1 = 1,
81525 + [1107].param2 = 1,
81526 + [11230].file = "net/core/neighbour.c",
81527 + [11230].name = "neigh_hash_grow",
81528 + [11230].param2 = 1,
81529 + [11364].file = "fs/ext4/super.c",
81530 + [11364].name = "ext4_kvzalloc",
81531 + [11364].param1 = 1,
81532 + [114].file = "security/selinux/selinuxfs.c",
81533 + [114].name = "sel_write_relabel",
81534 + [114].param3 = 1,
81535 + [11549].file = "drivers/media/rc/redrat3.c",
81536 + [11549].name = "redrat3_transmit_ir",
81537 + [11549].param3 = 1,
81538 + [11568].file = "drivers/gpu/drm/drm_scatter.c",
81539 + [11568].name = "drm_vmalloc_dma",
81540 + [11568].param1 = 1,
81541 + [11582].file = "drivers/scsi/lpfc/lpfc_sli.c",
81542 + [11582].name = "lpfc_sli4_queue_alloc",
81543 + [11582].param3 = 1,
81544 + [11616].file = "security/selinux/selinuxfs.c",
81545 + [11616].name = "sel_write_enforce",
81546 + [11616].param3 = 1,
81547 + [11699].file = "drivers/net/ethernet/neterion/vxge/vxge-config.h",
81548 + [11699].name = "vxge_os_dma_malloc",
81549 + [11699].param2 = 1,
81550 + [11766].file = "drivers/block/paride/pt.c",
81551 + [11766].name = "pt_read",
81552 + [11766].param3 = 1,
81553 + [11784].file = "fs/bio.c",
81554 + [11784].name = "bio_kmalloc",
81555 + [11784].param2 = 1,
81556 + [11919].file = "drivers/lguest/core.c",
81557 + [11919].name = "__lgread",
81558 + [11919].param4 = 1,
81559 + [11925].file = "drivers/media/video/cx18/cx18-fileops.c",
81560 + [11925].name = "cx18_copy_mdl_to_user",
81561 + [11925].param4 = 1,
81562 + [11985].file = "drivers/block/floppy.c",
81563 + [11985].name = "fd_copyin",
81564 + [11985].param3 = 1,
81565 + [11986].file = "drivers/net/usb/asix.c",
81566 + [11986].name = "asix_read_cmd",
81567 + [11986].param5 = 1,
81568 + [12018].file = "sound/core/oss/pcm_oss.c",
81569 + [12018].name = "snd_pcm_oss_read1",
81570 + [12018].param3 = 1,
81571 + [12059].file = "drivers/net/wireless/libertas/debugfs.c",
81572 + [12059].name = "lbs_debugfs_write",
81573 + [12059].param3 = 1,
81574 + [12151].file = "fs/compat.c",
81575 + [12151].name = "compat_rw_copy_check_uvector",
81576 + [12151].param3 = 1,
81577 + [12205].file = "fs/reiserfs/journal.c",
81578 + [12205].name = "reiserfs_allocate_list_bitmaps",
81579 + [12205].param3 = 1,
81580 + [12234].file = "include/acpi/platform/aclinux.h",
81581 + [12234].name = "acpi_os_allocate",
81582 + [12234].param1 = 1,
81583 + [1227].file = "lib/cpu_rmap.c",
81584 + [1227].name = "alloc_cpu_rmap",
81585 + [1227].param1 = 1,
81586 + [12395].file = "drivers/char/hw_random/core.c",
81587 + [12395].name = "rng_dev_read",
81588 + [12395].param3 = 1,
81589 + [12602].file = "net/sunrpc/cache.c",
81590 + [12602].name = "cache_downcall",
81591 + [12602].param3 = 1,
81592 + [12712].file = "drivers/net/wimax/i2400m/fw.c",
81593 + [12712].name = "i2400m_zrealloc_2x",
81594 + [12712].param3 = 1,
81595 + [12755].file = "sound/drivers/opl4/opl4_proc.c",
81596 + [12755].name = "snd_opl4_mem_proc_read",
81597 + [12755].param5 = 1,
81598 + [12833].file = "net/sctp/auth.c",
81599 + [12833].name = "sctp_auth_create_key",
81600 + [12833].param1 = 1,
81601 + [12840].file = "net/sctp/tsnmap.c",
81602 + [12840].name = "sctp_tsnmap_mark",
81603 + [12840].param2 = 1,
81604 + [12931].file = "drivers/hid/hid-roccat.c",
81605 + [12931].name = "roccat_read",
81606 + [12931].param3 = 1,
81607 + [12954].file = "fs/proc/base.c",
81608 + [12954].name = "oom_adjust_write",
81609 + [12954].param3 = 1,
81610 + [13103].file = "drivers/acpi/acpica/utobject.c",
81611 + [13103].name = "acpi_ut_create_string_object",
81612 + [13103].param1 = 1,
81613 + [13121].file = "net/ipv4/ip_sockglue.c",
81614 + [13121].name = "do_ip_setsockopt",
81615 + [13121].param5 = 1,
81616 + [1327].file = "net/netfilter/nfnetlink_log.c",
81617 + [1327].name = "nfulnl_alloc_skb",
81618 + [1327].param2 = 1,
81619 + [13337].file = "net/core/iovec.c",
81620 + [13337].name = "csum_partial_copy_fromiovecend",
81621 + [13337].param4 = 1,
81622 + [13339].file = "security/smack/smackfs.c",
81623 + [13339].name = "smk_write_netlbladdr",
81624 + [13339].param3 = 1,
81625 + [13342].file = "fs/jbd2/journal.c",
81626 + [13342].name = "jbd2_alloc",
81627 + [13342].param1 = 1,
81628 + [13384].file = "drivers/char/virtio_console.c",
81629 + [13384].name = "alloc_buf",
81630 + [13384].param1 = 1,
81631 + [13412].file = "fs/proc/base.c",
81632 + [13412].name = "oom_score_adj_write",
81633 + [13412].param3 = 1,
81634 + [13559].file = "drivers/media/video/ivtv/ivtv-fileops.c",
81635 + [13559].name = "ivtv_read",
81636 + [13559].param3 = 1,
81637 + [13618].file = "drivers/net/team/team.c",
81638 + [13618].name = "team_options_register",
81639 + [13618].param3 = 1,
81640 + [13659].file = "drivers/net/wan/hdlc.c",
81641 + [13659].name = "attach_hdlc_protocol",
81642 + [13659].param3 = 1,
81643 + [13708].file = "drivers/usb/misc/usbtest.c",
81644 + [13708].name = "simple_alloc_urb",
81645 + [13708].param3 = 1,
81646 + [13805].file = "drivers/misc/altera-stapl/altera-jtag.c",
81647 + [13805].name = "altera_swap_dr",
81648 + [13805].param2 = 1,
81649 + [13868].file = "fs/lockd/mon.c",
81650 + [13868].name = "nsm_create_handle",
81651 + [13868].param4 = 1,
81652 + [13924].file = "net/ipv4/netfilter/ip_tables.c",
81653 + [13924].name = "do_ipt_set_ctl",
81654 + [13924].param4 = 1,
81655 + [14019].file = "net/dns_resolver/dns_key.c",
81656 + [14019].name = "dns_resolver_instantiate",
81657 + [14019].param2 = 1,
81658 + [14019].param3 = 1,
81659 + [14025].file = "net/ax25/af_ax25.c",
81660 + [14025].name = "ax25_setsockopt",
81661 + [14025].param5 = 1,
81662 + [14029].file = "drivers/spi/spidev.c",
81663 + [14029].name = "spidev_compat_ioctl",
81664 + [14029].param2 = 1,
81665 + [14090].file = "drivers/bluetooth/btmrvl_debugfs.c",
81666 + [14090].name = "btmrvl_hsmode_write",
81667 + [14090].param3 = 1,
81668 + [14149].file = "drivers/hid/hidraw.c",
81669 + [14149].name = "hidraw_ioctl",
81670 + [14149].param2 = 1,
81671 + [14153].file = "drivers/staging/bcm/led_control.c",
81672 + [14153].name = "ValidateDSDParamsChecksum",
81673 + [14153].param3 = 1,
81674 + [14174].file = "sound/pci/es1938.c",
81675 + [14174].name = "snd_es1938_capture_copy",
81676 + [14174].param5 = 1,
81677 + [14207].file = "drivers/media/video/v4l2-event.c",
81678 + [14207].name = "v4l2_event_subscribe",
81679 + [14207].param3 = 1,
81680 + [14241].file = "drivers/platform/x86/asus_acpi.c",
81681 + [14241].name = "brn_proc_write",
81682 + [14241].param3 = 1,
81683 + [14345].file = "fs/cachefiles/daemon.c",
81684 + [14345].name = "cachefiles_daemon_write",
81685 + [14345].param3 = 1,
81686 + [14347].file = "drivers/media/dvb/dvb-core/dvb_ca_en50221.c",
81687 + [14347].name = "dvb_ca_en50221_io_write",
81688 + [14347].param3 = 1,
81689 + [14566].file = "drivers/pci/hotplug/ibmphp_ebda.c",
81690 + [14566].name = "alloc_ebda_hpc",
81691 + [14566].param1 = 1,
81692 + [14566].param2 = 1,
81693 + [1458].file = "drivers/misc/lkdtm.c",
81694 + [1458].name = "direct_entry",
81695 + [1458].param3 = 1,
81696 + [14646].file = "fs/compat.c",
81697 + [14646].name = "compat_writev",
81698 + [14646].param3 = 1,
81699 + [14684].file = "drivers/media/video/stk-webcam.c",
81700 + [14684].name = "stk_allocate_buffers",
81701 + [14684].param2 = 1,
81702 + [14736].file = "drivers/usb/misc/usbtest.c",
81703 + [14736].name = "unlink_queued",
81704 + [14736].param3 = 1,
81705 + [1482].file = "drivers/scsi/scsi_netlink.c",
81706 + [1482].name = "scsi_nl_send_vendor_msg",
81707 + [1482].param5 = 1,
81708 + [15017].file = "drivers/edac/edac_device.c",
81709 + [15017].name = "edac_device_alloc_ctl_info",
81710 + [15017].param1 = 1,
81711 + [15044].file = "drivers/uio/uio.c",
81712 + [15044].name = "uio_write",
81713 + [15044].param3 = 1,
81714 + [15087].file = "fs/bio.c",
81715 + [15087].name = "bio_map_kern",
81716 + [15087].param2 = 1,
81717 + [15087].param3 = 1,
81718 + [15112].file = "drivers/xen/evtchn.c",
81719 + [15112].name = "evtchn_write",
81720 + [15112].param3 = 1,
81721 + [15130].file = "net/bluetooth/hci_core.c",
81722 + [15130].name = "hci_send_cmd",
81723 + [15130].param3 = 1,
81724 + [15202].file = "net/bluetooth/rfcomm/tty.c",
81725 + [15202].name = "rfcomm_wmalloc",
81726 + [15202].param2 = 1,
81727 + [15274].file = "crypto/shash.c",
81728 + [15274].name = "crypto_shash_setkey",
81729 + [15274].param3 = 1,
81730 + [15354].file = "drivers/isdn/mISDN/socket.c",
81731 + [15354].name = "mISDN_sock_sendmsg",
81732 + [15354].param4 = 1,
81733 + [15361].file = "drivers/char/agp/generic.c",
81734 + [15361].name = "agp_allocate_memory",
81735 + [15361].param2 = 1,
81736 + [15497].file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
81737 + [15497].name = "ts_read",
81738 + [15497].param3 = 1,
81739 + [15551].file = "net/ipv4/netfilter/ipt_CLUSTERIP.c",
81740 + [15551].name = "clusterip_proc_write",
81741 + [15551].param3 = 1,
81742 + [15701].file = "drivers/hid/hid-roccat-common.c",
81743 + [15701].name = "roccat_common_receive",
81744 + [15701].param4 = 1,
81745 + [1572].file = "net/ceph/pagevec.c",
81746 + [1572].name = "ceph_copy_page_vector_to_user",
81747 + [1572].param4 = 1,
81748 + [15814].file = "net/mac80211/debugfs_netdev.c",
81749 + [15814].name = "ieee80211_if_write",
81750 + [15814].param3 = 1,
81751 + [15883].file = "security/keys/keyctl.c",
81752 + [15883].name = "sys_add_key",
81753 + [15883].param4 = 1,
81754 + [15884].file = "fs/exofs/super.c",
81755 + [15884].name = "exofs_read_lookup_dev_table",
81756 + [15884].param3 = 1,
81757 + [16037].file = "drivers/staging/media/easycap/easycap_sound.c",
81758 + [16037].name = "easycap_alsa_vmalloc",
81759 + [16037].param2 = 1,
81760 + [16073].file = "net/sctp/socket.c",
81761 + [16073].name = "sctp_setsockopt",
81762 + [16073].param5 = 1,
81763 + [16132].file = "drivers/staging/vme/devices/vme_user.c",
81764 + [16132].name = "buffer_from_user",
81765 + [16132].param3 = 1,
81766 + [16138].file = "security/selinux/ss/services.c",
81767 + [16138].name = "security_context_to_sid_force",
81768 + [16138].param2 = 1,
81769 + [16166].file = "drivers/platform/x86/thinkpad_acpi.c",
81770 + [16166].name = "dispatch_proc_write",
81771 + [16166].param3 = 1,
81772 + [16229].file = "drivers/scsi/scsi_transport_iscsi.c",
81773 + [16229].name = "iscsi_offload_mesg",
81774 + [16229].param5 = 1,
81775 + [16353].file = "drivers/base/regmap/regmap.c",
81776 + [16353].name = "regmap_raw_write",
81777 + [16353].param4 = 1,
81778 + [16383].file = "fs/proc/base.c",
81779 + [16383].name = "comm_write",
81780 + [16383].param3 = 1,
81781 + [16396].file = "drivers/misc/altera-stapl/altera-jtag.c",
81782 + [16396].name = "altera_irscan",
81783 + [16396].param2 = 1,
81784 + [16447].file = "drivers/hid/usbhid/hiddev.c",
81785 + [16447].name = "hiddev_ioctl",
81786 + [16447].param2 = 1,
81787 + [16453].file = "include/linux/slab.h",
81788 + [16453].name = "kzalloc",
81789 + [16453].param1 = 1,
81790 + [16605].file = "fs/ecryptfs/miscdev.c",
81791 + [16605].name = "ecryptfs_send_miscdev",
81792 + [16605].param2 = 1,
81793 + [16606].file = "drivers/ide/ide-tape.c",
81794 + [16606].name = "idetape_chrdev_write",
81795 + [16606].param3 = 1,
81796 + [16637].file = "security/keys/encrypted-keys/encrypted.c",
81797 + [16637].name = "datablob_hmac_verify",
81798 + [16637].param4 = 1,
81799 + [16828].file = "net/batman-adv/hash.c",
81800 + [16828].name = "hash_new",
81801 + [16828].param1 = 1,
81802 + [16853].file = "drivers/net/ethernet/chelsio/cxgb4vf/sge.c",
81803 + [16853].name = "t4vf_pktgl_to_skb",
81804 + [16853].param2 = 1,
81805 + [16911].file = "drivers/media/dvb/ttpci/av7110_hw.c",
81806 + [16911].name = "LoadBitmap",
81807 + [16911].param2 = 1,
81808 + [169].file = "drivers/net/ethernet/amd/pcnet32.c",
81809 + [169].name = "pcnet32_realloc_rx_ring",
81810 + [169].param3 = 1,
81811 + [17075].file = "sound/isa/gus/gus_dram.c",
81812 + [17075].name = "snd_gus_dram_write",
81813 + [17075].param4 = 1,
81814 + [17133].file = "drivers/usb/misc/iowarrior.c",
81815 + [17133].name = "iowarrior_read",
81816 + [17133].param3 = 1,
81817 + [17185].file = "net/wireless/scan.c",
81818 + [17185].name = "cfg80211_inform_bss",
81819 + [17185].param8 = 1,
81820 + [17349].file = "net/tipc/link.c",
81821 + [17349].name = "tipc_link_send_sections_fast",
81822 + [17349].param4 = 1,
81823 + [17377].file = "drivers/usb/class/cdc-wdm.c",
81824 + [17377].name = "wdm_write",
81825 + [17377].param3 = 1,
81826 + [17459].file = "drivers/usb/misc/rio500.c",
81827 + [17459].name = "write_rio",
81828 + [17459].param3 = 1,
81829 + [17460].file = "fs/nfsd/nfscache.c",
81830 + [17460].name = "nfsd_cache_update",
81831 + [17460].param3 = 1,
81832 + [17492].file = "net/dccp/proto.c",
81833 + [17492].name = "do_dccp_setsockopt",
81834 + [17492].param5 = 1,
81835 + [1754].file = "sound/core/oss/pcm_oss.c",
81836 + [1754].name = "snd_pcm_oss_write",
81837 + [1754].param3 = 1,
81838 + [17604].file = "fs/proc/generic.c",
81839 + [17604].name = "__proc_file_read",
81840 + [17604].param3 = 1,
81841 + [17718].file = "net/caif/caif_socket.c",
81842 + [17718].name = "setsockopt",
81843 + [17718].param5 = 1,
81844 + [17828].file = "kernel/sched/core.c",
81845 + [17828].name = "sched_feat_write",
81846 + [17828].param3 = 1,
81847 + [17841].file = "drivers/misc/tifm_core.c",
81848 + [17841].name = "tifm_alloc_adapter",
81849 + [17841].param1 = 1,
81850 + [17946].file = "drivers/net/wireless/libertas/if_spi.c",
81851 + [17946].name = "if_spi_host_to_card",
81852 + [17946].param4 = 1,
81853 + [1800].file = "drivers/media/dvb/dvb-core/dmxdev.c",
81854 + [1800].name = "dvb_dvr_do_ioctl",
81855 + [1800].param3 = 1,
81856 + [18119].file = "drivers/misc/iwmc3200top/fw-download.c",
81857 + [18119].name = "iwmct_fw_parser_init",
81858 + [18119].param4 = 1,
81859 + [18140].file = "drivers/scsi/pm8001/pm8001_ctl.c",
81860 + [18140].name = "pm8001_store_update_fw",
81861 + [18140].param4 = 1,
81862 + [18191].file = "sound/pci/hda/patch_realtek.c",
81863 + [18191].name = "new_bind_ctl",
81864 + [18191].param2 = 1,
81865 + [18224].file = "drivers/xen/grant-table.c",
81866 + [18224].name = "gnttab_map",
81867 + [18224].param2 = 1,
81868 + [18232].file = "fs/nfs/write.c",
81869 + [18232].name = "nfs_writedata_alloc",
81870 + [18232].param1 = 1,
81871 + [18247].file = "drivers/char/agp/generic.c",
81872 + [18247].name = "agp_create_user_memory",
81873 + [18247].param1 = 1,
81874 + [18303].file = "fs/xattr.c",
81875 + [18303].name = "getxattr",
81876 + [18303].param4 = 1,
81877 + [18353].file = "net/rfkill/core.c",
81878 + [18353].name = "rfkill_fop_read",
81879 + [18353].param3 = 1,
81880 + [18386].file = "fs/read_write.c",
81881 + [18386].name = "vfs_readv",
81882 + [18386].param3 = 1,
81883 + [18391].file = "fs/ocfs2/stack_user.c",
81884 + [18391].name = "ocfs2_control_write",
81885 + [18391].param3 = 1,
81886 + [183].file = "crypto/ahash.c",
81887 + [183].name = "crypto_ahash_setkey",
81888 + [183].param3 = 1,
81889 + [18406].file = "drivers/media/video/tm6000/tm6000-core.c",
81890 + [18406].name = "tm6000_read_write_usb",
81891 + [18406].param7 = 1,
81892 + [1845].file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
81893 + [1845].name = "rt2x00debug_write_rf",
81894 + [1845].param3 = 1,
81895 + [18465].file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c",
81896 + [18465].name = "cxgb_alloc_mem",
81897 + [18465].param1 = 1,
81898 + [184].file = "drivers/firewire/nosy.c",
81899 + [184].name = "packet_buffer_init",
81900 + [184].param2 = 1,
81901 + [1858].file = "net/ipv6/netfilter/ip6_tables.c",
81902 + [1858].name = "do_ip6t_set_ctl",
81903 + [1858].param4 = 1,
81904 + [18659].file = "drivers/media/dvb/dvb-core/dvbdev.c",
81905 + [18659].name = "dvb_usercopy",
81906 + [18659].param2 = 1,
81907 + [18722].file = "security/tomoyo/condition.c",
81908 + [18722].name = "tomoyo_scan_bprm",
81909 + [18722].param2 = 1,
81910 + [18722].param4 = 1,
81911 + [18775].file = "include/linux/textsearch.h",
81912 + [18775].name = "alloc_ts_config",
81913 + [18775].param1 = 1,
81914 + [18940].file = "drivers/usb/host/hwa-hc.c",
81915 + [18940].name = "__hwahc_op_set_gtk",
81916 + [18940].param4 = 1,
81917 + [19012].file = "drivers/acpi/event.c",
81918 + [19012].name = "acpi_system_read_event",
81919 + [19012].param3 = 1,
81920 + [19028].file = "mm/filemap.c",
81921 + [19028].name = "iov_iter_copy_from_user_atomic",
81922 + [19028].param4 = 1,
81923 + [19107].file = "security/smack/smackfs.c",
81924 + [19107].name = "smk_write_load_list",
81925 + [19107].param3 = 1,
81926 + [19240].file = "net/sctp/socket.c",
81927 + [19240].name = "sctp_setsockopt_delayed_ack",
81928 + [19240].param3 = 1,
81929 + [19274].file = "net/core/pktgen.c",
81930 + [19274].name = "pktgen_if_write",
81931 + [19274].param3 = 1,
81932 + [19286].file = "drivers/base/regmap/regmap.c",
81933 + [19286].name = "_regmap_raw_write",
81934 + [19286].param4 = 1,
81935 + [19308].file = "drivers/char/mem.c",
81936 + [19308].name = "read_oldmem",
81937 + [19308].param3 = 1,
81938 + [19343].file = "security/keys/encrypted-keys/encrypted.c",
81939 + [19343].name = "datablob_hmac_append",
81940 + [19343].param3 = 1,
81941 + [19349].file = "drivers/acpi/acpica/utobject.c",
81942 + [19349].name = "acpi_ut_create_package_object",
81943 + [19349].param1 = 1,
81944 + [19453].file = "drivers/net/ethernet/chelsio/cxgb/sge.c",
81945 + [19453].name = "sge_rx",
81946 + [19453].param3 = 1,
81947 + [19504].file = "drivers/usb/serial/garmin_gps.c",
81948 + [19504].name = "pkt_add",
81949 + [19504].param3 = 1,
81950 + [19522].file = "mm/percpu.c",
81951 + [19522].name = "pcpu_mem_zalloc",
81952 + [19522].param1 = 1,
81953 + [19548].file = "drivers/scsi/qla2xxx/qla_init.c",
81954 + [19548].name = "qla2x00_get_ctx_sp",
81955 + [19548].param3 = 1,
81956 + [19592].file = "net/dccp/proto.c",
81957 + [19592].name = "dccp_setsockopt_service",
81958 + [19592].param4 = 1,
81959 + [19726].file = "kernel/trace/trace.c",
81960 + [19726].name = "tracing_set_trace_write",
81961 + [19726].param3 = 1,
81962 + [19738].file = "fs/sysfs/file.c",
81963 + [19738].name = "sysfs_write_file",
81964 + [19738].param3 = 1,
81965 + [19833].file = "drivers/xen/privcmd.c",
81966 + [19833].name = "gather_array",
81967 + [19833].param3 = 1,
81968 + [19910].file = "drivers/media/video/saa7164/saa7164-buffer.c",
81969 + [19910].name = "saa7164_buffer_alloc_user",
81970 + [19910].param2 = 1,
81971 + [19920].file = "drivers/input/joydev.c",
81972 + [19920].name = "joydev_ioctl",
81973 + [19920].param2 = 1,
81974 + [19931].file = "drivers/usb/misc/ftdi-elan.c",
81975 + [19931].name = "ftdi_elan_write",
81976 + [19931].param3 = 1,
81977 + [19960].file = "drivers/usb/class/usblp.c",
81978 + [19960].name = "usblp_read",
81979 + [19960].param3 = 1,
81980 + [1996].file = "drivers/scsi/libsrp.c",
81981 + [1996].name = "srp_target_alloc",
81982 + [1996].param3 = 1,
81983 + [20023].file = "drivers/media/video/gspca/gspca.c",
81984 + [20023].name = "dev_read",
81985 + [20023].param3 = 1,
81986 + [20207].file = "net/core/sock.c",
81987 + [20207].name = "sock_alloc_send_pskb",
81988 + [20207].param2 = 1,
81989 + [20263].file = "kernel/trace/trace_events.c",
81990 + [20263].name = "event_filter_write",
81991 + [20263].param3 = 1,
81992 + [20314].file = "drivers/gpu/drm/drm_hashtab.c",
81993 + [20314].name = "drm_ht_create",
81994 + [20314].param2 = 1,
81995 + [20320].file = "drivers/mfd/sm501.c",
81996 + [20320].name = "sm501_create_subdev",
81997 + [20320].param3 = 1,
81998 + [20320].param4 = 1,
81999 + [20376].file = "mm/nobootmem.c",
82000 + [20376].name = "__alloc_bootmem_nopanic",
82001 + [20376].param1 = 1,
82002 + [20409].file = "drivers/media/dvb/dvb-usb/opera1.c",
82003 + [20409].name = "opera1_usb_i2c_msgxfer",
82004 + [20409].param4 = 1,
82005 + [20473].file = "drivers/mtd/mtdchar.c",
82006 + [20473].name = "mtdchar_write",
82007 + [20473].param3 = 1,
82008 + [20611].file = "net/netfilter/x_tables.c",
82009 + [20611].name = "xt_alloc_table_info",
82010 + [20611].param1 = 1,
82011 + [20618].file = "drivers/staging/crystalhd/crystalhd_lnx.c",
82012 + [20618].name = "chd_dec_fetch_cdata",
82013 + [20618].param3 = 1,
82014 + [20713].file = "drivers/gpu/drm/ttm/ttm_bo_vm.c",
82015 + [20713].name = "ttm_bo_io",
82016 + [20713].param5 = 1,
82017 + [20801].file = "drivers/vhost/vhost.c",
82018 + [20801].name = "vhost_add_used_n",
82019 + [20801].param3 = 1,
82020 + [20835].file = "drivers/isdn/i4l/isdn_common.c",
82021 + [20835].name = "isdn_read",
82022 + [20835].param3 = 1,
82023 + [20951].file = "crypto/rng.c",
82024 + [20951].name = "rngapi_reset",
82025 + [20951].param3 = 1,
82026 + [21125].file = "fs/gfs2/dir.c",
82027 + [21125].name = "gfs2_alloc_sort_buffer",
82028 + [21125].param1 = 1,
82029 + [21132].file = "kernel/cgroup.c",
82030 + [21132].name = "cgroup_write_X64",
82031 + [21132].param5 = 1,
82032 + [21138].file = "drivers/uio/uio.c",
82033 + [21138].name = "uio_read",
82034 + [21138].param3 = 1,
82035 + [21193].file = "net/wireless/sme.c",
82036 + [21193].name = "cfg80211_disconnected",
82037 + [21193].param4 = 1,
82038 + [21312].file = "lib/ts_kmp.c",
82039 + [21312].name = "kmp_init",
82040 + [21312].param2 = 1,
82041 + [21335].file = "net/econet/af_econet.c",
82042 + [21335].name = "econet_sendmsg",
82043 + [21335].param4 = 1,
82044 + [21406].file = "fs/libfs.c",
82045 + [21406].name = "simple_write_to_buffer",
82046 + [21406].param2 = 1,
82047 + [21406].param5 = 1,
82048 + [21451].file = "net/netfilter/ipvs/ip_vs_ctl.c",
82049 + [21451].name = "do_ip_vs_set_ctl",
82050 + [21451].param4 = 1,
82051 + [21459].file = "security/smack/smackfs.c",
82052 + [21459].name = "smk_write_doi",
82053 + [21459].param3 = 1,
82054 + [21508].file = "include/linux/usb/wusb.h",
82055 + [21508].name = "wusb_prf_64",
82056 + [21508].param7 = 1,
82057 + [21511].file = "drivers/input/ff-core.c",
82058 + [21511].name = "input_ff_create",
82059 + [21511].param2 = 1,
82060 + [21538].file = "net/bluetooth/l2cap_sock.c",
82061 + [21538].name = "l2cap_sock_setsockopt",
82062 + [21538].param5 = 1,
82063 + [21543].file = "drivers/media/video/gspca/gspca.c",
82064 + [21543].name = "frame_alloc",
82065 + [21543].param4 = 1,
82066 + [21608].file = "drivers/char/tpm/tpm.c",
82067 + [21608].name = "tpm_write",
82068 + [21608].param3 = 1,
82069 + [2160].file = "drivers/net/wireless/ray_cs.c",
82070 + [2160].name = "int_proc_write",
82071 + [2160].param3 = 1,
82072 + [21632].file = "fs/afs/cell.c",
82073 + [21632].name = "afs_cell_create",
82074 + [21632].param2 = 1,
82075 + [21679].file = "drivers/net/wireless/ath/carl9170/debug.c",
82076 + [21679].name = "carl9170_debugfs_write",
82077 + [21679].param3 = 1,
82078 + [21784].file = "crypto/ahash.c",
82079 + [21784].name = "ahash_setkey_unaligned",
82080 + [21784].param3 = 1,
82081 + [2180].file = "drivers/char/ppdev.c",
82082 + [2180].name = "pp_write",
82083 + [2180].param3 = 1,
82084 + [21810].file = "net/core/netprio_cgroup.c",
82085 + [21810].name = "extend_netdev_table",
82086 + [21810].param2 = 1,
82087 + [21906].file = "net/atm/mpc.c",
82088 + [21906].name = "copy_macs",
82089 + [21906].param4 = 1,
82090 + [21946].file = "fs/nfs/idmap.c",
82091 + [21946].name = "nfs_map_name_to_uid",
82092 + [21946].param3 = 1,
82093 + [22052].file = "drivers/net/ethernet/chelsio/cxgb3/sge.c",
82094 + [22052].name = "get_packet_pg",
82095 + [22052].param4 = 1,
82096 + [22085].file = "drivers/staging/sep/sep_driver.c",
82097 + [22085].name = "sep_lock_user_pages",
82098 + [22085].param2 = 1,
82099 + [22085].param3 = 1,
82100 + [22190].file = "drivers/char/tpm/tpm.c",
82101 + [22190].name = "tpm_read",
82102 + [22190].param3 = 1,
82103 + [22291].file = "net/core/pktgen.c",
82104 + [22291].name = "pgctrl_write",
82105 + [22291].param3 = 1,
82106 + [22439].file = "fs/afs/rxrpc.c",
82107 + [22439].name = "afs_alloc_flat_call",
82108 + [22439].param2 = 1,
82109 + [22439].param3 = 1,
82110 + [2243].file = "drivers/scsi/scsi_tgt_lib.c",
82111 + [2243].name = "scsi_tgt_kspace_exec",
82112 + [2243].param8 = 1,
82113 + [22440].file = "drivers/uwb/neh.c",
82114 + [22440].name = "uwb_rc_neh_grok_event",
82115 + [22440].param3 = 1,
82116 + [22611].file = "drivers/staging/android/logger.c",
82117 + [22611].name = "do_write_log_from_user",
82118 + [22611].param3 = 1,
82119 + [22614].file = "drivers/media/video/cx18/cx18-fileops.c",
82120 + [22614].name = "cx18_copy_buf_to_user",
82121 + [22614].param4 = 1,
82122 + [22667].file = "drivers/misc/altera-stapl/altera-jtag.c",
82123 + [22667].name = "altera_set_ir_post",
82124 + [22667].param2 = 1,
82125 + [22772].file = "drivers/target/iscsi/iscsi_target_erl1.c",
82126 + [22772].name = "iscsit_dump_data_payload",
82127 + [22772].param2 = 1,
82128 + [22777].file = "drivers/infiniband/ulp/srp/ib_srp.c",
82129 + [22777].name = "srp_alloc_iu",
82130 + [22777].param2 = 1,
82131 + [22811].file = "drivers/usb/dwc3/debugfs.c",
82132 + [22811].name = "dwc3_mode_write",
82133 + [22811].param3 = 1,
82134 + [22817].file = "drivers/media/video/usbvision/usbvision-core.c",
82135 + [22817].name = "usbvision_rvmalloc",
82136 + [22817].param1 = 1,
82137 + [22864].file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
82138 + [22864].name = "ath6kl_add_bss_if_needed",
82139 + [22864].param6 = 1,
82140 + [2286].file = "drivers/scsi/mvumi.c",
82141 + [2286].name = "mvumi_alloc_mem_resource",
82142 + [2286].param3 = 1,
82143 + [22904].file = "security/selinux/ss/services.c",
82144 + [22904].name = "security_context_to_sid_default",
82145 + [22904].param2 = 1,
82146 + [22932].file = "fs/compat.c",
82147 + [22932].name = "compat_sys_writev",
82148 + [22932].param3 = 1,
82149 + [2302].file = "drivers/media/video/stk-webcam.c",
82150 + [2302].name = "v4l_stk_read",
82151 + [2302].param3 = 1,
82152 + [2307].file = "drivers/pcmcia/cistpl.c",
82153 + [2307].name = "pcmcia_replace_cis",
82154 + [2307].param3 = 1,
82155 + [23117].file = "drivers/media/dvb/ttpci/av7110_av.c",
82156 + [23117].name = "dvb_audio_write",
82157 + [23117].param3 = 1,
82158 + [23220].file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
82159 + [23220].name = "do_dmabuf_dirty_sou",
82160 + [23220].param7 = 1,
82161 + [23232].file = "drivers/md/persistent-data/dm-space-map-checker.c",
82162 + [23232].name = "sm_checker_extend",
82163 + [23232].param2 = 1,
82164 + [2324].file = "net/ieee802154/wpan-class.c",
82165 + [2324].name = "wpan_phy_alloc",
82166 + [2324].param1 = 1,
82167 + [2328].file = "kernel/trace/ftrace.c",
82168 + [2328].name = "ftrace_pid_write",
82169 + [2328].param3 = 1,
82170 + [23290].file = "fs/proc/base.c",
82171 + [23290].name = "mem_rw",
82172 + [23290].param3 = 1,
82173 + [23449].file = "crypto/blkcipher.c",
82174 + [23449].name = "blkcipher_next_slow",
82175 + [23449].param3 = 1,
82176 + [23449].param4 = 1,
82177 + [23535].file = "ipc/sem.c",
82178 + [23535].name = "sys_semtimedop",
82179 + [23535].param3 = 1,
82180 + [2357].file = "drivers/usb/serial/garmin_gps.c",
82181 + [2357].name = "garmin_read_process",
82182 + [2357].param3 = 1,
82183 + [23589].file = "kernel/relay.c",
82184 + [23589].name = "subbuf_read_actor",
82185 + [23589].param3 = 1,
82186 + [23848].file = "crypto/blkcipher.c",
82187 + [23848].name = "async_setkey",
82188 + [23848].param3 = 1,
82189 + [2386].file = "drivers/acpi/acpica/exnames.c",
82190 + [2386].name = "acpi_ex_allocate_name_string",
82191 + [2386].param2 = 1,
82192 + [2389].file = "net/core/sock.c",
82193 + [2389].name = "sock_rmalloc",
82194 + [2389].param2 = 1,
82195 + [23994].file = "net/bluetooth/mgmt.c",
82196 + [23994].name = "set_powered",
82197 + [23994].param4 = 1,
82198 + [23999].file = "sound/pci/rme9652/hdsp.c",
82199 + [23999].name = "snd_hdsp_capture_copy",
82200 + [23999].param5 = 1,
82201 + [24233].file = "drivers/pci/pcie/aer/aer_inject.c",
82202 + [24233].name = "aer_inject_write",
82203 + [24233].param3 = 1,
82204 + [24359].file = "kernel/power/qos.c",
82205 + [24359].name = "pm_qos_power_write",
82206 + [24359].param3 = 1,
82207 + [24457].file = "fs/btrfs/backref.c",
82208 + [24457].name = "init_data_container",
82209 + [24457].param1 = 1,
82210 + [24719].file = "drivers/input/evdev.c",
82211 + [24719].name = "bits_to_user",
82212 + [24719].param3 = 1,
82213 + [2472].file = "net/ipv4/netfilter/ip_tables.c",
82214 + [2472].name = "compat_do_ipt_set_ctl",
82215 + [2472].param4 = 1,
82216 + [24755].file = "drivers/infiniband/hw/qib/qib_diag.c",
82217 + [24755].name = "qib_diag_write",
82218 + [24755].param3 = 1,
82219 + [24805].file = "security/keys/user_defined.c",
82220 + [24805].name = "user_update",
82221 + [24805].param3 = 1,
82222 + [25036].file = "fs/pipe.c",
82223 + [25036].name = "pipe_iov_copy_from_user",
82224 + [25036].param3 = 1,
82225 + [25078].file = "drivers/net/wireless/p54/fwio.c",
82226 + [25078].name = "p54_download_eeprom",
82227 + [25078].param4 = 1,
82228 + [25127].file = "drivers/scsi/device_handler/scsi_dh_alua.c",
82229 + [25127].name = "realloc_buffer",
82230 + [25127].param2 = 1,
82231 + [25145].file = "net/tipc/link.c",
82232 + [25145].name = "link_send_sections_long",
82233 + [25145].param4 = 1,
82234 + [25157].file = "security/keys/request_key_auth.c",
82235 + [25157].name = "request_key_auth_new",
82236 + [25157].param3 = 1,
82237 + [25158].file = "drivers/net/ethernet/mellanox/mlx4/en_rx.c",
82238 + [25158].name = "mlx4_en_create_rx_ring",
82239 + [25158].param3 = 1,
82240 + [25267].file = "fs/configfs/file.c",
82241 + [25267].name = "configfs_write_file",
82242 + [25267].param3 = 1,
82243 + [25495].file = "drivers/scsi/bfa/bfad_debugfs.c",
82244 + [25495].name = "bfad_debugfs_write_regwr",
82245 + [25495].param3 = 1,
82246 + [25558].file = "fs/proc/task_mmu.c",
82247 + [25558].name = "clear_refs_write",
82248 + [25558].param3 = 1,
82249 + [25692].file = "drivers/net/wireless/ath/ath6kl/wmi.c",
82250 + [25692].name = "ath6kl_wmi_send_action_cmd",
82251 + [25692].param7 = 1,
82252 + [25765].file = "drivers/media/dvb/b2c2/flexcop.c",
82253 + [25765].name = "flexcop_device_kmalloc",
82254 + [25765].param1 = 1,
82255 + [26100].file = "sound/core/info.c",
82256 + [26100].name = "snd_info_entry_write",
82257 + [26100].param3 = 1,
82258 + [26256].file = "fs/hpfs/name.c",
82259 + [26256].name = "hpfs_translate_name",
82260 + [26256].param3 = 1,
82261 + [26394].file = "drivers/hid/hidraw.c",
82262 + [26394].name = "hidraw_get_report",
82263 + [26394].param3 = 1,
82264 + [26494].file = "kernel/signal.c",
82265 + [26494].name = "sys_rt_sigpending",
82266 + [26494].param2 = 1,
82267 + [26497].file = "security/keys/keyctl.c",
82268 + [26497].name = "sys_keyctl",
82269 + [26497].param4 = 1,
82270 + [26533].file = "drivers/block/aoe/aoechr.c",
82271 + [26533].name = "aoechr_write",
82272 + [26533].param3 = 1,
82273 + [26560].file = "crypto/algapi.c",
82274 + [26560].name = "crypto_alloc_instance2",
82275 + [26560].param3 = 1,
82276 + [26605].file = "security/selinux/selinuxfs.c",
82277 + [26605].name = "sel_write_user",
82278 + [26605].param3 = 1,
82279 + [26620].file = "net/bluetooth/mgmt.c",
82280 + [26620].name = "mgmt_control",
82281 + [26620].param3 = 1,
82282 + [26701].file = "drivers/mtd/chips/cfi_util.c",
82283 + [26701].name = "cfi_read_pri",
82284 + [26701].param3 = 1,
82285 + [26757].file = "fs/xattr.c",
82286 + [26757].name = "sys_fgetxattr",
82287 + [26757].param4 = 1,
82288 + [2678].file = "drivers/platform/x86/asus_acpi.c",
82289 + [2678].name = "disp_proc_write",
82290 + [2678].param3 = 1,
82291 + [26834].file = "drivers/gpu/drm/drm_drv.c",
82292 + [26834].name = "drm_ioctl",
82293 + [26834].param2 = 1,
82294 + [26843].file = "drivers/firewire/core-cdev.c",
82295 + [26843].name = "fw_device_op_compat_ioctl",
82296 + [26843].param2 = 1,
82297 + [26845].file = "drivers/scsi/qla2xxx/qla_bsg.c",
82298 + [26845].name = "qla2x00_get_ctx_bsg_sp",
82299 + [26845].param3 = 1,
82300 + [26888].file = "net/bridge/br_ioctl.c",
82301 + [26888].name = "get_fdb_entries",
82302 + [26888].param3 = 1,
82303 + [26962].file = "drivers/usb/class/usbtmc.c",
82304 + [26962].name = "usbtmc_write",
82305 + [26962].param3 = 1,
82306 + [26966].file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
82307 + [26966].name = "ts_write",
82308 + [26966].param3 = 1,
82309 + [27004].file = "drivers/misc/hpilo.c",
82310 + [27004].name = "ilo_write",
82311 + [27004].param3 = 1,
82312 + [27025].file = "fs/ntfs/file.c",
82313 + [27025].name = "__ntfs_copy_from_user_iovec_inatomic",
82314 + [27025].param3 = 1,
82315 + [27025].param4 = 1,
82316 + [27061].file = "drivers/firewire/core-cdev.c",
82317 + [27061].name = "iso_callback",
82318 + [27061].param3 = 1,
82319 + [2711].file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c",
82320 + [2711].name = "dvb_ringbuffer_read_user",
82321 + [2711].param3 = 1,
82322 + [27129].file = "fs/lockd/mon.c",
82323 + [27129].name = "nsm_get_handle",
82324 + [27129].param4 = 1,
82325 + [27142].file = "fs/proc/kcore.c",
82326 + [27142].name = "read_kcore",
82327 + [27142].param3 = 1,
82328 + [27164].file = "include/drm/drm_mem_util.h",
82329 + [27164].name = "drm_calloc_large",
82330 + [27164].param1 = 1,
82331 + [27164].param2 = 1,
82332 + [27176].file = "drivers/mtd/devices/mtd_dataflash.c",
82333 + [27176].name = "otp_read",
82334 + [27176].param2 = 1,
82335 + [27176].param5 = 1,
82336 + [27232].file = "security/apparmor/lib.c",
82337 + [27232].name = "kvmalloc",
82338 + [27232].param1 = 1,
82339 + [27275].file = "drivers/scsi/cxgbi/libcxgbi.c",
82340 + [27275].name = "cxgbi_ddp_reserve",
82341 + [27275].param4 = 1,
82342 + [27280].file = "drivers/net/ethernet/mellanox/mlx4/en_tx.c",
82343 + [27280].name = "mlx4_en_create_tx_ring",
82344 + [27280].param4 = 1,
82345 + [27290].file = "security/selinux/ss/services.c",
82346 + [27290].name = "security_context_to_sid_core",
82347 + [27290].param2 = 1,
82348 + [27302].file = "fs/proc/base.c",
82349 + [27302].name = "proc_loginuid_write",
82350 + [27302].param3 = 1,
82351 + [2730].file = "drivers/target/iscsi/iscsi_target_parameters.c",
82352 + [2730].name = "iscsi_decode_text_input",
82353 + [2730].param4 = 1,
82354 + [27314].file = "net/bluetooth/mgmt.c",
82355 + [27314].name = "cmd_complete",
82356 + [27314].param5 = 1,
82357 + [27472].file = "security/selinux/selinuxfs.c",
82358 + [27472].name = "sel_write_load",
82359 + [27472].param3 = 1,
82360 + [27491].file = "fs/proc/base.c",
82361 + [27491].name = "proc_pid_attr_write",
82362 + [27491].param3 = 1,
82363 + [27568].file = "drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c",
82364 + [27568].name = "t4_alloc_mem",
82365 + [27568].param1 = 1,
82366 + [27582].file = "drivers/platform/x86/asus_acpi.c",
82367 + [27582].name = "ledd_proc_write",
82368 + [27582].param3 = 1,
82369 + [27595].file = "net/core/sock.c",
82370 + [27595].name = "sock_alloc_send_skb",
82371 + [27595].param2 = 1,
82372 + [27648].file = "net/bluetooth/l2cap_core.c",
82373 + [27648].name = "l2cap_bredr_sig_cmd",
82374 + [27648].param3 = 1,
82375 + [27697].file = "drivers/staging/mei/iorw.c",
82376 + [27697].name = "amthi_read",
82377 + [27697].param4 = 1,
82378 + [27911].file = "fs/ext4/resize.c",
82379 + [27911].name = "alloc_flex_gd",
82380 + [27911].param1 = 1,
82381 + [27927].file = "drivers/tty/tty_io.c",
82382 + [27927].name = "redirected_tty_write",
82383 + [27927].param3 = 1,
82384 + [28040].file = "kernel/kfifo.c",
82385 + [28040].name = "__kfifo_alloc",
82386 + [28040].param2 = 1,
82387 + [28040].param3 = 1,
82388 + [28151].file = "mm/filemap_xip.c",
82389 + [28151].name = "do_xip_mapping_read",
82390 + [28151].param5 = 1,
82391 + [28247].file = "net/sctp/tsnmap.c",
82392 + [28247].name = "sctp_tsnmap_init",
82393 + [28247].param2 = 1,
82394 + [28253].file = "include/linux/fb.h",
82395 + [28253].name = "alloc_apertures",
82396 + [28253].param1 = 1,
82397 + [28265].file = "fs/notify/fanotify/fanotify_user.c",
82398 + [28265].name = "fanotify_write",
82399 + [28265].param3 = 1,
82400 + [28316].file = "drivers/input/joydev.c",
82401 + [28316].name = "joydev_ioctl_common",
82402 + [28316].param2 = 1,
82403 + [28359].file = "drivers/spi/spidev.c",
82404 + [28359].name = "spidev_message",
82405 + [28359].param3 = 1,
82406 + [28360].file = "drivers/hid/usbhid/hiddev.c",
82407 + [28360].name = "hiddev_compat_ioctl",
82408 + [28360].param2 = 1,
82409 + [28407].file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
82410 + [28407].name = "rt2x00debug_write_csr",
82411 + [28407].param3 = 1,
82412 + [2847].file = "fs/ntfs/file.c",
82413 + [2847].name = "ntfs_copy_from_user",
82414 + [2847].param3 = 1,
82415 + [2847].param5 = 1,
82416 + [28584].file = "drivers/memstick/core/memstick.c",
82417 + [28584].name = "memstick_alloc_host",
82418 + [28584].param1 = 1,
82419 + [28783].file = "drivers/gpu/drm/i915/i915_debugfs.c",
82420 + [28783].name = "i915_cache_sharing_write",
82421 + [28783].param3 = 1,
82422 + [28787].file = "drivers/media/video/videobuf2-core.c",
82423 + [28787].name = "vb2_write",
82424 + [28787].param3 = 1,
82425 + [28879].file = "drivers/base/map.c",
82426 + [28879].name = "kobj_map",
82427 + [28879].param2 = 1,
82428 + [28879].param3 = 1,
82429 + [28889].file = "drivers/char/pcmcia/cm4040_cs.c",
82430 + [28889].name = "cm4040_write",
82431 + [28889].param3 = 1,
82432 + [29073].file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
82433 + [29073].name = "vmw_kms_readback",
82434 + [29073].param6 = 1,
82435 + [29085].file = "security/apparmor/apparmorfs.c",
82436 + [29085].name = "profile_load",
82437 + [29085].param3 = 1,
82438 + [29092].file = "lib/lru_cache.c",
82439 + [29092].name = "lc_create",
82440 + [29092].param3 = 1,
82441 + [29257].file = "drivers/vhost/vhost.c",
82442 + [29257].name = "vhost_add_used_and_signal_n",
82443 + [29257].param4 = 1,
82444 + [29267].file = "net/ipv4/fib_trie.c",
82445 + [29267].name = "tnode_alloc",
82446 + [29267].param1 = 1,
82447 + [29338].file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
82448 + [29338].name = "bnad_debugfs_write_regwr",
82449 + [29338].param3 = 1,
82450 + [29353].file = "net/sctp/socket.c",
82451 + [29353].name = "sctp_setsockopt_del_key",
82452 + [29353].param3 = 1,
82453 + [29405].file = "drivers/media/dvb/dvb-usb/dw2102.c",
82454 + [29405].name = "dw210x_op_rw",
82455 + [29405].param6 = 1,
82456 + [29542].file = "net/nfc/nci/core.c",
82457 + [29542].name = "nci_send_cmd",
82458 + [29542].param3 = 1,
82459 + [29714].file = "drivers/scsi/cxgbi/libcxgbi.c",
82460 + [29714].name = "cxgbi_device_register",
82461 + [29714].param1 = 1,
82462 + [29714].param2 = 1,
82463 + [2972].file = "drivers/staging/crystalhd/crystalhd_misc.c",
82464 + [2972].name = "crystalhd_create_dio_pool",
82465 + [2972].param2 = 1,
82466 + [29769].file = "drivers/misc/iwmc3200top/log.c",
82467 + [29769].name = "store_iwmct_log_level",
82468 + [29769].param4 = 1,
82469 + [29792].file = "drivers/staging/bcm/nvm.c",
82470 + [29792].name = "BcmCopySection",
82471 + [29792].param5 = 1,
82472 + [29859].file = "net/rds/page.c",
82473 + [29859].name = "rds_page_copy_user",
82474 + [29859].param4 = 1,
82475 + [29905].file = "mm/nobootmem.c",
82476 + [29905].name = "___alloc_bootmem",
82477 + [29905].param1 = 1,
82478 + [2995].file = "mm/page_alloc.c",
82479 + [2995].name = "alloc_large_system_hash",
82480 + [2995].param2 = 1,
82481 + [30000].file = "drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c",
82482 + [30000].name = "wlc_phy_loadsampletable_nphy",
82483 + [30000].param3 = 1,
82484 + [30242].file = "fs/cifs/cifssmb.c",
82485 + [30242].name = "cifs_readdata_alloc",
82486 + [30242].param1 = 1,
82487 + [30494].file = "net/ceph/buffer.c",
82488 + [30494].name = "ceph_buffer_new",
82489 + [30494].param1 = 1,
82490 + [30590].file = "security/tomoyo/memory.c",
82491 + [30590].name = "tomoyo_commit_ok",
82492 + [30590].param2 = 1,
82493 + [3060].file = "lib/mpi/mpiutil.c",
82494 + [3060].name = "mpi_alloc_limb_space",
82495 + [3060].param1 = 1,
82496 + [30687].file = "drivers/uwb/uwb-debug.c",
82497 + [30687].name = "command_write",
82498 + [30687].param3 = 1,
82499 + [30726].file = "drivers/bluetooth/hci_vhci.c",
82500 + [30726].name = "vhci_get_user",
82501 + [30726].param3 = 1,
82502 + [30873].file = "net/packet/af_packet.c",
82503 + [30873].name = "alloc_one_pg_vec_page",
82504 + [30873].param1 = 1,
82505 + [30970].file = "drivers/staging/hv/storvsc_drv.c",
82506 + [30970].name = "create_bounce_buffer",
82507 + [30970].param3 = 1,
82508 + [310].file = "drivers/block/drbd/drbd_bitmap.c",
82509 + [310].name = "bm_realloc_pages",
82510 + [310].param2 = 1,
82511 + [3119].file = "drivers/misc/ibmasm/command.c",
82512 + [3119].name = "ibmasm_new_command",
82513 + [3119].param2 = 1,
82514 + [31207].file = "drivers/platform/x86/asus_acpi.c",
82515 + [31207].name = "parse_arg",
82516 + [31207].param2 = 1,
82517 + [31287].file = "drivers/scsi/libsrp.c",
82518 + [31287].name = "srp_iu_pool_alloc",
82519 + [31287].param2 = 1,
82520 + [31291].file = "sound/pci/rme9652/rme9652.c",
82521 + [31291].name = "snd_rme9652_capture_copy",
82522 + [31291].param5 = 1,
82523 + [31348].file = "kernel/sched/core.c",
82524 + [31348].name = "sys_sched_getaffinity",
82525 + [31348].param2 = 1,
82526 + [31492].file = "drivers/hid/hidraw.c",
82527 + [31492].name = "hidraw_read",
82528 + [31492].param3 = 1,
82529 + [3170].file = "security/integrity/ima/ima_fs.c",
82530 + [3170].name = "ima_write_policy",
82531 + [3170].param3 = 1,
82532 + [31782].file = "drivers/misc/pti.c",
82533 + [31782].name = "pti_char_write",
82534 + [31782].param3 = 1,
82535 + [31789].file = "fs/file.c",
82536 + [31789].name = "alloc_fdmem",
82537 + [31789].param1 = 1,
82538 + [31957].file = "fs/afs/proc.c",
82539 + [31957].name = "afs_proc_cells_write",
82540 + [31957].param3 = 1,
82541 + [32002].file = "net/sctp/socket.c",
82542 + [32002].name = "sctp_setsockopt_active_key",
82543 + [32002].param3 = 1,
82544 + [32182].file = "net/sunrpc/cache.c",
82545 + [32182].name = "cache_write",
82546 + [32182].param3 = 1,
82547 + [32278].file = "kernel/time/timer_stats.c",
82548 + [32278].name = "tstats_write",
82549 + [32278].param3 = 1,
82550 + [32326].file = "drivers/tty/n_r3964.c",
82551 + [32326].name = "r3964_write",
82552 + [32326].param4 = 1,
82553 + [32399].file = "drivers/net/phy/mdio_bus.c",
82554 + [32399].name = "mdiobus_alloc_size",
82555 + [32399].param1 = 1,
82556 + [32402].file = "net/ceph/pagevec.c",
82557 + [32402].name = "ceph_copy_user_to_page_vector",
82558 + [32402].param4 = 1,
82559 + [3241].file = "drivers/usb/wusbcore/crypto.c",
82560 + [3241].name = "wusb_prf",
82561 + [3241].param7 = 1,
82562 + [32459].file = "drivers/media/radio/radio-wl1273.c",
82563 + [32459].name = "wl1273_fm_fops_write",
82564 + [32459].param3 = 1,
82565 + [32531].file = "fs/bio.c",
82566 + [32531].name = "__bio_map_kern",
82567 + [32531].param2 = 1,
82568 + [32531].param3 = 1,
82569 + [32537].file = "drivers/staging/vme/devices/vme_user.c",
82570 + [32537].name = "buffer_to_user",
82571 + [32537].param3 = 1,
82572 + [32560].file = "drivers/input/input-mt.c",
82573 + [32560].name = "input_mt_init_slots",
82574 + [32560].param2 = 1,
82575 + [32600].file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
82576 + [32600].name = "ath6kl_set_assoc_req_ies",
82577 + [32600].param3 = 1,
82578 + [32608].file = "security/selinux/selinuxfs.c",
82579 + [32608].name = "sel_write_checkreqprot",
82580 + [32608].param3 = 1,
82581 + [32812].file = "drivers/net/ethernet/neterion/vxge/vxge-config.c",
82582 + [32812].name = "__vxge_hw_channel_allocate",
82583 + [32812].param3 = 1,
82584 + [32950].file = "fs/reiserfs/resize.c",
82585 + [32950].name = "reiserfs_resize",
82586 + [32950].param2 = 1,
82587 + [33010].file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c",
82588 + [33010].name = "dvb_ringbuffer_pkt_read_user",
82589 + [33010].param5 = 1,
82590 + [33130].file = "net/llc/llc_sap.c",
82591 + [33130].name = "llc_alloc_frame",
82592 + [33130].param4 = 1,
82593 + [33221].file = "crypto/ablkcipher.c",
82594 + [33221].name = "ablkcipher_copy_iv",
82595 + [33221].param3 = 1,
82596 + [33268].file = "mm/maccess.c",
82597 + [33268].name = "__probe_kernel_write",
82598 + [33268].param3 = 1,
82599 + [33280].file = "fs/xfs/kmem.c",
82600 + [33280].name = "kmem_realloc",
82601 + [33280].param2 = 1,
82602 + [33375].file = "drivers/staging/rtl8712/osdep_service.h",
82603 + [33375].name = "_malloc",
82604 + [33375].param1 = 1,
82605 + [33420].file = "drivers/net/team/team.c",
82606 + [33420].name = "__team_options_register",
82607 + [33420].param3 = 1,
82608 + [33489].file = "fs/binfmt_misc.c",
82609 + [33489].name = "create_entry",
82610 + [33489].param2 = 1,
82611 + [33637].file = "net/9p/client.c",
82612 + [33637].name = "p9_client_read",
82613 + [33637].param5 = 1,
82614 + [33669].file = "fs/gfs2/glock.c",
82615 + [33669].name = "gfs2_glock_nq_m",
82616 + [33669].param1 = 1,
82617 + [33704].file = "drivers/gpu/drm/ttm/ttm_page_alloc_dma.c",
82618 + [33704].name = "ttm_dma_page_pool_free",
82619 + [33704].param2 = 1,
82620 + [33779].file = "drivers/staging/vme/devices/vme_user.c",
82621 + [33779].name = "resource_from_user",
82622 + [33779].param3 = 1,
82623 + [33810].file = "net/mac80211/util.c",
82624 + [33810].name = "ieee80211_send_probe_req",
82625 + [33810].param6 = 1,
82626 + [3384].file = "drivers/block/paride/pg.c",
82627 + [3384].name = "pg_write",
82628 + [3384].param3 = 1,
82629 + [34105].file = "fs/libfs.c",
82630 + [34105].name = "simple_read_from_buffer",
82631 + [34105].param2 = 1,
82632 + [34105].param5 = 1,
82633 + [34120].file = "drivers/media/video/pvrusb2/pvrusb2-io.c",
82634 + [34120].name = "pvr2_stream_buffer_count",
82635 + [34120].param2 = 1,
82636 + [34226].file = "mm/shmem.c",
82637 + [34226].name = "shmem_xattr_set",
82638 + [34226].param4 = 1,
82639 + [34251].file = "drivers/staging/cxt1e1/sbecom_inline_linux.h",
82640 + [34251].name = "OS_kmalloc",
82641 + [34251].param1 = 1,
82642 + [34276].file = "drivers/media/video/videobuf2-core.c",
82643 + [34276].name = "__vb2_perform_fileio",
82644 + [34276].param3 = 1,
82645 + [34278].file = "fs/ubifs/debug.c",
82646 + [34278].name = "dfs_global_file_write",
82647 + [34278].param3 = 1,
82648 + [34432].file = "drivers/edac/edac_pci.c",
82649 + [34432].name = "edac_pci_alloc_ctl_info",
82650 + [34432].param1 = 1,
82651 + [34532].file = "drivers/virtio/virtio_ring.c",
82652 + [34532].name = "vring_add_indirect",
82653 + [34532].param3 = 1,
82654 + [34532].param4 = 1,
82655 + [34543].file = "net/sctp/tsnmap.c",
82656 + [34543].name = "sctp_tsnmap_grow",
82657 + [34543].param2 = 1,
82658 + [34551].file = "fs/ocfs2/stack_user.c",
82659 + [34551].name = "ocfs2_control_cfu",
82660 + [34551].param2 = 1,
82661 + [34634].file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
82662 + [34634].name = "ath6kl_send_go_probe_resp",
82663 + [34634].param3 = 1,
82664 + [34666].file = "fs/cifs/cifs_debug.c",
82665 + [34666].name = "cifs_security_flags_proc_write",
82666 + [34666].param3 = 1,
82667 + [3466].file = "drivers/misc/altera-stapl/altera-jtag.c",
82668 + [3466].name = "altera_drscan",
82669 + [3466].param2 = 1,
82670 + [34672].file = "drivers/tty/tty_io.c",
82671 + [34672].name = "tty_write",
82672 + [34672].param3 = 1,
82673 + [34679].file = "drivers/media/video/ivtv/ivtv-fileops.c",
82674 + [34679].name = "ivtv_copy_buf_to_user",
82675 + [34679].param4 = 1,
82676 + [34721].file = "drivers/usb/host/hwa-hc.c",
82677 + [34721].name = "__hwahc_dev_set_key",
82678 + [34721].param5 = 1,
82679 + [34749].file = "mm/nobootmem.c",
82680 + [34749].name = "__alloc_bootmem_low_node",
82681 + [34749].param2 = 1,
82682 + [34760].file = "include/acpi/platform/aclinux.h",
82683 + [34760].name = "acpi_os_allocate_zeroed",
82684 + [34760].param1 = 1,
82685 + [34802].file = "drivers/scsi/cxgbi/libcxgbi.h",
82686 + [34802].name = "cxgbi_alloc_big_mem",
82687 + [34802].param1 = 1,
82688 + [34863].file = "drivers/video/fbsysfs.c",
82689 + [34863].name = "framebuffer_alloc",
82690 + [34863].param1 = 1,
82691 + [34868].file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
82692 + [34868].name = "bnad_debugfs_write_regrd",
82693 + [34868].param3 = 1,
82694 + [34882].file = "drivers/platform/x86/toshiba_acpi.c",
82695 + [34882].name = "video_proc_write",
82696 + [34882].param3 = 1,
82697 + [35050].file = "fs/ocfs2/dlmfs/dlmfs.c",
82698 + [35050].name = "dlmfs_file_write",
82699 + [35050].param3 = 1,
82700 + [35119].file = "fs/xattr.c",
82701 + [35119].name = "sys_llistxattr",
82702 + [35119].param3 = 1,
82703 + [35129].file = "mm/nobootmem.c",
82704 + [35129].name = "___alloc_bootmem_nopanic",
82705 + [35129].param1 = 1,
82706 + [35159].file = "drivers/net/wimax/i2400m/usb.c",
82707 + [35159].name = "__i2400mu_send_barker",
82708 + [35159].param3 = 1,
82709 + [35232].file = "drivers/media/video/cx18/cx18-fileops.c",
82710 + [35232].name = "cx18_read",
82711 + [35232].param3 = 1,
82712 + [35234].file = "net/irda/irnet/irnet_ppp.c",
82713 + [35234].name = "irnet_ctrl_write",
82714 + [35234].param3 = 1,
82715 + [35256].file = "sound/core/memory.c",
82716 + [35256].name = "copy_from_user_toio",
82717 + [35256].param3 = 1,
82718 + [35268].file = "security/keys/request_key_auth.c",
82719 + [35268].name = "request_key_auth_read",
82720 + [35268].param3 = 1,
82721 + [3538].file = "net/bluetooth/mgmt.c",
82722 + [3538].name = "disconnect",
82723 + [3538].param4 = 1,
82724 + [35443].file = "sound/core/pcm_memory.c",
82725 + [35443].name = "_snd_pcm_lib_alloc_vmalloc_buffer",
82726 + [35443].param2 = 1,
82727 + [35468].file = "drivers/xen/xenbus/xenbus_dev_frontend.c",
82728 + [35468].name = "xenbus_file_write",
82729 + [35468].param3 = 1,
82730 + [35536].file = "kernel/sysctl_binary.c",
82731 + [35536].name = "bin_uuid",
82732 + [35536].param3 = 1,
82733 + [35551].file = "drivers/media/video/ivtv/ivtv-fileops.c",
82734 + [35551].name = "ivtv_read_pos",
82735 + [35551].param3 = 1,
82736 + [35556].file = "fs/read_write.c",
82737 + [35556].name = "sys_readv",
82738 + [35556].param3 = 1,
82739 + [35693].file = "drivers/staging/mei/main.c",
82740 + [35693].name = "mei_read",
82741 + [35693].param3 = 1,
82742 + [35703].file = "crypto/ablkcipher.c",
82743 + [35703].name = "ablkcipher_next_slow",
82744 + [35703].param3 = 1,
82745 + [35703].param4 = 1,
82746 + [35729].file = "include/linux/skbuff.h",
82747 + [35729].name = "__dev_alloc_skb",
82748 + [35729].param1 = 1,
82749 + [35731].file = "drivers/usb/class/cdc-wdm.c",
82750 + [35731].name = "wdm_read",
82751 + [35731].param3 = 1,
82752 + [35796].file = "drivers/mtd/nand/nand_bch.c",
82753 + [35796].name = "nand_bch_init",
82754 + [35796].param2 = 1,
82755 + [35796].param3 = 1,
82756 + [35880].file = "fs/ecryptfs/crypto.c",
82757 + [35880].name = "ecryptfs_encrypt_and_encode_filename",
82758 + [35880].param6 = 1,
82759 + [36076].file = "drivers/net/ethernet/sfc/tx.c",
82760 + [36076].name = "efx_tsoh_heap_alloc",
82761 + [36076].param2 = 1,
82762 + [36080].file = "drivers/media/video/v4l2-ioctl.c",
82763 + [36080].name = "video_usercopy",
82764 + [36080].param2 = 1,
82765 + [36149].file = "fs/udf/inode.c",
82766 + [36149].name = "udf_alloc_i_data",
82767 + [36149].param2 = 1,
82768 + [36183].file = "drivers/tty/vt/vc_screen.c",
82769 + [36183].name = "vcs_read",
82770 + [36183].param3 = 1,
82771 + [36199].file = "net/sunrpc/auth_gss/auth_gss.c",
82772 + [36199].name = "gss_pipe_downcall",
82773 + [36199].param3 = 1,
82774 + [36206].file = "net/ipv4/tcp_input.c",
82775 + [36206].name = "tcp_collapse",
82776 + [36206].param5 = 1,
82777 + [36206].param6 = 1,
82778 + [36230].file = "drivers/net/wan/hdlc_ppp.c",
82779 + [36230].name = "ppp_cp_parse_cr",
82780 + [36230].param4 = 1,
82781 + [36284].file = "drivers/spi/spi.c",
82782 + [36284].name = "spi_register_board_info",
82783 + [36284].param2 = 1,
82784 + [36490].file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
82785 + [36490].name = "ath6kl_cfg80211_connect_event",
82786 + [36490].param7 = 1,
82787 + [36522].file = "drivers/hid/hidraw.c",
82788 + [36522].name = "hidraw_send_report",
82789 + [36522].param3 = 1,
82790 + [36560].file = "net/sunrpc/cache.c",
82791 + [36560].name = "write_flush",
82792 + [36560].param3 = 1,
82793 + [36807].file = "drivers/usb/mon/mon_bin.c",
82794 + [36807].name = "mon_bin_get_event",
82795 + [36807].param4 = 1,
82796 + [37034].file = "fs/cifs/cifssmb.c",
82797 + [37034].name = "cifs_writedata_alloc",
82798 + [37034].param1 = 1,
82799 + [37044].file = "sound/firewire/packets-buffer.c",
82800 + [37044].name = "iso_packets_buffer_init",
82801 + [37044].param3 = 1,
82802 + [37108].file = "drivers/media/dvb/ttpci/av7110_av.c",
82803 + [37108].name = "dvb_video_write",
82804 + [37108].param3 = 1,
82805 + [37154].file = "net/nfc/llcp/commands.c",
82806 + [37154].name = "nfc_llcp_build_tlv",
82807 + [37154].param3 = 1,
82808 + [37163].file = "net/core/skbuff.c",
82809 + [37163].name = "__netdev_alloc_skb",
82810 + [37163].param2 = 1,
82811 + [37233].file = "fs/ocfs2/cluster/tcp.c",
82812 + [37233].name = "o2net_send_message_vec",
82813 + [37233].param4 = 1,
82814 + [37241].file = "net/atm/lec.c",
82815 + [37241].name = "lane2_associate_req",
82816 + [37241].param4 = 1,
82817 + [37384].file = "drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c",
82818 + [37384].name = "vmw_fifo_reserve",
82819 + [37384].param2 = 1,
82820 + [37497].file = "net/mac80211/util.c",
82821 + [37497].name = "ieee80211_build_probe_req",
82822 + [37497].param7 = 1,
82823 + [37535].file = "kernel/trace/trace.c",
82824 + [37535].name = "tracing_trace_options_write",
82825 + [37535].param3 = 1,
82826 + [37611].file = "drivers/xen/xenbus/xenbus_xs.c",
82827 + [37611].name = "split",
82828 + [37611].param2 = 1,
82829 + [37661].file = "mm/filemap.c",
82830 + [37661].name = "file_read_actor",
82831 + [37661].param4 = 1,
82832 + [37852].file = "drivers/staging/android/logger.c",
82833 + [37852].name = "do_read_log_to_user",
82834 + [37852].param4 = 1,
82835 + [37921].file = "drivers/net/wireless/wl12xx/rx.c",
82836 + [37921].name = "wl1271_rx_handle_data",
82837 + [37921].param3 = 1,
82838 + [37976].file = "drivers/platform/x86/asus_acpi.c",
82839 + [37976].name = "bluetooth_proc_write",
82840 + [37976].param3 = 1,
82841 + [3797].file = "sound/pci/asihpi/hpicmn.c",
82842 + [3797].name = "hpi_alloc_control_cache",
82843 + [3797].param1 = 1,
82844 + [3801].file = "drivers/block/paride/pt.c",
82845 + [3801].name = "pt_write",
82846 + [3801].param3 = 1,
82847 + [38052].file = "kernel/kexec.c",
82848 + [38052].name = "kimage_normal_alloc",
82849 + [38052].param3 = 1,
82850 + [38057].file = "fs/coda/psdev.c",
82851 + [38057].name = "coda_psdev_write",
82852 + [38057].param3 = 1,
82853 + [38186].file = "kernel/signal.c",
82854 + [38186].name = "do_sigpending",
82855 + [38186].param2 = 1,
82856 + [38314].file = "fs/nfs/read.c",
82857 + [38314].name = "nfs_readdata_alloc",
82858 + [38314].param1 = 1,
82859 + [38401].file = "drivers/xen/xenbus/xenbus_dev_frontend.c",
82860 + [38401].name = "queue_reply",
82861 + [38401].param3 = 1,
82862 + [3841].file = "drivers/platform/x86/asus_acpi.c",
82863 + [3841].name = "write_led",
82864 + [3841].param2 = 1,
82865 + [38532].file = "fs/afs/cell.c",
82866 + [38532].name = "afs_cell_lookup",
82867 + [38532].param2 = 1,
82868 + [38564].file = "fs/nfs/nfs4proc.c",
82869 + [38564].name = "nfs4_realloc_slot_table",
82870 + [38564].param2 = 1,
82871 + [38576].file = "drivers/i2c/i2c-dev.c",
82872 + [38576].name = "i2cdev_read",
82873 + [38576].param3 = 1,
82874 + [38704].file = "drivers/media/video/uvc/uvc_driver.c",
82875 + [38704].name = "uvc_alloc_entity",
82876 + [38704].param3 = 1,
82877 + [38704].param4 = 1,
82878 + [38747].file = "fs/xattr.c",
82879 + [38747].name = "sys_lgetxattr",
82880 + [38747].param4 = 1,
82881 + [38867].file = "drivers/scsi/scsi_transport_fc.c",
82882 + [38867].name = "fc_host_post_vendor_event",
82883 + [38867].param3 = 1,
82884 + [38931].file = "drivers/isdn/hardware/eicon/capimain.c",
82885 + [38931].name = "diva_os_alloc_message_buffer",
82886 + [38931].param1 = 1,
82887 + [38972].file = "security/smack/smackfs.c",
82888 + [38972].name = "smk_write_logging",
82889 + [38972].param3 = 1,
82890 + [39001].file = "net/xfrm/xfrm_hash.c",
82891 + [39001].name = "xfrm_hash_alloc",
82892 + [39001].param1 = 1,
82893 + [39052].file = "drivers/input/evdev.c",
82894 + [39052].name = "evdev_ioctl",
82895 + [39052].param2 = 1,
82896 + [39066].file = "drivers/media/dvb/frontends/tda10048.c",
82897 + [39066].name = "tda10048_writeregbulk",
82898 + [39066].param4 = 1,
82899 + [39118].file = "drivers/misc/iwmc3200top/log.c",
82900 + [39118].name = "store_iwmct_log_level_fw",
82901 + [39118].param4 = 1,
82902 + [39254].file = "drivers/char/pcmcia/cm4000_cs.c",
82903 + [39254].name = "cmm_write",
82904 + [39254].param3 = 1,
82905 + [39392].file = "drivers/atm/solos-pci.c",
82906 + [39392].name = "send_command",
82907 + [39392].param4 = 1,
82908 + [39415].file = "fs/pstore/inode.c",
82909 + [39415].name = "pstore_mkfile",
82910 + [39415].param5 = 1,
82911 + [39417].file = "drivers/block/DAC960.c",
82912 + [39417].name = "dac960_user_command_proc_write",
82913 + [39417].param3 = 1,
82914 + [39460].file = "fs/btrfs/volumes.c",
82915 + [39460].name = "btrfs_map_block",
82916 + [39460].param3 = 1,
82917 + [39479].file = "drivers/ide/ide-tape.c",
82918 + [39479].name = "idetape_chrdev_read",
82919 + [39479].param3 = 1,
82920 + [39586].file = "drivers/hv/channel.c",
82921 + [39586].name = "create_gpadl_header",
82922 + [39586].param2 = 1,
82923 + [39638].file = "security/selinux/selinuxfs.c",
82924 + [39638].name = "sel_write_avc_cache_threshold",
82925 + [39638].param3 = 1,
82926 + [39645].file = "drivers/media/dvb/dvb-core/dvbdev.c",
82927 + [39645].name = "dvb_generic_ioctl",
82928 + [39645].param2 = 1,
82929 + [39770].file = "include/linux/mISDNif.h",
82930 + [39770].name = "mI_alloc_skb",
82931 + [39770].param1 = 1,
82932 + [39813].file = "fs/ocfs2/stack_user.c",
82933 + [39813].name = "ocfs2_control_message",
82934 + [39813].param3 = 1,
82935 + [39888].file = "net/core/skbuff.c",
82936 + [39888].name = "__alloc_skb",
82937 + [39888].param1 = 1,
82938 + [39980].file = "net/bluetooth/mgmt.c",
82939 + [39980].name = "pair_device",
82940 + [39980].param4 = 1,
82941 + [40043].file = "drivers/media/video/v4l2-ioctl.c",
82942 + [40043].name = "video_ioctl2",
82943 + [40043].param2 = 1,
82944 + [40049].file = "drivers/bluetooth/btmrvl_debugfs.c",
82945 + [40049].name = "btmrvl_psmode_write",
82946 + [40049].param3 = 1,
82947 + [40075].file = "drivers/media/video/c-qcam.c",
82948 + [40075].name = "qc_capture",
82949 + [40075].param3 = 1,
82950 + [40163].file = "fs/ncpfs/file.c",
82951 + [40163].name = "ncp_file_write",
82952 + [40163].param3 = 1,
82953 + [40240].file = "drivers/char/nvram.c",
82954 + [40240].name = "nvram_write",
82955 + [40240].param3 = 1,
82956 + [40256].file = "drivers/tty/vt/vc_screen.c",
82957 + [40256].name = "vcs_write",
82958 + [40256].param3 = 1,
82959 + [40302].file = "sound/isa/gus/gus_dram.c",
82960 + [40302].name = "snd_gus_dram_poke",
82961 + [40302].param4 = 1,
82962 + [40339].file = "drivers/acpi/apei/hest.c",
82963 + [40339].name = "hest_ghes_dev_register",
82964 + [40339].param1 = 1,
82965 + [40355].file = "drivers/staging/mei/main.c",
82966 + [40355].name = "mei_write",
82967 + [40355].param3 = 1,
82968 + [40373].file = "fs/cifs/cifs_spnego.c",
82969 + [40373].name = "cifs_spnego_key_instantiate",
82970 + [40373].param3 = 1,
82971 + [40519].file = "net/sctp/socket.c",
82972 + [40519].name = "sctp_setsockopt_events",
82973 + [40519].param3 = 1,
82974 + [40694].file = "mm/page_cgroup.c",
82975 + [40694].name = "alloc_page_cgroup",
82976 + [40694].param1 = 1,
82977 + [40731].file = "drivers/tty/tty_io.c",
82978 + [40731].name = "do_tty_write",
82979 + [40731].param5 = 1,
82980 + [40754].file = "fs/btrfs/delayed-inode.c",
82981 + [40754].name = "btrfs_alloc_delayed_item",
82982 + [40754].param1 = 1,
82983 + [40786].file = "net/ipv4/netfilter/nf_nat_snmp_basic.c",
82984 + [40786].name = "asn1_octets_decode",
82985 + [40786].param2 = 1,
82986 + [40901].file = "drivers/block/drbd/drbd_bitmap.c",
82987 + [40901].name = "drbd_bm_resize",
82988 + [40901].param2 = 1,
82989 + [40951].file = "drivers/xen/evtchn.c",
82990 + [40951].name = "evtchn_read",
82991 + [40951].param3 = 1,
82992 + [40952].file = "drivers/misc/sgi-xp/xpc_partition.c",
82993 + [40952].name = "xpc_kmalloc_cacheline_aligned",
82994 + [40952].param1 = 1,
82995 + [41000].file = "sound/core/pcm_native.c",
82996 + [41000].name = "snd_pcm_aio_read",
82997 + [41000].param3 = 1,
82998 + [41005].file = "net/bridge/netfilter/ebtables.c",
82999 + [41005].name = "copy_counters_to_user",
83000 + [41005].param5 = 1,
83001 + [41041].file = "net/core/sock.c",
83002 + [41041].name = "sock_wmalloc",
83003 + [41041].param2 = 1,
83004 + [41122].file = "fs/binfmt_misc.c",
83005 + [41122].name = "bm_status_write",
83006 + [41122].param3 = 1,
83007 + [41176].file = "kernel/trace/trace_events.c",
83008 + [41176].name = "subsystem_filter_write",
83009 + [41176].param3 = 1,
83010 + [41249].file = "drivers/media/video/zr364xx.c",
83011 + [41249].name = "send_control_msg",
83012 + [41249].param6 = 1,
83013 + [41287].file = "drivers/net/ethernet/neterion/vxge/vxge-config.c",
83014 + [41287].name = "vxge_os_dma_malloc_async",
83015 + [41287].param3 = 1,
83016 + [41302].file = "net/dns_resolver/dns_query.c",
83017 + [41302].name = "dns_query",
83018 + [41302].param3 = 1,
83019 + [41408].file = "mm/filemap_xip.c",
83020 + [41408].name = "__xip_file_write",
83021 + [41408].param3 = 1,
83022 + [41547].file = "net/bluetooth/smp.c",
83023 + [41547].name = "smp_build_cmd",
83024 + [41547].param3 = 1,
83025 + [4155].file = "kernel/kexec.c",
83026 + [4155].name = "do_kimage_alloc",
83027 + [4155].param3 = 1,
83028 + [41676].file = "fs/compat.c",
83029 + [41676].name = "compat_sys_preadv",
83030 + [41676].param3 = 1,
83031 + [4167].file = "drivers/media/dvb/frontends/cx24116.c",
83032 + [4167].name = "cx24116_writeregN",
83033 + [4167].param4 = 1,
83034 + [41793].file = "drivers/net/wireless/ath/ath6kl/wmi.c",
83035 + [41793].name = "ath6kl_wmi_send_mgmt_cmd",
83036 + [41793].param7 = 1,
83037 + [41924].file = "security/keys/keyctl.c",
83038 + [41924].name = "keyctl_get_security",
83039 + [41924].param3 = 1,
83040 + [41968].file = "fs/btrfs/volumes.c",
83041 + [41968].name = "__btrfs_map_block",
83042 + [41968].param3 = 1,
83043 + [4202].file = "drivers/edac/edac_mc.c",
83044 + [4202].name = "edac_mc_alloc",
83045 + [4202].param1 = 1,
83046 + [42081].file = "net/econet/af_econet.c",
83047 + [42081].name = "aun_incoming",
83048 + [42081].param3 = 1,
83049 + [42143].file = "drivers/media/video/c-qcam.c",
83050 + [42143].name = "qcam_read",
83051 + [42143].param3 = 1,
83052 + [42206].file = "fs/quota/quota_tree.c",
83053 + [42206].name = "getdqbuf",
83054 + [42206].param1 = 1,
83055 + [42270].file = "net/wireless/scan.c",
83056 + [42270].name = "cfg80211_inform_bss_frame",
83057 + [42270].param4 = 1,
83058 + [42281].file = "include/linux/mISDNif.h",
83059 + [42281].name = "_queue_data",
83060 + [42281].param4 = 1,
83061 + [42420].file = "drivers/net/wireless/hostap/hostap_ioctl.c",
83062 + [42420].name = "prism2_set_genericelement",
83063 + [42420].param3 = 1,
83064 + [42472].file = "fs/compat.c",
83065 + [42472].name = "compat_readv",
83066 + [42472].param3 = 1,
83067 + [42473].file = "net/tipc/name_table.c",
83068 + [42473].name = "tipc_subseq_alloc",
83069 + [42473].param1 = 1,
83070 + [42562].file = "kernel/kfifo.c",
83071 + [42562].name = "__kfifo_to_user_r",
83072 + [42562].param3 = 1,
83073 + [42666].file = "drivers/pcmcia/cistpl.c",
83074 + [42666].name = "read_cis_cache",
83075 + [42666].param4 = 1,
83076 + [42714].file = "drivers/scsi/scsi_tgt_lib.c",
83077 + [42714].name = "scsi_tgt_copy_sense",
83078 + [42714].param3 = 1,
83079 + [42833].file = "kernel/trace/blktrace.c",
83080 + [42833].name = "blk_msg_write",
83081 + [42833].param3 = 1,
83082 + [42857].file = "security/selinux/selinuxfs.c",
83083 + [42857].name = "sel_write_member",
83084 + [42857].param3 = 1,
83085 + [42882].file = "security/keys/user_defined.c",
83086 + [42882].name = "user_instantiate",
83087 + [42882].param3 = 1,
83088 + [42930].file = "net/caif/cfpkt_skbuff.c",
83089 + [42930].name = "cfpkt_create_pfx",
83090 + [42930].param1 = 1,
83091 + [42930].param2 = 1,
83092 + [43023].file = "drivers/usb/misc/usblcd.c",
83093 + [43023].name = "lcd_write",
83094 + [43023].param3 = 1,
83095 + [43104].file = "drivers/mtd/devices/mtd_dataflash.c",
83096 + [43104].name = "dataflash_read_user_otp",
83097 + [43104].param3 = 1,
83098 + [43133].file = "lib/mpi/mpiutil.c",
83099 + [43133].name = "mpi_resize",
83100 + [43133].param2 = 1,
83101 + [4324].file = "drivers/video/fbmem.c",
83102 + [4324].name = "fb_read",
83103 + [4324].param3 = 1,
83104 + [43266].file = "fs/afs/cell.c",
83105 + [43266].name = "afs_cell_alloc",
83106 + [43266].param2 = 1,
83107 + [4328].file = "drivers/usb/musb/musb_debugfs.c",
83108 + [4328].name = "musb_test_mode_write",
83109 + [4328].param3 = 1,
83110 + [43380].file = "drivers/scsi/bfa/bfad_debugfs.c",
83111 + [43380].name = "bfad_debugfs_write_regrd",
83112 + [43380].param3 = 1,
83113 + [43510].file = "kernel/kexec.c",
83114 + [43510].name = "compat_sys_kexec_load",
83115 + [43510].param2 = 1,
83116 + [43540].file = "include/rdma/ib_verbs.h",
83117 + [43540].name = "ib_copy_to_udata",
83118 + [43540].param3 = 1,
83119 + [4357].file = "security/tomoyo/securityfs_if.c",
83120 + [4357].name = "tomoyo_read_self",
83121 + [4357].param3 = 1,
83122 + [43590].file = "security/smack/smackfs.c",
83123 + [43590].name = "smk_write_onlycap",
83124 + [43590].param3 = 1,
83125 + [43596].file = "drivers/usb/core/buffer.c",
83126 + [43596].name = "hcd_buffer_alloc",
83127 + [43596].param2 = 1,
83128 + [43632].file = "drivers/media/video/videobuf2-core.c",
83129 + [43632].name = "vb2_read",
83130 + [43632].param3 = 1,
83131 + [43659].file = "drivers/firmware/efivars.c",
83132 + [43659].name = "efivar_create_sysfs_entry",
83133 + [43659].param2 = 1,
83134 + [43731].file = "drivers/hid/hid-picolcd.c",
83135 + [43731].name = "picolcd_debug_eeprom_read",
83136 + [43731].param3 = 1,
83137 + [43777].file = "drivers/acpi/acpica/utobject.c",
83138 + [43777].name = "acpi_ut_create_buffer_object",
83139 + [43777].param1 = 1,
83140 + [43798].file = "net/bluetooth/mgmt.c",
83141 + [43798].name = "set_local_name",
83142 + [43798].param4 = 1,
83143 + [4380].file = "drivers/mtd/devices/mtd_dataflash.c",
83144 + [4380].name = "dataflash_read_fact_otp",
83145 + [4380].param3 = 1,
83146 + [43834].file = "security/apparmor/apparmorfs.c",
83147 + [43834].name = "profile_replace",
83148 + [43834].param3 = 1,
83149 + [43895].file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
83150 + [43895].name = "ddb_output_write",
83151 + [43895].param3 = 1,
83152 + [43899].file = "drivers/media/rc/imon.c",
83153 + [43899].name = "vfd_write",
83154 + [43899].param3 = 1,
83155 + [43900].file = "drivers/scsi/cxgbi/libcxgbi.c",
83156 + [43900].name = "cxgbi_device_portmap_create",
83157 + [43900].param3 = 1,
83158 + [43922].file = "drivers/mmc/card/mmc_test.c",
83159 + [43922].name = "mmc_test_alloc_mem",
83160 + [43922].param3 = 1,
83161 + [43946].file = "drivers/net/wireless/ath/ath6kl/txrx.c",
83162 + [43946].name = "aggr_recv_addba_req_evt",
83163 + [43946].param4 = 1,
83164 + [44006].file = "mm/process_vm_access.c",
83165 + [44006].name = "process_vm_rw_pages",
83166 + [44006].param5 = 1,
83167 + [44006].param6 = 1,
83168 + [44050].file = "fs/nfs/idmap.c",
83169 + [44050].name = "nfs_map_group_to_gid",
83170 + [44050].param3 = 1,
83171 + [44125].file = "fs/ext4/super.c",
83172 + [44125].name = "ext4_kvmalloc",
83173 + [44125].param1 = 1,
83174 + [44266].file = "kernel/cgroup.c",
83175 + [44266].name = "cgroup_write_string",
83176 + [44266].param5 = 1,
83177 + [44290].file = "drivers/net/usb/dm9601.c",
83178 + [44290].name = "dm_read",
83179 + [44290].param3 = 1,
83180 + [44308].file = "crypto/af_alg.c",
83181 + [44308].name = "alg_setkey",
83182 + [44308].param3 = 1,
83183 + [44510].file = "drivers/net/ethernet/broadcom/bnx2.c",
83184 + [44510].name = "bnx2_nvram_write",
83185 + [44510].param2 = 1,
83186 + [44625].file = "net/bluetooth/mgmt.c",
83187 + [44625].name = "set_connectable",
83188 + [44625].param4 = 1,
83189 + [44642].file = "drivers/net/wireless/iwmc3200wifi/commands.c",
83190 + [44642].name = "iwm_umac_set_config_var",
83191 + [44642].param4 = 1,
83192 + [44698].file = "net/sctp/socket.c",
83193 + [44698].name = "sctp_setsockopt_context",
83194 + [44698].param3 = 1,
83195 + [4471].file = "fs/ntfs/malloc.h",
83196 + [4471].name = "__ntfs_malloc",
83197 + [4471].param1 = 1,
83198 + [44773].file = "drivers/staging/vme/devices/vme_user.c",
83199 + [44773].name = "vme_user_write",
83200 + [44773].param3 = 1,
83201 + [44825].file = "drivers/scsi/osd/osd_initiator.c",
83202 + [44825].name = "_osd_realloc_seg",
83203 + [44825].param3 = 1,
83204 + [44852].file = "net/sctp/socket.c",
83205 + [44852].name = "sctp_setsockopt_rtoinfo",
83206 + [44852].param3 = 1,
83207 + [44936].file = "drivers/md/dm-raid.c",
83208 + [44936].name = "context_alloc",
83209 + [44936].param3 = 1,
83210 + [44943].file = "mm/util.c",
83211 + [44943].name = "kmemdup",
83212 + [44943].param2 = 1,
83213 + [44946].file = "net/sctp/socket.c",
83214 + [44946].name = "sctp_setsockopt_auth_chunk",
83215 + [44946].param3 = 1,
83216 + [44990].file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c",
83217 + [44990].name = "pvr2_ioread_set_sync_key",
83218 + [44990].param3 = 1,
83219 + [45000].file = "fs/afs/proc.c",
83220 + [45000].name = "afs_proc_rootcell_write",
83221 + [45000].param3 = 1,
83222 + [45117].file = "drivers/staging/winbond/wb35reg.c",
83223 + [45117].name = "Wb35Reg_BurstWrite",
83224 + [45117].param4 = 1,
83225 + [45200].file = "drivers/scsi/scsi_proc.c",
83226 + [45200].name = "proc_scsi_write_proc",
83227 + [45200].param3 = 1,
83228 + [45217].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
83229 + [45217].name = "iwl_dbgfs_debug_level_write",
83230 + [45217].param3 = 1,
83231 + [45233].file = "net/rds/info.c",
83232 + [45233].name = "rds_info_getsockopt",
83233 + [45233].param3 = 1,
83234 + [45326].file = "drivers/mtd/ubi/cdev.c",
83235 + [45326].name = "vol_cdev_read",
83236 + [45326].param3 = 1,
83237 + [45335].file = "fs/read_write.c",
83238 + [45335].name = "vfs_writev",
83239 + [45335].param3 = 1,
83240 + [45366].file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c",
83241 + [45366].name = "init_tid_tabs",
83242 + [45366].param2 = 1,
83243 + [45366].param3 = 1,
83244 + [45366].param4 = 1,
83245 + [45534].file = "drivers/net/wireless/ath/carl9170/cmd.c",
83246 + [45534].name = "carl9170_cmd_buf",
83247 + [45534].param3 = 1,
83248 + [45576].file = "net/netfilter/xt_recent.c",
83249 + [45576].name = "recent_mt_proc_write",
83250 + [45576].param3 = 1,
83251 + [45583].file = "fs/gfs2/dir.c",
83252 + [45583].name = "leaf_dealloc",
83253 + [45583].param3 = 1,
83254 + [45586].file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
83255 + [45586].name = "rt2x00debug_write_bbp",
83256 + [45586].param3 = 1,
83257 + [45629].file = "lib/bch.c",
83258 + [45629].name = "bch_alloc",
83259 + [45629].param1 = 1,
83260 + [45633].file = "drivers/input/evdev.c",
83261 + [45633].name = "evdev_do_ioctl",
83262 + [45633].param2 = 1,
83263 + [45743].file = "drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c",
83264 + [45743].name = "qlcnic_alloc_msix_entries",
83265 + [45743].param2 = 1,
83266 + [45864].file = "drivers/atm/ambassador.c",
83267 + [45864].name = "create_queues",
83268 + [45864].param2 = 1,
83269 + [45864].param3 = 1,
83270 + [45930].file = "security/apparmor/apparmorfs.c",
83271 + [45930].name = "profile_remove",
83272 + [45930].param3 = 1,
83273 + [45954].file = "drivers/usb/misc/legousbtower.c",
83274 + [45954].name = "tower_write",
83275 + [45954].param3 = 1,
83276 + [46140].file = "sound/core/memalloc.c",
83277 + [46140].name = "snd_mem_proc_write",
83278 + [46140].param3 = 1,
83279 + [4616].file = "net/sunrpc/cache.c",
83280 + [4616].name = "cache_do_downcall",
83281 + [4616].param3 = 1,
83282 + [46243].file = "fs/binfmt_misc.c",
83283 + [46243].name = "bm_register_write",
83284 + [46243].param3 = 1,
83285 + [46250].file = "fs/xattr.c",
83286 + [46250].name = "sys_getxattr",
83287 + [46250].param4 = 1,
83288 + [46343].file = "fs/compat.c",
83289 + [46343].name = "compat_do_readv_writev",
83290 + [46343].param4 = 1,
83291 + [46400].file = "drivers/staging/sep/sep_driver.c",
83292 + [46400].name = "sep_prepare_input_output_dma_table",
83293 + [46400].param2 = 1,
83294 + [46400].param3 = 1,
83295 + [46400].param4 = 1,
83296 + [4644].file = "drivers/net/usb/mcs7830.c",
83297 + [4644].name = "mcs7830_get_reg",
83298 + [4644].param3 = 1,
83299 + [46605].file = "sound/core/oss/pcm_oss.c",
83300 + [46605].name = "snd_pcm_oss_sync1",
83301 + [46605].param2 = 1,
83302 + [46630].file = "net/decnet/af_decnet.c",
83303 + [46630].name = "__dn_setsockopt",
83304 + [46630].param5 = 1,
83305 + [46655].file = "drivers/media/video/hdpvr/hdpvr-video.c",
83306 + [46655].name = "hdpvr_read",
83307 + [46655].param3 = 1,
83308 + [46685].file = "drivers/gpu/drm/ttm/ttm_bo_vm.c",
83309 + [46685].name = "ttm_bo_fbdev_io",
83310 + [46685].param4 = 1,
83311 + [46742].file = "drivers/scsi/st.c",
83312 + [46742].name = "sgl_map_user_pages",
83313 + [46742].param2 = 1,
83314 + [46881].file = "drivers/char/lp.c",
83315 + [46881].name = "lp_write",
83316 + [46881].param3 = 1,
83317 + [47130].file = "kernel/kfifo.c",
83318 + [47130].name = "kfifo_copy_to_user",
83319 + [47130].param3 = 1,
83320 + [47265].file = "drivers/scsi/bnx2fc/bnx2fc_io.c",
83321 + [47265].name = "bnx2fc_cmd_mgr_alloc",
83322 + [47265].param2 = 1,
83323 + [47265].param3 = 1,
83324 + [47309].file = "drivers/scsi/aic94xx/aic94xx_init.c",
83325 + [47309].name = "asd_store_update_bios",
83326 + [47309].param4 = 1,
83327 + [47342].file = "fs/proc/base.c",
83328 + [47342].name = "sched_autogroup_write",
83329 + [47342].param3 = 1,
83330 + [47363].file = "drivers/input/evdev.c",
83331 + [47363].name = "evdev_ioctl_handler",
83332 + [47363].param2 = 1,
83333 + [47385].file = "drivers/net/wireless/zd1211rw/zd_usb.c",
83334 + [47385].name = "zd_usb_iowrite16v",
83335 + [47385].param3 = 1,
83336 + [4738].file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
83337 + [4738].name = "ath6kl_set_ap_probe_resp_ies",
83338 + [4738].param3 = 1,
83339 + [47393].file = "drivers/net/wireless/ath/main.c",
83340 + [47393].name = "ath_rxbuf_alloc",
83341 + [47393].param2 = 1,
83342 + [47463].file = "fs/xfs/kmem.c",
83343 + [47463].name = "kmem_zalloc",
83344 + [47463].param1 = 1,
83345 + [47474].file = "kernel/trace/trace.c",
83346 + [47474].name = "tracing_buffers_read",
83347 + [47474].param3 = 1,
83348 + [47636].file = "drivers/usb/class/usblp.c",
83349 + [47636].name = "usblp_ioctl",
83350 + [47636].param2 = 1,
83351 + [47637].file = "drivers/block/cciss.c",
83352 + [47637].name = "cciss_proc_write",
83353 + [47637].param3 = 1,
83354 + [47712].file = "net/sctp/socket.c",
83355 + [47712].name = "sctp_setsockopt_maxburst",
83356 + [47712].param3 = 1,
83357 + [47728].file = "drivers/char/agp/isoch.c",
83358 + [47728].name = "agp_3_5_isochronous_node_enable",
83359 + [47728].param3 = 1,
83360 + [4779].file = "fs/pipe.c",
83361 + [4779].name = "pipe_set_size",
83362 + [4779].param2 = 1,
83363 + [47881].file = "security/selinux/selinuxfs.c",
83364 + [47881].name = "sel_write_disable",
83365 + [47881].param3 = 1,
83366 + [48111].file = "net/wireless/sme.c",
83367 + [48111].name = "cfg80211_roamed_bss",
83368 + [48111].param4 = 1,
83369 + [48111].param6 = 1,
83370 + [48124].file = "drivers/net/wireless/iwmc3200wifi/main.c",
83371 + [48124].name = "iwm_notif_send",
83372 + [48124].param6 = 1,
83373 + [48155].file = "net/sctp/sm_make_chunk.c",
83374 + [48155].name = "sctp_make_abort_user",
83375 + [48155].param3 = 1,
83376 + [48182].file = "crypto/cryptd.c",
83377 + [48182].name = "cryptd_alloc_instance",
83378 + [48182].param2 = 1,
83379 + [48182].param3 = 1,
83380 + [48248].file = "security/keys/keyctl.c",
83381 + [48248].name = "keyctl_instantiate_key",
83382 + [48248].param3 = 1,
83383 + [4829].file = "drivers/block/floppy.c",
83384 + [4829].name = "fd_copyout",
83385 + [4829].param3 = 1,
83386 + [48632].file = "net/bluetooth/l2cap_core.c",
83387 + [48632].name = "l2cap_build_cmd",
83388 + [48632].param4 = 1,
83389 + [48642].file = "fs/hugetlbfs/inode.c",
83390 + [48642].name = "hugetlbfs_read",
83391 + [48642].param3 = 1,
83392 + [48720].file = "drivers/gpu/drm/i915/i915_debugfs.c",
83393 + [48720].name = "i915_max_freq_write",
83394 + [48720].param3 = 1,
83395 + [48768].file = "net/irda/irnet/irnet_ppp.c",
83396 + [48768].name = "dev_irnet_write",
83397 + [48768].param3 = 1,
83398 + [48818].file = "net/sunrpc/svc.c",
83399 + [48818].name = "svc_pool_map_alloc_arrays",
83400 + [48818].param2 = 1,
83401 + [48856].file = "drivers/acpi/acpica/utalloc.c",
83402 + [48856].name = "acpi_ut_initialize_buffer",
83403 + [48856].param2 = 1,
83404 + [48862].file = "net/sctp/socket.c",
83405 + [48862].name = "sctp_setsockopt_adaptation_layer",
83406 + [48862].param3 = 1,
83407 + [49126].file = "lib/prio_heap.c",
83408 + [49126].name = "heap_init",
83409 + [49126].param2 = 1,
83410 + [49143].file = "sound/core/oss/pcm_oss.c",
83411 + [49143].name = "snd_pcm_oss_write2",
83412 + [49143].param3 = 1,
83413 + [49216].file = "fs/read_write.c",
83414 + [49216].name = "do_readv_writev",
83415 + [49216].param4 = 1,
83416 + [49426].file = "net/bluetooth/l2cap_sock.c",
83417 + [49426].name = "l2cap_sock_setsockopt_old",
83418 + [49426].param4 = 1,
83419 + [49448].file = "drivers/isdn/gigaset/common.c",
83420 + [49448].name = "gigaset_initdriver",
83421 + [49448].param2 = 1,
83422 + [49494].file = "drivers/virtio/virtio_ring.c",
83423 + [49494].name = "vring_new_virtqueue",
83424 + [49494].param1 = 1,
83425 + [49499].file = "drivers/block/nvme.c",
83426 + [49499].name = "nvme_alloc_iod",
83427 + [49499].param1 = 1,
83428 + [49510].file = "net/sctp/socket.c",
83429 + [49510].name = "sctp_setsockopt_autoclose",
83430 + [49510].param3 = 1,
83431 + [4958].file = "drivers/net/wireless/p54/fwio.c",
83432 + [4958].name = "p54_alloc_skb",
83433 + [4958].param3 = 1,
83434 + [49604].file = "crypto/af_alg.c",
83435 + [49604].name = "alg_setsockopt",
83436 + [49604].param5 = 1,
83437 + [49646].file = "drivers/tty/vt/vt.c",
83438 + [49646].name = "vc_resize",
83439 + [49646].param2 = 1,
83440 + [49646].param3 = 1,
83441 + [49658].file = "drivers/net/wireless/brcm80211/brcmsmac/dma.c",
83442 + [49658].name = "dma_attach",
83443 + [49658].param6 = 1,
83444 + [49658].param7 = 1,
83445 + [49663].file = "drivers/media/video/uvc/uvc_driver.c",
83446 + [49663].name = "uvc_simplify_fraction",
83447 + [49663].param3 = 1,
83448 + [49746].file = "net/ipv4/netfilter/arp_tables.c",
83449 + [49746].name = "compat_do_arpt_set_ctl",
83450 + [49746].param4 = 1,
83451 + [49780].file = "net/mac80211/key.c",
83452 + [49780].name = "ieee80211_key_alloc",
83453 + [49780].param3 = 1,
83454 + [49805].file = "drivers/pci/pci.c",
83455 + [49805].name = "pci_add_cap_save_buffer",
83456 + [49805].param3 = 1,
83457 + [49845].file = "mm/vmalloc.c",
83458 + [49845].name = "__vmalloc_node",
83459 + [49845].param1 = 1,
83460 + [49929].file = "drivers/mtd/ubi/cdev.c",
83461 + [49929].name = "vol_cdev_direct_write",
83462 + [49929].param3 = 1,
83463 + [49935].file = "fs/xfs/kmem.c",
83464 + [49935].name = "kmem_zalloc_greedy",
83465 + [49935].param2 = 1,
83466 + [49935].param3 = 1,
83467 + [49].file = "net/atm/svc.c",
83468 + [49].name = "svc_setsockopt",
83469 + [49].param5 = 1,
83470 + [50518].file = "drivers/gpu/drm/nouveau/nouveau_gem.c",
83471 + [50518].name = "u_memcpya",
83472 + [50518].param2 = 1,
83473 + [50518].param3 = 1,
83474 + [5052].file = "drivers/char/ppdev.c",
83475 + [5052].name = "pp_read",
83476 + [5052].param3 = 1,
83477 + [50562].file = "drivers/media/video/zoran/zoran_procfs.c",
83478 + [50562].name = "zoran_write",
83479 + [50562].param3 = 1,
83480 + [50617].file = "fs/hugetlbfs/inode.c",
83481 + [50617].name = "hugetlbfs_read_actor",
83482 + [50617].param2 = 1,
83483 + [50617].param4 = 1,
83484 + [50617].param5 = 1,
83485 + [50692].file = "lib/ts_bm.c",
83486 + [50692].name = "bm_init",
83487 + [50692].param2 = 1,
83488 + [50813].file = "mm/vmalloc.c",
83489 + [50813].name = "__vmalloc_node_flags",
83490 + [50813].param1 = 1,
83491 + [5087].file = "drivers/atm/solos-pci.c",
83492 + [5087].name = "console_store",
83493 + [5087].param4 = 1,
83494 + [5102].file = "drivers/usb/misc/usbtest.c",
83495 + [5102].name = "usbtest_alloc_urb",
83496 + [5102].param3 = 1,
83497 + [5102].param5 = 1,
83498 + [51061].file = "net/bluetooth/mgmt.c",
83499 + [51061].name = "pin_code_reply",
83500 + [51061].param4 = 1,
83501 + [51139].file = "fs/pipe.c",
83502 + [51139].name = "pipe_iov_copy_to_user",
83503 + [51139].param3 = 1,
83504 + [51177].file = "net/sunrpc/xprtrdma/transport.c",
83505 + [51177].name = "xprt_rdma_allocate",
83506 + [51177].param2 = 1,
83507 + [51182].file = "drivers/misc/sgi-xp/xpc_main.c",
83508 + [51182].name = "xpc_kzalloc_cacheline_aligned",
83509 + [51182].param1 = 1,
83510 + [51250].file = "fs/read_write.c",
83511 + [51250].name = "rw_copy_check_uvector",
83512 + [51250].param3 = 1,
83513 + [51253].file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
83514 + [51253].name = "rt2x00debug_write_eeprom",
83515 + [51253].param3 = 1,
83516 + [51323].file = "sound/pci/ac97/ac97_pcm.c",
83517 + [51323].name = "snd_ac97_pcm_assign",
83518 + [51323].param2 = 1,
83519 + [51340].file = "drivers/usb/class/usblp.c",
83520 + [51340].name = "usblp_write",
83521 + [51340].param3 = 1,
83522 + [51499].file = "net/802/garp.c",
83523 + [51499].name = "garp_attr_create",
83524 + [51499].param3 = 1,
83525 + [51842].file = "drivers/hid/hid-core.c",
83526 + [51842].name = "hid_register_field",
83527 + [51842].param2 = 1,
83528 + [51842].param3 = 1,
83529 + [5197].file = "net/core/dev.c",
83530 + [5197].name = "dev_set_alias",
83531 + [5197].param3 = 1,
83532 + [5204].file = "drivers/media/video/usbvision/usbvision-video.c",
83533 + [5204].name = "usbvision_v4l2_read",
83534 + [5204].param3 = 1,
83535 + [5206].file = "drivers/media/dvb/ttpci/av7110_v4l.c",
83536 + [5206].name = "av7110_vbi_write",
83537 + [5206].param3 = 1,
83538 + [52086].file = "drivers/usb/image/mdc800.c",
83539 + [52086].name = "mdc800_device_read",
83540 + [52086].param3 = 1,
83541 + [52099].file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
83542 + [52099].name = "do_surface_dirty_sou",
83543 + [52099].param7 = 1,
83544 + [52172].file = "drivers/pcmcia/cistpl.c",
83545 + [52172].name = "pccard_store_cis",
83546 + [52172].param6 = 1,
83547 + [52173].file = "drivers/misc/ibmasm/ibmasmfs.c",
83548 + [52173].name = "remote_settings_file_write",
83549 + [52173].param3 = 1,
83550 + [52199].file = "mm/nobootmem.c",
83551 + [52199].name = "__alloc_bootmem",
83552 + [52199].param1 = 1,
83553 + [52343].file = "drivers/usb/misc/adutux.c",
83554 + [52343].name = "adu_read",
83555 + [52343].param3 = 1,
83556 + [52401].file = "drivers/staging/rtl8712/rtl871x_ioctl_linux.c",
83557 + [52401].name = "r871x_set_wpa_ie",
83558 + [52401].param3 = 1,
83559 + [52699].file = "lib/ts_fsm.c",
83560 + [52699].name = "fsm_init",
83561 + [52699].param2 = 1,
83562 + [52721].file = "security/keys/encrypted-keys/encrypted.c",
83563 + [52721].name = "encrypted_instantiate",
83564 + [52721].param3 = 1,
83565 + [52902].file = "fs/xfs/kmem.h",
83566 + [52902].name = "kmem_zalloc_large",
83567 + [52902].param1 = 1,
83568 + [52950].file = "net/bluetooth/mgmt.c",
83569 + [52950].name = "set_discoverable",
83570 + [52950].param4 = 1,
83571 + [53041].file = "fs/libfs.c",
83572 + [53041].name = "simple_transaction_get",
83573 + [53041].param3 = 1,
83574 + [5313].file = "fs/gfs2/quota.c",
83575 + [5313].name = "do_sync",
83576 + [5313].param1 = 1,
83577 + [53209].file = "drivers/usb/host/ehci-sched.c",
83578 + [53209].name = "iso_sched_alloc",
83579 + [53209].param1 = 1,
83580 + [53302].file = "drivers/firewire/core-cdev.c",
83581 + [53302].name = "dispatch_ioctl",
83582 + [53302].param2 = 1,
83583 + [53355].file = "fs/ceph/dir.c",
83584 + [53355].name = "ceph_read_dir",
83585 + [53355].param3 = 1,
83586 + [53405].file = "drivers/media/video/videobuf-core.c",
83587 + [53405].name = "__videobuf_copy_to_user",
83588 + [53405].param4 = 1,
83589 + [53407].file = "net/wireless/sme.c",
83590 + [53407].name = "cfg80211_connect_result",
83591 + [53407].param4 = 1,
83592 + [53407].param6 = 1,
83593 + [53426].file = "fs/libfs.c",
83594 + [53426].name = "simple_transaction_read",
83595 + [53426].param3 = 1,
83596 + [5344].file = "security/selinux/ss/hashtab.c",
83597 + [5344].name = "hashtab_create",
83598 + [5344].param3 = 1,
83599 + [53513].file = "drivers/mmc/core/mmc_ops.c",
83600 + [53513].name = "mmc_send_bus_test",
83601 + [53513].param4 = 1,
83602 + [53626].file = "drivers/block/paride/pg.c",
83603 + [53626].name = "pg_read",
83604 + [53626].param3 = 1,
83605 + [53631].file = "mm/util.c",
83606 + [53631].name = "memdup_user",
83607 + [53631].param2 = 1,
83608 + [53674].file = "drivers/media/dvb/ttpci/av7110_ca.c",
83609 + [53674].name = "ci_ll_write",
83610 + [53674].param4 = 1,
83611 + [5389].file = "drivers/infiniband/core/uverbs_cmd.c",
83612 + [5389].name = "ib_uverbs_unmarshall_recv",
83613 + [5389].param5 = 1,
83614 + [53901].file = "net/rds/message.c",
83615 + [53901].name = "rds_message_alloc",
83616 + [53901].param1 = 1,
83617 + [53902].file = "net/sctp/socket.c",
83618 + [53902].name = "sctp_setsockopt_initmsg",
83619 + [53902].param3 = 1,
83620 + [5410].file = "kernel/kexec.c",
83621 + [5410].name = "sys_kexec_load",
83622 + [5410].param2 = 1,
83623 + [54172].file = "net/nfc/core.c",
83624 + [54172].name = "nfc_alloc_recv_skb",
83625 + [54172].param1 = 1,
83626 + [54182].file = "drivers/block/rbd.c",
83627 + [54182].name = "rbd_snap_add",
83628 + [54182].param4 = 1,
83629 + [54201].file = "drivers/platform/x86/asus_acpi.c",
83630 + [54201].name = "mled_proc_write",
83631 + [54201].param3 = 1,
83632 + [54263].file = "security/keys/trusted.c",
83633 + [54263].name = "trusted_instantiate",
83634 + [54263].param3 = 1,
83635 + [54296].file = "include/linux/mISDNif.h",
83636 + [54296].name = "_alloc_mISDN_skb",
83637 + [54296].param3 = 1,
83638 + [54298].file = "drivers/usb/wusbcore/crypto.c",
83639 + [54298].name = "wusb_ccm_mac",
83640 + [54298].param7 = 1,
83641 + [54318].file = "include/drm/drm_mem_util.h",
83642 + [54318].name = "drm_malloc_ab",
83643 + [54318].param1 = 1,
83644 + [54318].param2 = 1,
83645 + [54335].file = "drivers/md/dm-table.c",
83646 + [54335].name = "dm_vcalloc",
83647 + [54335].param1 = 1,
83648 + [54335].param2 = 1,
83649 + [54338].file = "fs/ntfs/malloc.h",
83650 + [54338].name = "ntfs_malloc_nofs",
83651 + [54338].param1 = 1,
83652 + [54339].file = "security/smack/smackfs.c",
83653 + [54339].name = "smk_write_cipso",
83654 + [54339].param3 = 1,
83655 + [54369].file = "drivers/usb/storage/realtek_cr.c",
83656 + [54369].name = "rts51x_read_mem",
83657 + [54369].param4 = 1,
83658 + [5438].file = "sound/core/memory.c",
83659 + [5438].name = "copy_to_user_fromio",
83660 + [5438].param3 = 1,
83661 + [54401].file = "lib/dynamic_debug.c",
83662 + [54401].name = "ddebug_proc_write",
83663 + [54401].param3 = 1,
83664 + [54467].file = "net/packet/af_packet.c",
83665 + [54467].name = "packet_setsockopt",
83666 + [54467].param5 = 1,
83667 + [54573].file = "ipc/sem.c",
83668 + [54573].name = "sys_semop",
83669 + [54573].param3 = 1,
83670 + [54583].file = "net/sctp/socket.c",
83671 + [54583].name = "sctp_setsockopt_peer_addr_params",
83672 + [54583].param3 = 1,
83673 + [54643].file = "drivers/isdn/hardware/eicon/divasi.c",
83674 + [54643].name = "um_idi_write",
83675 + [54643].param3 = 1,
83676 + [54657].file = "mm/migrate.c",
83677 + [54657].name = "do_pages_stat",
83678 + [54657].param2 = 1,
83679 + [54663].file = "drivers/isdn/hardware/eicon/platform.h",
83680 + [54663].name = "diva_os_malloc",
83681 + [54663].param2 = 1,
83682 + [54701].file = "drivers/misc/altera-stapl/altera-jtag.c",
83683 + [54701].name = "altera_swap_ir",
83684 + [54701].param2 = 1,
83685 + [54751].file = "drivers/infiniband/core/device.c",
83686 + [54751].name = "ib_alloc_device",
83687 + [54751].param1 = 1,
83688 + [54771].file = "drivers/isdn/mISDN/socket.c",
83689 + [54771].name = "_l2_alloc_skb",
83690 + [54771].param1 = 1,
83691 + [54777].file = "drivers/net/wireless/ath/ath6kl/debug.c",
83692 + [54777].name = "ath6kl_debug_roam_tbl_event",
83693 + [54777].param3 = 1,
83694 + [54806].file = "drivers/scsi/lpfc/lpfc_debugfs.c",
83695 + [54806].name = "lpfc_debugfs_dif_err_write",
83696 + [54806].param3 = 1,
83697 + [5494].file = "fs/cifs/cifsacl.c",
83698 + [5494].name = "cifs_idmap_key_instantiate",
83699 + [5494].param3 = 1,
83700 + [55066].file = "net/ipv6/ipv6_sockglue.c",
83701 + [55066].name = "do_ipv6_setsockopt",
83702 + [55066].param5 = 1,
83703 + [55105].file = "drivers/base/devres.c",
83704 + [55105].name = "devres_alloc",
83705 + [55105].param2 = 1,
83706 + [55115].file = "net/sctp/probe.c",
83707 + [55115].name = "sctpprobe_read",
83708 + [55115].param3 = 1,
83709 + [55155].file = "net/bluetooth/rfcomm/sock.c",
83710 + [55155].name = "rfcomm_sock_setsockopt",
83711 + [55155].param5 = 1,
83712 + [55187].file = "security/keys/keyctl.c",
83713 + [55187].name = "keyctl_describe_key",
83714 + [55187].param3 = 1,
83715 + [55253].file = "drivers/net/wireless/ray_cs.c",
83716 + [55253].name = "ray_cs_essid_proc_write",
83717 + [55253].param3 = 1,
83718 + [55341].file = "drivers/staging/sep/sep_driver.c",
83719 + [55341].name = "sep_prepare_input_output_dma_table_in_dcb",
83720 + [55341].param4 = 1,
83721 + [55341].param5 = 1,
83722 + [55417].file = "drivers/hv/channel.c",
83723 + [55417].name = "vmbus_open",
83724 + [55417].param2 = 1,
83725 + [55417].param3 = 1,
83726 + [5548].file = "drivers/media/media-entity.c",
83727 + [5548].name = "media_entity_init",
83728 + [5548].param2 = 1,
83729 + [5548].param4 = 1,
83730 + [55546].file = "drivers/spi/spi.c",
83731 + [55546].name = "spi_alloc_master",
83732 + [55546].param2 = 1,
83733 + [55580].file = "drivers/usb/mon/mon_bin.c",
83734 + [55580].name = "copy_from_buf",
83735 + [55580].param2 = 1,
83736 + [55584].file = "drivers/tty/tty_buffer.c",
83737 + [55584].name = "tty_buffer_alloc",
83738 + [55584].param2 = 1,
83739 + [55712].file = "drivers/char/mem.c",
83740 + [55712].name = "read_zero",
83741 + [55712].param3 = 1,
83742 + [55727].file = "drivers/media/video/stk-webcam.c",
83743 + [55727].name = "stk_prepare_sio_buffers",
83744 + [55727].param2 = 1,
83745 + [55816].file = "drivers/misc/altera-stapl/altera-jtag.c",
83746 + [55816].name = "altera_set_ir_pre",
83747 + [55816].param2 = 1,
83748 + [55826].file = "drivers/infiniband/hw/ipath/ipath_file_ops.c",
83749 + [55826].name = "ipath_get_base_info",
83750 + [55826].param3 = 1,
83751 + [5586].file = "net/atm/common.c",
83752 + [5586].name = "alloc_tx",
83753 + [5586].param2 = 1,
83754 + [55978].file = "drivers/usb/misc/iowarrior.c",
83755 + [55978].name = "iowarrior_write",
83756 + [55978].param3 = 1,
83757 + [56170].file = "drivers/usb/wusbcore/wa-xfer.c",
83758 + [56170].name = "__wa_xfer_setup_segs",
83759 + [56170].param2 = 1,
83760 + [56199].file = "fs/binfmt_misc.c",
83761 + [56199].name = "parse_command",
83762 + [56199].param2 = 1,
83763 + [56218].file = "drivers/mmc/card/mmc_test.c",
83764 + [56218].name = "mtf_test_write",
83765 + [56218].param3 = 1,
83766 + [56239].file = "fs/sysfs/file.c",
83767 + [56239].name = "fill_write_buffer",
83768 + [56239].param3 = 1,
83769 + [5624].file = "drivers/net/wireless/ath/ath9k/wmi.c",
83770 + [5624].name = "ath9k_wmi_cmd",
83771 + [5624].param4 = 1,
83772 + [56416].file = "drivers/misc/lkdtm.c",
83773 + [56416].name = "do_register_entry",
83774 + [56416].param4 = 1,
83775 + [56458].file = "drivers/usb/host/hwa-hc.c",
83776 + [56458].name = "__hwahc_op_set_ptk",
83777 + [56458].param5 = 1,
83778 + [56471].file = "include/linux/slab.h",
83779 + [56471].name = "kcalloc",
83780 + [56471].param1 = 1,
83781 + [56471].param2 = 1,
83782 + [56513].file = "fs/cifs/connect.c",
83783 + [56513].name = "cifs_readv_from_socket",
83784 + [56513].param3 = 1,
83785 + [56531].file = "net/bluetooth/l2cap_core.c",
83786 + [56531].name = "l2cap_send_cmd",
83787 + [56531].param4 = 1,
83788 + [56544].file = "drivers/block/drbd/drbd_receiver.c",
83789 + [56544].name = "receive_DataRequest",
83790 + [56544].param3 = 1,
83791 + [56609].file = "lib/mpi/mpi-internal.h",
83792 + [56609].name = "RESIZE_IF_NEEDED",
83793 + [56609].param2 = 1,
83794 + [56652].file = "drivers/misc/altera-stapl/altera-jtag.c",
83795 + [56652].name = "altera_set_dr_post",
83796 + [56652].param2 = 1,
83797 + [56653].file = "net/irda/af_irda.c",
83798 + [56653].name = "irda_setsockopt",
83799 + [56653].param5 = 1,
83800 + [56672].file = "drivers/char/agp/generic.c",
83801 + [56672].name = "agp_alloc_page_array",
83802 + [56672].param1 = 1,
83803 + [56798].file = "fs/bio.c",
83804 + [56798].name = "bio_alloc_map_data",
83805 + [56798].param2 = 1,
83806 + [56843].file = "drivers/scsi/scsi_transport_iscsi.c",
83807 + [56843].name = "iscsi_recv_pdu",
83808 + [56843].param4 = 1,
83809 + [56903].file = "drivers/mtd/mtdchar.c",
83810 + [56903].name = "mtdchar_readoob",
83811 + [56903].param4 = 1,
83812 + [5699].file = "net/sctp/socket.c",
83813 + [5699].name = "sctp_setsockopt_default_send_param",
83814 + [5699].param3 = 1,
83815 + [5704].file = "drivers/mtd/mtdswap.c",
83816 + [5704].name = "mtdswap_init",
83817 + [5704].param2 = 1,
83818 + [57128].file = "drivers/pnp/pnpbios/proc.c",
83819 + [57128].name = "pnpbios_proc_write",
83820 + [57128].param3 = 1,
83821 + [57190].file = "drivers/char/agp/generic.c",
83822 + [57190].name = "agp_generic_alloc_user",
83823 + [57190].param1 = 1,
83824 + [57252].file = "drivers/media/dvb/dvb-core/dmxdev.c",
83825 + [57252].name = "dvb_dmxdev_set_buffer_size",
83826 + [57252].param2 = 1,
83827 + [57392].file = "drivers/block/aoe/aoecmd.c",
83828 + [57392].name = "new_skb",
83829 + [57392].param1 = 1,
83830 + [57471].file = "drivers/media/video/sn9c102/sn9c102_core.c",
83831 + [57471].name = "sn9c102_read",
83832 + [57471].param3 = 1,
83833 + [57547].file = "security/keys/encrypted-keys/encrypted.c",
83834 + [57547].name = "get_derived_key",
83835 + [57547].param4 = 1,
83836 + [57552].file = "net/sunrpc/cache.c",
83837 + [57552].name = "cache_slow_downcall",
83838 + [57552].param2 = 1,
83839 + [57670].file = "drivers/bluetooth/btmrvl_debugfs.c",
83840 + [57670].name = "btmrvl_pscmd_write",
83841 + [57670].param3 = 1,
83842 + [57710].file = "include/linux/usb/wusb.h",
83843 + [57710].name = "wusb_prf_256",
83844 + [57710].param7 = 1,
83845 + [57724].file = "net/bluetooth/hci_sock.c",
83846 + [57724].name = "hci_sock_setsockopt",
83847 + [57724].param5 = 1,
83848 + [57761].file = "kernel/kexec.c",
83849 + [57761].name = "kimage_crash_alloc",
83850 + [57761].param3 = 1,
83851 + [57786].file = "net/ipv6/netfilter/ip6_tables.c",
83852 + [57786].name = "compat_do_ip6t_set_ctl",
83853 + [57786].param4 = 1,
83854 + [57872].file = "fs/ceph/xattr.c",
83855 + [57872].name = "ceph_setxattr",
83856 + [57872].param4 = 1,
83857 + [57927].file = "fs/read_write.c",
83858 + [57927].name = "sys_preadv",
83859 + [57927].param3 = 1,
83860 + [58012].file = "include/net/bluetooth/bluetooth.h",
83861 + [58012].name = "bt_skb_alloc",
83862 + [58012].param1 = 1,
83863 + [58020].file = "drivers/firewire/core-cdev.c",
83864 + [58020].name = "fw_device_op_ioctl",
83865 + [58020].param2 = 1,
83866 + [58043].file = "kernel/auditfilter.c",
83867 + [58043].name = "audit_unpack_string",
83868 + [58043].param3 = 1,
83869 + [58087].file = "kernel/module.c",
83870 + [58087].name = "module_alloc_update_bounds_rw",
83871 + [58087].param1 = 1,
83872 + [58124].file = "drivers/usb/misc/usbtest.c",
83873 + [58124].name = "ctrl_out",
83874 + [58124].param3 = 1,
83875 + [58124].param5 = 1,
83876 + [58217].file = "net/sctp/socket.c",
83877 + [58217].name = "sctp_setsockopt_peer_primary_addr",
83878 + [58217].param3 = 1,
83879 + [58263].file = "security/keys/keyring.c",
83880 + [58263].name = "keyring_read",
83881 + [58263].param3 = 1,
83882 + [5830].file = "drivers/gpu/vga/vga_switcheroo.c",
83883 + [5830].name = "vga_switcheroo_debugfs_write",
83884 + [5830].param3 = 1,
83885 + [58320].file = "drivers/scsi/scsi_proc.c",
83886 + [58320].name = "proc_scsi_write",
83887 + [58320].param3 = 1,
83888 + [58344].file = "net/sunrpc/cache.c",
83889 + [58344].name = "read_flush",
83890 + [58344].param3 = 1,
83891 + [58379].file = "mm/nobootmem.c",
83892 + [58379].name = "__alloc_bootmem_node",
83893 + [58379].param2 = 1,
83894 + [58597].file = "kernel/kfifo.c",
83895 + [58597].name = "__kfifo_to_user",
83896 + [58597].param3 = 1,
83897 + [58641].file = "drivers/usb/misc/adutux.c",
83898 + [58641].name = "adu_write",
83899 + [58641].param3 = 1,
83900 + [58709].file = "fs/compat.c",
83901 + [58709].name = "compat_sys_pwritev",
83902 + [58709].param3 = 1,
83903 + [58769].file = "drivers/net/wireless/zd1211rw/zd_usb.c",
83904 + [58769].name = "zd_usb_read_fw",
83905 + [58769].param4 = 1,
83906 + [5876].file = "drivers/net/ppp/ppp_generic.c",
83907 + [5876].name = "ppp_write",
83908 + [5876].param3 = 1,
83909 + [58826].file = "net/sunrpc/xprt.c",
83910 + [58826].name = "xprt_alloc",
83911 + [58826].param2 = 1,
83912 + [58865].file = "include/linux/slub_def.h",
83913 + [58865].name = "kmalloc_order_trace",
83914 + [58865].param1 = 1,
83915 + [58867].file = "drivers/platform/x86/asus_acpi.c",
83916 + [58867].name = "wled_proc_write",
83917 + [58867].param3 = 1,
83918 + [58888].file = "fs/xattr.c",
83919 + [58888].name = "listxattr",
83920 + [58888].param3 = 1,
83921 + [58889].file = "kernel/trace/trace_kprobe.c",
83922 + [58889].name = "probes_write",
83923 + [58889].param3 = 1,
83924 + [58912].file = "drivers/lguest/core.c",
83925 + [58912].name = "__lgwrite",
83926 + [58912].param4 = 1,
83927 + [58918].file = "sound/core/pcm_native.c",
83928 + [58918].name = "snd_pcm_aio_write",
83929 + [58918].param3 = 1,
83930 + [58942].file = "drivers/block/aoe/aoedev.c",
83931 + [58942].name = "aoedev_flush",
83932 + [58942].param2 = 1,
83933 + [58958].file = "fs/fuse/control.c",
83934 + [58958].name = "fuse_conn_limit_write",
83935 + [58958].param3 = 1,
83936 + [59005].file = "drivers/staging/sep/sep_driver.c",
83937 + [59005].name = "sep_prepare_input_dma_table",
83938 + [59005].param2 = 1,
83939 + [59005].param3 = 1,
83940 + [59013].file = "fs/xfs/xfs_ioctl.c",
83941 + [59013].name = "xfs_handle_to_dentry",
83942 + [59013].param3 = 1,
83943 + [59034].file = "drivers/acpi/acpica/dsobject.c",
83944 + [59034].name = "acpi_ds_build_internal_package_obj",
83945 + [59034].param3 = 1,
83946 + [59073].file = "drivers/staging/speakup/i18n.c",
83947 + [59073].name = "msg_set",
83948 + [59073].param3 = 1,
83949 + [59074].file = "drivers/scsi/cxgbi/libcxgbi.c",
83950 + [59074].name = "ddp_make_gl",
83951 + [59074].param1 = 1,
83952 + [59297].file = "drivers/media/dvb/ttpci/av7110_av.c",
83953 + [59297].name = "dvb_play",
83954 + [59297].param3 = 1,
83955 + [59472].file = "drivers/misc/ibmasm/ibmasmfs.c",
83956 + [59472].name = "command_file_write",
83957 + [59472].param3 = 1,
83958 + [59504].file = "fs/exofs/super.c",
83959 + [59504].name = "__alloc_dev_table",
83960 + [59504].param2 = 1,
83961 + [59505].file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c",
83962 + [59505].name = "pvr2_ioread_read",
83963 + [59505].param3 = 1,
83964 + [59681].file = "fs/xfs/kmem.c",
83965 + [59681].name = "kmem_alloc",
83966 + [59681].param1 = 1,
83967 + [5968].file = "net/sunrpc/sched.c",
83968 + [5968].name = "rpc_malloc",
83969 + [5968].param2 = 1,
83970 + [59695].file = "net/ipv4/netfilter/ipt_ULOG.c",
83971 + [59695].name = "ulog_alloc_skb",
83972 + [59695].param1 = 1,
83973 + [59838].file = "net/netlink/af_netlink.c",
83974 + [59838].name = "nl_pid_hash_zalloc",
83975 + [59838].param1 = 1,
83976 + [59856].file = "drivers/base/devres.c",
83977 + [59856].name = "devm_kzalloc",
83978 + [59856].param2 = 1,
83979 + [60066].file = "mm/filemap.c",
83980 + [60066].name = "iov_iter_copy_from_user",
83981 + [60066].param4 = 1,
83982 + [60185].file = "kernel/params.c",
83983 + [60185].name = "kmalloc_parameter",
83984 + [60185].param1 = 1,
83985 + [60198].file = "fs/nfs/nfs4proc.c",
83986 + [60198].name = "nfs4_write_cached_acl",
83987 + [60198].param3 = 1,
83988 + [60330].file = "drivers/media/video/w9966.c",
83989 + [60330].name = "w9966_v4l_read",
83990 + [60330].param3 = 1,
83991 + [604].file = "drivers/staging/rtl8712/usb_ops_linux.c",
83992 + [604].name = "r8712_usbctrl_vendorreq",
83993 + [604].param6 = 1,
83994 + [60543].file = "drivers/usb/class/usbtmc.c",
83995 + [60543].name = "usbtmc_read",
83996 + [60543].param3 = 1,
83997 + [60683].file = "sound/drivers/opl4/opl4_proc.c",
83998 + [60683].name = "snd_opl4_mem_proc_write",
83999 + [60683].param5 = 1,
84000 + [60693].file = "drivers/misc/hpilo.c",
84001 + [60693].name = "ilo_read",
84002 + [60693].param3 = 1,
84003 + [60744].file = "sound/pci/emu10k1/emuproc.c",
84004 + [60744].name = "snd_emu10k1_fx8010_read",
84005 + [60744].param5 = 1,
84006 + [60777].file = "fs/ntfs/malloc.h",
84007 + [60777].name = "ntfs_malloc_nofs_nofail",
84008 + [60777].param1 = 1,
84009 + [60833].file = "drivers/block/aoe/aoenet.c",
84010 + [60833].name = "set_aoe_iflist",
84011 + [60833].param2 = 1,
84012 + [60882].file = "drivers/input/joydev.c",
84013 + [60882].name = "joydev_compat_ioctl",
84014 + [60882].param2 = 1,
84015 + [60891].file = "kernel/sched/core.c",
84016 + [60891].name = "sys_sched_setaffinity",
84017 + [60891].param2 = 1,
84018 + [60920].file = "drivers/infiniband/hw/qib/qib_file_ops.c",
84019 + [60920].name = "qib_get_base_info",
84020 + [60920].param3 = 1,
84021 + [60928].file = "drivers/staging/bcm/Bcmchar.c",
84022 + [60928].name = "bcm_char_read",
84023 + [60928].param3 = 1,
84024 + [61122].file = "drivers/base/devres.c",
84025 + [61122].name = "alloc_dr",
84026 + [61122].param2 = 1,
84027 + [61254].file = "drivers/scsi/scsi_devinfo.c",
84028 + [61254].name = "proc_scsi_devinfo_write",
84029 + [61254].param3 = 1,
84030 + [61283].file = "drivers/net/wireless/ath/ath6kl/debug.c",
84031 + [61283].name = "ath6kl_fwlog_read",
84032 + [61283].param3 = 1,
84033 + [61289].file = "security/apparmor/apparmorfs.c",
84034 + [61289].name = "aa_simple_write_to_buffer",
84035 + [61289].param4 = 1,
84036 + [61389].file = "include/linux/slab.h",
84037 + [61389].name = "kzalloc_node",
84038 + [61389].param1 = 1,
84039 + [61441].file = "fs/ntfs/file.c",
84040 + [61441].name = "ntfs_copy_from_user_iovec",
84041 + [61441].param3 = 1,
84042 + [61441].param6 = 1,
84043 + [61552].file = "drivers/input/evdev.c",
84044 + [61552].name = "str_to_user",
84045 + [61552].param2 = 1,
84046 + [61673].file = "security/keys/trusted.c",
84047 + [61673].name = "trusted_update",
84048 + [61673].param3 = 1,
84049 + [61676].file = "kernel/module.c",
84050 + [61676].name = "module_alloc_update_bounds_rx",
84051 + [61676].param1 = 1,
84052 + [61684].file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c",
84053 + [61684].name = "cxgb3_get_cpl_reply_skb",
84054 + [61684].param2 = 1,
84055 + [6173].file = "net/netlink/af_netlink.c",
84056 + [6173].name = "netlink_sendmsg",
84057 + [6173].param4 = 1,
84058 + [61770].file = "drivers/media/video/et61x251/et61x251_core.c",
84059 + [61770].name = "et61x251_read",
84060 + [61770].param3 = 1,
84061 + [61772].file = "fs/exofs/ore_raid.c",
84062 + [61772].name = "_sp2d_alloc",
84063 + [61772].param1 = 1,
84064 + [61772].param2 = 1,
84065 + [61772].param3 = 1,
84066 + [61926].file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
84067 + [61926].name = "ddb_input_read",
84068 + [61926].param3 = 1,
84069 + [61932].file = "drivers/message/fusion/mptctl.c",
84070 + [61932].name = "__mptctl_ioctl",
84071 + [61932].param2 = 1,
84072 + [61966].file = "fs/nfs/nfs4proc.c",
84073 + [61966].name = "nfs4_alloc_slots",
84074 + [61966].param1 = 1,
84075 + [62081].file = "drivers/net/irda/vlsi_ir.c",
84076 + [62081].name = "vlsi_alloc_ring",
84077 + [62081].param3 = 1,
84078 + [62081].param4 = 1,
84079 + [62116].file = "fs/libfs.c",
84080 + [62116].name = "simple_attr_read",
84081 + [62116].param3 = 1,
84082 + [6211].file = "drivers/net/ethernet/amd/pcnet32.c",
84083 + [6211].name = "pcnet32_realloc_tx_ring",
84084 + [6211].param3 = 1,
84085 + [62294].file = "sound/core/info.c",
84086 + [62294].name = "resize_info_buffer",
84087 + [62294].param2 = 1,
84088 + [62387].file = "fs/nfs/idmap.c",
84089 + [62387].name = "nfs_idmap_lookup_id",
84090 + [62387].param2 = 1,
84091 + [62465].file = "drivers/misc/altera-stapl/altera-jtag.c",
84092 + [62465].name = "altera_set_dr_pre",
84093 + [62465].param2 = 1,
84094 + [62466].file = "lib/mpi/mpiutil.c",
84095 + [62466].name = "mpi_alloc",
84096 + [62466].param1 = 1,
84097 + [62495].file = "drivers/block/floppy.c",
84098 + [62495].name = "fallback_on_nodma_alloc",
84099 + [62495].param2 = 1,
84100 + [62498].file = "fs/xattr.c",
84101 + [62498].name = "sys_listxattr",
84102 + [62498].param3 = 1,
84103 + [625].file = "fs/read_write.c",
84104 + [625].name = "sys_pwritev",
84105 + [625].param3 = 1,
84106 + [62662].file = "drivers/message/fusion/mptctl.c",
84107 + [62662].name = "mptctl_getiocinfo",
84108 + [62662].param2 = 1,
84109 + [62669].file = "drivers/platform/x86/asus_acpi.c",
84110 + [62669].name = "tled_proc_write",
84111 + [62669].param3 = 1,
84112 + [62714].file = "security/keys/keyctl.c",
84113 + [62714].name = "keyctl_update_key",
84114 + [62714].param3 = 1,
84115 + [62760].file = "drivers/media/dvb/ttpci/av7110_av.c",
84116 + [62760].name = "play_iframe",
84117 + [62760].param3 = 1,
84118 + [62851].file = "fs/proc/vmcore.c",
84119 + [62851].name = "read_vmcore",
84120 + [62851].param3 = 1,
84121 + [62870].file = "fs/nfs/idmap.c",
84122 + [62870].name = "nfs_idmap_get_desc",
84123 + [62870].param2 = 1,
84124 + [62870].param4 = 1,
84125 + [62905].file = "net/caif/cfpkt_skbuff.c",
84126 + [62905].name = "cfpkt_create",
84127 + [62905].param1 = 1,
84128 + [62920].file = "drivers/net/wireless/b43/phy_n.c",
84129 + [62920].name = "b43_nphy_load_samples",
84130 + [62920].param3 = 1,
84131 + [62925].file = "include/rdma/ib_verbs.h",
84132 + [62925].name = "ib_copy_from_udata",
84133 + [62925].param3 = 1,
84134 + [62934].file = "drivers/net/wireless/wl1251/cmd.c",
84135 + [62934].name = "wl1251_cmd_template_set",
84136 + [62934].param4 = 1,
84137 + [62940].file = "drivers/scsi/libsrp.c",
84138 + [62940].name = "srp_ring_alloc",
84139 + [62940].param2 = 1,
84140 + [62967].file = "security/keys/encrypted-keys/encrypted.c",
84141 + [62967].name = "encrypted_update",
84142 + [62967].param3 = 1,
84143 + [62970].file = "net/sched/sch_api.c",
84144 + [62970].name = "qdisc_class_hash_alloc",
84145 + [62970].param1 = 1,
84146 + [62999].file = "net/core/neighbour.c",
84147 + [62999].name = "neigh_hash_alloc",
84148 + [62999].param1 = 1,
84149 + [63007].file = "fs/proc/base.c",
84150 + [63007].name = "proc_coredump_filter_write",
84151 + [63007].param3 = 1,
84152 + [63010].file = "drivers/gpu/drm/ttm/ttm_page_alloc.c",
84153 + [63010].name = "ttm_page_pool_free",
84154 + [63010].param2 = 1,
84155 + [63045].file = "crypto/shash.c",
84156 + [63045].name = "shash_setkey_unaligned",
84157 + [63045].param3 = 1,
84158 + [63075].file = "kernel/relay.c",
84159 + [63075].name = "relay_alloc_page_array",
84160 + [63075].param1 = 1,
84161 + [63076].file = "fs/cifs/xattr.c",
84162 + [63076].name = "cifs_setxattr",
84163 + [63076].param4 = 1,
84164 + [63091].file = "drivers/net/usb/pegasus.c",
84165 + [63091].name = "get_registers",
84166 + [63091].param3 = 1,
84167 + [6331].file = "drivers/atm/solos-pci.c",
84168 + [6331].name = "solos_param_store",
84169 + [6331].param4 = 1,
84170 + [63367].file = "net/netfilter/ipset/ip_set_core.c",
84171 + [63367].name = "ip_set_alloc",
84172 + [63367].param1 = 1,
84173 + [63489].file = "drivers/bluetooth/btmrvl_debugfs.c",
84174 + [63489].name = "btmrvl_hscfgcmd_write",
84175 + [63489].param3 = 1,
84176 + [63490].file = "crypto/shash.c",
84177 + [63490].name = "shash_compat_setkey",
84178 + [63490].param3 = 1,
84179 + [63605].file = "mm/mempool.c",
84180 + [63605].name = "mempool_kmalloc",
84181 + [63605].param2 = 1,
84182 + [63633].file = "drivers/bluetooth/btmrvl_sdio.c",
84183 + [63633].name = "btmrvl_sdio_host_to_card",
84184 + [63633].param3 = 1,
84185 + [63961].file = "fs/xattr.c",
84186 + [63961].name = "sys_flistxattr",
84187 + [63961].param3 = 1,
84188 + [63964].file = "net/sctp/socket.c",
84189 + [63964].name = "sctp_setsockopt_maxseg",
84190 + [63964].param3 = 1,
84191 + [63988].file = "drivers/input/evdev.c",
84192 + [63988].name = "evdev_ioctl_compat",
84193 + [63988].param2 = 1,
84194 + [64055].file = "drivers/media/dvb/ttpci/av7110_av.c",
84195 + [64055].name = "dvb_aplay",
84196 + [64055].param3 = 1,
84197 + [64156].file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
84198 + [64156].name = "ath6kl_mgmt_tx",
84199 + [64156].param9 = 1,
84200 + [64226].file = "drivers/md/persistent-data/dm-space-map-checker.c",
84201 + [64226].name = "ca_extend",
84202 + [64226].param2 = 1,
84203 + [64227].file = "mm/nobootmem.c",
84204 + [64227].name = "__alloc_bootmem_node_nopanic",
84205 + [64227].param2 = 1,
84206 + [64351].file = "kernel/kfifo.c",
84207 + [64351].name = "kfifo_copy_from_user",
84208 + [64351].param3 = 1,
84209 + [64392].file = "drivers/mmc/core/mmc_ops.c",
84210 + [64392].name = "mmc_send_cxd_data",
84211 + [64392].param5 = 1,
84212 + [64423].file = "kernel/sched/core.c",
84213 + [64423].name = "get_user_cpu_mask",
84214 + [64423].param2 = 1,
84215 + [64432].file = "security/selinux/selinuxfs.c",
84216 + [64432].name = "sel_write_create",
84217 + [64432].param3 = 1,
84218 + [64471].file = "drivers/bluetooth/btmrvl_debugfs.c",
84219 + [64471].name = "btmrvl_hscmd_write",
84220 + [64471].param3 = 1,
84221 + [64667].file = "sound/core/oss/pcm_oss.c",
84222 + [64667].name = "snd_pcm_oss_read",
84223 + [64667].param3 = 1,
84224 + [64689].file = "sound/isa/gus/gus_dram.c",
84225 + [64689].name = "snd_gus_dram_read",
84226 + [64689].param4 = 1,
84227 + [64692].file = "fs/binfmt_misc.c",
84228 + [64692].name = "bm_entry_write",
84229 + [64692].param3 = 1,
84230 + [64705].file = "drivers/staging/iio/accel/sca3000_ring.c",
84231 + [64705].name = "sca3000_read_first_n_hw_rb",
84232 + [64705].param2 = 1,
84233 + [64713].file = "fs/cifs/connect.c",
84234 + [64713].name = "extract_hostname",
84235 + [64713].param1 = 1,
84236 + [64743].file = "fs/ocfs2/dlmfs/dlmfs.c",
84237 + [64743].name = "dlmfs_file_read",
84238 + [64743].param3 = 1,
84239 + [64771].file = "security/keys/encrypted-keys/encrypted.c",
84240 + [64771].name = "datablob_format",
84241 + [64771].param2 = 1,
84242 + [6477].file = "net/bluetooth/mgmt.c",
84243 + [6477].name = "mgmt_pending_add",
84244 + [6477].param5 = 1,
84245 + [64906].file = "drivers/net/wireless/b43legacy/debugfs.c",
84246 + [64906].name = "b43legacy_debugfs_write",
84247 + [64906].param3 = 1,
84248 + [64913].file = "sound/core/oss/pcm_oss.c",
84249 + [64913].name = "snd_pcm_oss_write1",
84250 + [64913].param3 = 1,
84251 + [64961].file = "drivers/spi/spidev.c",
84252 + [64961].name = "spidev_ioctl",
84253 + [64961].param2 = 1,
84254 + [65033].file = "crypto/shash.c",
84255 + [65033].name = "shash_async_setkey",
84256 + [65033].param3 = 1,
84257 + [65093].file = "security/integrity/evm/evm_secfs.c",
84258 + [65093].name = "evm_write_key",
84259 + [65093].param3 = 1,
84260 + [6514].file = "mm/nobootmem.c",
84261 + [6514].name = "__alloc_bootmem_low",
84262 + [6514].param1 = 1,
84263 + [65169].file = "net/core/skbuff.c",
84264 + [65169].name = "dev_alloc_skb",
84265 + [65169].param1 = 1,
84266 + [6517].file = "drivers/md/dm-table.c",
84267 + [6517].name = "alloc_targets",
84268 + [6517].param2 = 1,
84269 + [65205].file = "drivers/input/evdev.c",
84270 + [65205].name = "handle_eviocgbit",
84271 + [65205].param3 = 1,
84272 + [65237].file = "kernel/profile.c",
84273 + [65237].name = "read_profile",
84274 + [65237].param3 = 1,
84275 + [65343].file = "kernel/trace/trace.c",
84276 + [65343].name = "tracing_clock_write",
84277 + [65343].param3 = 1,
84278 + [65345].file = "lib/xz/xz_dec_lzma2.c",
84279 + [65345].name = "xz_dec_lzma2_create",
84280 + [65345].param2 = 1,
84281 + [65409].file = "net/802/garp.c",
84282 + [65409].name = "garp_request_join",
84283 + [65409].param4 = 1,
84284 + [65432].file = "drivers/hid/hid-roccat-kone.c",
84285 + [65432].name = "kone_receive",
84286 + [65432].param4 = 1,
84287 + [65514].file = "drivers/media/video/gspca/t613.c",
84288 + [65514].name = "reg_w_ixbuf",
84289 + [65514].param4 = 1,
84290 + [6551].file = "drivers/usb/host/xhci-mem.c",
84291 + [6551].name = "xhci_alloc_stream_info",
84292 + [6551].param3 = 1,
84293 + [65535].file = "drivers/media/dvb/dvb-usb/opera1.c",
84294 + [65535].name = "opera1_xilinx_rw",
84295 + [65535].param5 = 1,
84296 + [6672].file = "drivers/net/wireless/b43/debugfs.c",
84297 + [6672].name = "b43_debugfs_write",
84298 + [6672].param3 = 1,
84299 + [6691].file = "drivers/acpi/proc.c",
84300 + [6691].name = "acpi_system_write_wakeup_device",
84301 + [6691].param3 = 1,
84302 + [6865].file = "drivers/staging/iio/ring_sw.c",
84303 + [6865].name = "iio_read_first_n_sw_rb",
84304 + [6865].param2 = 1,
84305 + [6867].file = "fs/coda/psdev.c",
84306 + [6867].name = "coda_psdev_read",
84307 + [6867].param3 = 1,
84308 + [6891].file = "drivers/bluetooth/btmrvl_debugfs.c",
84309 + [6891].name = "btmrvl_gpiogap_write",
84310 + [6891].param3 = 1,
84311 + [6944].file = "drivers/ide/ide-proc.c",
84312 + [6944].name = "ide_settings_proc_write",
84313 + [6944].param3 = 1,
84314 + [6950].file = "drivers/isdn/capi/capi.c",
84315 + [6950].name = "capi_write",
84316 + [6950].param3 = 1,
84317 + [697].file = "sound/isa/gus/gus_dram.c",
84318 + [697].name = "snd_gus_dram_peek",
84319 + [697].param4 = 1,
84320 + [7066].file = "security/keys/keyctl.c",
84321 + [7066].name = "keyctl_instantiate_key_common",
84322 + [7066].param4 = 1,
84323 + [7125].file = "include/net/nfc/nci_core.h",
84324 + [7125].name = "nci_skb_alloc",
84325 + [7125].param2 = 1,
84326 + [7129].file = "mm/maccess.c",
84327 + [7129].name = "__probe_kernel_read",
84328 + [7129].param3 = 1,
84329 + [7158].file = "kernel/trace/trace.c",
84330 + [7158].name = "tracing_read_pipe",
84331 + [7158].param3 = 1,
84332 + [720].file = "sound/pci/rme9652/hdsp.c",
84333 + [720].name = "snd_hdsp_playback_copy",
84334 + [720].param5 = 1,
84335 + [7236].file = "drivers/gpu/drm/drm_crtc.c",
84336 + [7236].name = "drm_plane_init",
84337 + [7236].param6 = 1,
84338 + [7411].file = "drivers/vhost/vhost.c",
84339 + [7411].name = "__vhost_add_used_n",
84340 + [7411].param3 = 1,
84341 + [7432].file = "net/bluetooth/mgmt.c",
84342 + [7432].name = "mgmt_event",
84343 + [7432].param4 = 1,
84344 + [7488].file = "security/keys/user_defined.c",
84345 + [7488].name = "user_read",
84346 + [7488].param3 = 1,
84347 + [7551].file = "drivers/input/touchscreen/ad7879-spi.c",
84348 + [7551].name = "ad7879_spi_xfer",
84349 + [7551].param3 = 1,
84350 + [7671].file = "mm/nobootmem.c",
84351 + [7671].name = "__alloc_bootmem_node_high",
84352 + [7671].param2 = 1,
84353 + [7676].file = "drivers/acpi/custom_method.c",
84354 + [7676].name = "cm_write",
84355 + [7676].param3 = 1,
84356 + [7693].file = "net/sctp/socket.c",
84357 + [7693].name = "sctp_setsockopt_associnfo",
84358 + [7693].param3 = 1,
84359 + [7697].file = "security/selinux/selinuxfs.c",
84360 + [7697].name = "sel_write_access",
84361 + [7697].param3 = 1,
84362 + [7843].file = "fs/compat.c",
84363 + [7843].name = "compat_sys_readv",
84364 + [7843].param3 = 1,
84365 + [7883].file = "net/sched/sch_sfq.c",
84366 + [7883].name = "sfq_alloc",
84367 + [7883].param1 = 1,
84368 + [7924].file = "drivers/media/video/cx18/cx18-fileops.c",
84369 + [7924].name = "cx18_read_pos",
84370 + [7924].param3 = 1,
84371 + [7958].file = "drivers/gpu/vga/vgaarb.c",
84372 + [7958].name = "vga_arb_write",
84373 + [7958].param3 = 1,
84374 + [7976].file = "drivers/usb/gadget/rndis.c",
84375 + [7976].name = "rndis_add_response",
84376 + [7976].param2 = 1,
84377 + [7985].file = "net/mac80211/cfg.c",
84378 + [7985].name = "ieee80211_mgmt_tx",
84379 + [7985].param9 = 1,
84380 + [8014].file = "net/netfilter/ipset/ip_set_list_set.c",
84381 + [8014].name = "init_list_set",
84382 + [8014].param2 = 1,
84383 + [8014].param3 = 1,
84384 + [8126].file = "sound/soc/soc-core.c",
84385 + [8126].name = "codec_reg_read_file",
84386 + [8126].param3 = 1,
84387 + [8317].file = "security/smack/smackfs.c",
84388 + [8317].name = "smk_write_ambient",
84389 + [8317].param3 = 1,
84390 + [8335].file = "drivers/media/dvb/dvb-core/dmxdev.c",
84391 + [8335].name = "dvb_dvr_set_buffer_size",
84392 + [8335].param2 = 1,
84393 + [8383].file = "kernel/module.c",
84394 + [8383].name = "copy_and_check",
84395 + [8383].param3 = 1,
84396 + [8411].file = "net/caif/cfpkt_skbuff.c",
84397 + [8411].name = "cfpkt_append",
84398 + [8411].param3 = 1,
84399 + [8536].file = "fs/cifs/dns_resolve.c",
84400 + [8536].name = "dns_resolve_server_name_to_ip",
84401 + [8536].param1 = 1,
84402 + [857].file = "drivers/virtio/virtio_ring.c",
84403 + [857].name = "virtqueue_add_buf",
84404 + [857].param3 = 1,
84405 + [857].param4 = 1,
84406 + [8650].file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
84407 + [8650].name = "vmw_kms_present",
84408 + [8650].param9 = 1,
84409 + [8654].file = "drivers/net/wireless/mwifiex/sdio.c",
84410 + [8654].name = "mwifiex_alloc_sdio_mpa_buffers",
84411 + [8654].param2 = 1,
84412 + [8654].param3 = 1,
84413 + [865].file = "drivers/base/regmap/regmap-debugfs.c",
84414 + [865].name = "regmap_access_read_file",
84415 + [865].param3 = 1,
84416 + [8663].file = "net/bridge/netfilter/ebtables.c",
84417 + [8663].name = "do_update_counters",
84418 + [8663].param4 = 1,
84419 + [8684].file = "fs/read_write.c",
84420 + [8684].name = "sys_writev",
84421 + [8684].param3 = 1,
84422 + [8699].file = "security/selinux/selinuxfs.c",
84423 + [8699].name = "sel_commit_bools_write",
84424 + [8699].param3 = 1,
84425 + [8764].file = "drivers/usb/core/devio.c",
84426 + [8764].name = "usbdev_read",
84427 + [8764].param3 = 1,
84428 + [8802].file = "fs/dlm/user.c",
84429 + [8802].name = "device_write",
84430 + [8802].param3 = 1,
84431 + [8810].file = "net/mac80211/debugfs_sta.c",
84432 + [8810].name = "sta_agg_status_write",
84433 + [8810].param3 = 1,
84434 + [8815].file = "security/tomoyo/securityfs_if.c",
84435 + [8815].name = "tomoyo_write_self",
84436 + [8815].param3 = 1,
84437 + [8821].file = "net/wireless/sme.c",
84438 + [8821].name = "cfg80211_roamed",
84439 + [8821].param5 = 1,
84440 + [8821].param7 = 1,
84441 + [8833].file = "security/selinux/ss/services.c",
84442 + [8833].name = "security_context_to_sid",
84443 + [8833].param2 = 1,
84444 + [8838].file = "lib/mpi/mpi-bit.c",
84445 + [8838].name = "mpi_lshift_limbs",
84446 + [8838].param2 = 1,
84447 + [8851].file = "net/key/af_key.c",
84448 + [8851].name = "pfkey_sendmsg",
84449 + [8851].param4 = 1,
84450 + [8917].file = "net/can/raw.c",
84451 + [8917].name = "raw_setsockopt",
84452 + [8917].param5 = 1,
84453 + [8983].file = "include/linux/skbuff.h",
84454 + [8983].name = "alloc_skb",
84455 + [8983].param1 = 1,
84456 + [9117].file = "drivers/base/regmap/regcache-rbtree.c",
84457 + [9117].name = "regcache_rbtree_insert_to_block",
84458 + [9117].param5 = 1,
84459 + [9226].file = "mm/migrate.c",
84460 + [9226].name = "sys_move_pages",
84461 + [9226].param2 = 1,
84462 + [9304].file = "kernel/auditfilter.c",
84463 + [9304].name = "audit_init_entry",
84464 + [9304].param1 = 1,
84465 + [9317].file = "drivers/usb/wusbcore/wa-nep.c",
84466 + [9317].name = "wa_nep_queue",
84467 + [9317].param2 = 1,
84468 + [9341].file = "drivers/acpi/apei/erst-dbg.c",
84469 + [9341].name = "erst_dbg_write",
84470 + [9341].param3 = 1,
84471 + [9386].file = "fs/exofs/ore.c",
84472 + [9386].name = "_ore_get_io_state",
84473 + [9386].param3 = 1,
84474 + [9386].param4 = 1,
84475 + [9386].param5 = 1,
84476 + [9538].file = "crypto/blkcipher.c",
84477 + [9538].name = "blkcipher_copy_iv",
84478 + [9538].param3 = 1,
84479 + [9546].file = "drivers/video/fbmem.c",
84480 + [9546].name = "fb_write",
84481 + [9546].param3 = 1,
84482 + [9601].file = "kernel/kfifo.c",
84483 + [9601].name = "__kfifo_from_user",
84484 + [9601].param3 = 1,
84485 + [9618].file = "security/selinux/selinuxfs.c",
84486 + [9618].name = "sel_write_bool",
84487 + [9618].param3 = 1,
84488 + [9768].file = "drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c",
84489 + [9768].name = "vmw_execbuf_process",
84490 + [9768].param5 = 1,
84491 + [9828].file = "drivers/media/dvb/dvb-core/dmxdev.c",
84492 + [9828].name = "dvb_demux_do_ioctl",
84493 + [9828].param3 = 1,
84494 + [9870].file = "net/atm/addr.c",
84495 + [9870].name = "atm_get_addr",
84496 + [9870].param3 = 1,
84497 + [9977].file = "drivers/net/wireless/zd1211rw/zd_usb.c",
84498 + [9977].name = "zd_usb_iowrite16v_async",
84499 + [9977].param3 = 1,
84500 + [16344].collision = 1,
84501 + [307].collision = 1,
84502 + [31649].collision = 1,
84503 + [33040].collision = 1,
84504 + [45231].collision = 1,
84505 + [60651].collision = 1,
84506 +};
84507 diff --git a/tools/gcc/size_overflow_hash2.h b/tools/gcc/size_overflow_hash2.h
84508 new file mode 100644
84509 index 0000000..9ec45ae
84510 --- /dev/null
84511 +++ b/tools/gcc/size_overflow_hash2.h
84512 @@ -0,0 +1,35 @@
84513 +struct size_overflow_hash size_overflow_hash2[65536] = {
84514 + [22224].file = "fs/proc/vmcore.c",
84515 + [22224].name = "read_from_oldmem",
84516 + [22224].param2 = 1,
84517 + [2344].file = "fs/ecryptfs/crypto.c",
84518 + [2344].name = "ecryptfs_decode_and_decrypt_filename",
84519 + [2344].param5 = 1,
84520 + [2515].file = "fs/ecryptfs/crypto.c",
84521 + [2515].name = "ecryptfs_copy_filename",
84522 + [2515].param4 = 1,
84523 + [26518].file = "drivers/gpu/vga/vgaarb.c",
84524 + [26518].name = "vga_arb_read",
84525 + [26518].param3 = 1,
84526 + [30632].file = "drivers/ide/ide-proc.c",
84527 + [30632].name = "ide_driver_proc_write",
84528 + [30632].param3 = 1,
84529 + [39024].file = "lib/scatterlist.c",
84530 + [39024].name = "sg_kmalloc",
84531 + [39024].param1 = 1,
84532 + [50359].file = "kernel/sched/core.c",
84533 + [50359].name = "alloc_sched_domains",
84534 + [50359].param1 = 1,
84535 + [53262].file = "drivers/block/aoe/aoechr.c",
84536 + [53262].name = "revalidate",
84537 + [53262].param2 = 1,
84538 + [56432].file = "drivers/base/regmap/regmap-debugfs.c",
84539 + [56432].name = "regmap_map_read_file",
84540 + [56432].param3 = 1,
84541 + [57500].file = "drivers/spi/spidev.c",
84542 + [57500].name = "spidev_write",
84543 + [57500].param3 = 1,
84544 + [8155].file = "drivers/hv/channel.c",
84545 + [8155].name = "vmbus_establish_gpadl",
84546 + [8155].param3 = 1,
84547 +};
84548 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
84549 new file mode 100644
84550 index 0000000..255439f
84551 --- /dev/null
84552 +++ b/tools/gcc/size_overflow_plugin.c
84553 @@ -0,0 +1,1110 @@
84554 +/*
84555 + * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
84556 + * Licensed under the GPL v2, or (at your option) v3
84557 + *
84558 + * Homepage:
84559 + * http://www.grsecurity.net/~ephox/overflow_plugin/
84560 + *
84561 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
84562 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
84563 + * The recomputed argument is checked against INT_MAX and an event is logged on overflow and the triggering process is killed.
84564 + *
84565 + * Usage:
84566 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o size_overflow_plugin.so size_overflow_plugin.c
84567 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
84568 + */
84569 +
84570 +#include "gcc-plugin.h"
84571 +#include "config.h"
84572 +#include "system.h"
84573 +#include "coretypes.h"
84574 +#include "tree.h"
84575 +#include "tree-pass.h"
84576 +#include "intl.h"
84577 +#include "plugin-version.h"
84578 +#include "tm.h"
84579 +#include "toplev.h"
84580 +#include "function.h"
84581 +#include "tree-flow.h"
84582 +#include "plugin.h"
84583 +#include "gimple.h"
84584 +#include "c-common.h"
84585 +#include "diagnostic.h"
84586 +#include "cfgloop.h"
84587 +
84588 +struct size_overflow_hash {
84589 + const char *name;
84590 + const char *file;
84591 + unsigned short collision:1;
84592 + unsigned short param1:1;
84593 + unsigned short param2:1;
84594 + unsigned short param3:1;
84595 + unsigned short param4:1;
84596 + unsigned short param5:1;
84597 + unsigned short param6:1;
84598 + unsigned short param7:1;
84599 + unsigned short param8:1;
84600 + unsigned short param9:1;
84601 +};
84602 +
84603 +#include "size_overflow_hash1.h"
84604 +#include "size_overflow_hash2.h"
84605 +
84606 +#define __unused __attribute__((__unused__))
84607 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
84608 +#define BEFORE_STMT true
84609 +#define AFTER_STMT false
84610 +#define CREATE_NEW_VAR NULL_TREE
84611 +
84612 +int plugin_is_GPL_compatible;
84613 +void debug_gimple_stmt (gimple gs);
84614 +
84615 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var);
84616 +static tree signed_size_overflow_type;
84617 +static tree unsigned_size_overflow_type;
84618 +static tree report_size_overflow_decl;
84619 +static tree const_char_ptr_type_node;
84620 +static unsigned int handle_function(void);
84621 +
84622 +static struct plugin_info size_overflow_plugin_info = {
84623 + .version = "20120409beta",
84624 + .help = "no-size_overflow\tturn off size overflow checking\n",
84625 +};
84626 +
84627 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
84628 +{
84629 + unsigned int arg_count = type_num_arguments(*node);
84630 +
84631 + for (; args; args = TREE_CHAIN(args)) {
84632 + tree position = TREE_VALUE(args);
84633 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
84634 + error("handle_size_overflow_attribute: overflow parameter outside range.");
84635 + *no_add_attrs = true;
84636 + }
84637 + }
84638 + return NULL_TREE;
84639 +}
84640 +
84641 +static struct attribute_spec no_size_overflow_attr = {
84642 + .name = "size_overflow",
84643 + .min_length = 1,
84644 + .max_length = -1,
84645 + .decl_required = false,
84646 + .type_required = true,
84647 + .function_type_required = true,
84648 + .handler = handle_size_overflow_attribute
84649 +};
84650 +
84651 +static void register_attributes(void __unused *event_data, void __unused *data)
84652 +{
84653 + register_attribute(&no_size_overflow_attr);
84654 +}
84655 +
84656 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
84657 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
84658 +{
84659 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
84660 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
84661 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
84662 +
84663 + const unsigned int m = 0x57559429;
84664 + const unsigned int n = 0x5052acdb;
84665 + const unsigned int *key4 = (const unsigned int *)key;
84666 + unsigned int h = len;
84667 + unsigned int k = len + seed + n;
84668 + unsigned long long p;
84669 +
84670 + while (len >= 8) {
84671 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
84672 + len -= 8;
84673 + }
84674 + if (len >= 4) {
84675 + cwmixb(key4[0]) key4 += 1;
84676 + len -= 4;
84677 + }
84678 + if (len)
84679 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
84680 + cwmixb(h ^ (k + n));
84681 + return k ^ h;
84682 +
84683 +#undef cwfold
84684 +#undef cwmixa
84685 +#undef cwmixb
84686 +}
84687 +
84688 +static inline unsigned int size_overflow_hash(const char *fndecl, unsigned int seed)
84689 +{
84690 + return CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
84691 +}
84692 +
84693 +static inline tree get_original_function_decl(tree fndecl)
84694 +{
84695 + if (DECL_ABSTRACT_ORIGIN(fndecl))
84696 + return DECL_ABSTRACT_ORIGIN(fndecl);
84697 + return fndecl;
84698 +}
84699 +
84700 +static inline gimple get_def_stmt(tree node)
84701 +{
84702 + gcc_assert(TREE_CODE(node) == SSA_NAME);
84703 + return SSA_NAME_DEF_STMT(node);
84704 +}
84705 +
84706 +static struct size_overflow_hash *get_function_hash(tree fndecl)
84707 +{
84708 + unsigned int hash;
84709 + const char *func = NAME(fndecl);
84710 +
84711 + hash = size_overflow_hash(func, 0);
84712 +
84713 + if (size_overflow_hash1[hash].collision) {
84714 + hash = size_overflow_hash(func, 23432);
84715 + return &size_overflow_hash2[hash];
84716 + }
84717 + return &size_overflow_hash1[hash];
84718 +}
84719 +
84720 +static void check_arg_type(tree var)
84721 +{
84722 + tree type = TREE_TYPE(var);
84723 + enum tree_code code = TREE_CODE(type);
84724 +
84725 + gcc_assert(code == INTEGER_TYPE ||
84726 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
84727 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
84728 +}
84729 +
84730 +static void check_missing_attribute(tree arg)
84731 +{
84732 + tree var, type, func = get_original_function_decl(current_function_decl);
84733 + const char *curfunc = NAME(func);
84734 + unsigned int new_hash, argnum = 1;
84735 + struct size_overflow_hash *hash;
84736 + location_t loc;
84737 + expanded_location xloc;
84738 + bool match = false;
84739 +
84740 + type = TREE_TYPE(arg);
84741 + // skip function pointers
84742 + if (TREE_CODE(type) == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == FUNCTION_TYPE)
84743 + return;
84744 +
84745 + loc = DECL_SOURCE_LOCATION(func);
84746 + xloc = expand_location(loc);
84747 +
84748 + if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func))))
84749 + return;
84750 +
84751 + hash = get_function_hash(func);
84752 + if (hash->name && !strcmp(hash->name, NAME(func)) && !strcmp(hash->file, xloc.file))
84753 + return;
84754 +
84755 + gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
84756 +
84757 + if (TREE_CODE(arg) == SSA_NAME)
84758 + arg = SSA_NAME_VAR(arg);
84759 +
84760 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
84761 + if (strcmp(NAME(arg), NAME(var))) {
84762 + argnum++;
84763 + continue;
84764 + }
84765 + check_arg_type(var);
84766 +
84767 + match = true;
84768 + if (!TYPE_UNSIGNED(TREE_TYPE(var)))
84769 + return;
84770 + break;
84771 + }
84772 + if (!match) {
84773 + warning(0, "check_missing_attribute: cannot find the %s argument in %s", NAME(arg), NAME(func));
84774 + return;
84775 + }
84776 +
84777 +#define check_param(num) \
84778 + if (num == argnum && hash->param##num) \
84779 + return;
84780 + check_param(1);
84781 + check_param(2);
84782 + check_param(3);
84783 + check_param(4);
84784 + check_param(5);
84785 + check_param(6);
84786 + check_param(7);
84787 + check_param(8);
84788 + check_param(9);
84789 +#undef check_param
84790 +
84791 + new_hash = size_overflow_hash(curfunc, 0);
84792 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%d+%u+%s", curfunc, curfunc, argnum, new_hash, xloc.file);
84793 +}
84794 +
84795 +static tree create_new_var(tree type)
84796 +{
84797 + tree new_var = create_tmp_var(type, "cicus");
84798 +
84799 + add_referenced_var(new_var);
84800 + mark_sym_for_renaming(new_var);
84801 + return new_var;
84802 +}
84803 +
84804 +static bool is_bool(tree node)
84805 +{
84806 + tree type;
84807 +
84808 + if (node == NULL_TREE)
84809 + return false;
84810 +
84811 + type = TREE_TYPE(node);
84812 + if (!INTEGRAL_TYPE_P(type))
84813 + return false;
84814 + if (TREE_CODE(type) == BOOLEAN_TYPE)
84815 + return true;
84816 + if (TYPE_PRECISION(type) == 1)
84817 + return true;
84818 + return false;
84819 +}
84820 +
84821 +static tree cast_a_tree(tree type, tree var)
84822 +{
84823 + gcc_assert(fold_convertible_p(type, var));
84824 +
84825 + return fold_convert(type, var);
84826 +}
84827 +
84828 +static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc)
84829 +{
84830 + gimple assign;
84831 +
84832 + if (new_var == CREATE_NEW_VAR)
84833 + new_var = create_new_var(type);
84834 +
84835 + assign = gimple_build_assign(new_var, cast_a_tree(type, var));
84836 + gimple_set_location(assign, loc);
84837 + gimple_set_lhs(assign, make_ssa_name(new_var, assign));
84838 +
84839 + return assign;
84840 +}
84841 +
84842 +static tree create_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, bool before)
84843 +{
84844 + tree oldstmt_rhs1;
84845 + enum tree_code code;
84846 + gimple stmt;
84847 + gimple_stmt_iterator gsi;
84848 +
84849 + if (!*potentionally_overflowed)
84850 + return NULL_TREE;
84851 +
84852 + if (rhs1 == NULL_TREE) {
84853 + debug_gimple_stmt(oldstmt);
84854 + error("create_assign: rhs1 is NULL_TREE");
84855 + gcc_unreachable();
84856 + }
84857 +
84858 + oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
84859 + code = TREE_CODE(oldstmt_rhs1);
84860 + if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP))
84861 + check_missing_attribute(oldstmt_rhs1);
84862 +
84863 + stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt));
84864 + gsi = gsi_for_stmt(oldstmt);
84865 + if (before)
84866 + gsi_insert_before(&gsi, stmt, GSI_NEW_STMT);
84867 + else
84868 + gsi_insert_after(&gsi, stmt, GSI_NEW_STMT);
84869 + update_stmt(stmt);
84870 + pointer_set_insert(visited, oldstmt);
84871 + return gimple_get_lhs(stmt);
84872 +}
84873 +
84874 +static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3)
84875 +{
84876 + tree new_var, lhs = gimple_get_lhs(oldstmt);
84877 + gimple stmt;
84878 + gimple_stmt_iterator gsi;
84879 +
84880 + if (!*potentionally_overflowed)
84881 + return NULL_TREE;
84882 +
84883 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
84884 + rhs1 = gimple_assign_rhs1(oldstmt);
84885 + rhs1 = create_assign(visited, potentionally_overflowed, oldstmt, rhs1, BEFORE_STMT);
84886 + }
84887 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
84888 + rhs2 = gimple_assign_rhs2(oldstmt);
84889 + rhs2 = create_assign(visited, potentionally_overflowed, oldstmt, rhs2, BEFORE_STMT);
84890 + }
84891 +
84892 + stmt = gimple_copy(oldstmt);
84893 + gimple_set_location(stmt, gimple_location(oldstmt));
84894 +
84895 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
84896 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
84897 +
84898 + if (is_bool(lhs))
84899 + new_var = SSA_NAME_VAR(lhs);
84900 + else
84901 + new_var = create_new_var(signed_size_overflow_type);
84902 + new_var = make_ssa_name(new_var, stmt);
84903 + gimple_set_lhs(stmt, new_var);
84904 +
84905 + if (rhs1 != NULL_TREE) {
84906 + if (!gimple_assign_cast_p(oldstmt))
84907 + rhs1 = cast_a_tree(signed_size_overflow_type, rhs1);
84908 + gimple_assign_set_rhs1(stmt, rhs1);
84909 + }
84910 +
84911 + if (rhs2 != NULL_TREE)
84912 + gimple_assign_set_rhs2(stmt, rhs2);
84913 +#if BUILDING_GCC_VERSION >= 4007
84914 + if (rhs3 != NULL_TREE)
84915 + gimple_assign_set_rhs3(stmt, rhs3);
84916 +#endif
84917 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
84918 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
84919 +
84920 + gsi = gsi_for_stmt(oldstmt);
84921 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
84922 + update_stmt(stmt);
84923 + pointer_set_insert(visited, oldstmt);
84924 + return gimple_get_lhs(stmt);
84925 +}
84926 +
84927 +static gimple overflow_create_phi_node(gimple oldstmt, tree var)
84928 +{
84929 + basic_block bb;
84930 + gimple phi;
84931 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
84932 +
84933 + bb = gsi_bb(gsi);
84934 +
84935 + phi = create_phi_node(var, bb);
84936 + gsi = gsi_last(phi_nodes(bb));
84937 + gsi_remove(&gsi, false);
84938 +
84939 + gsi = gsi_for_stmt(oldstmt);
84940 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
84941 + gimple_set_bb(phi, bb);
84942 + return phi;
84943 +}
84944 +
84945 +static tree signed_cast_constant(tree node)
84946 +{
84947 + gcc_assert(is_gimple_constant(node));
84948 +
84949 + return cast_a_tree(signed_size_overflow_type, node);
84950 +}
84951 +
84952 +static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var, unsigned int i)
84953 +{
84954 + basic_block bb;
84955 + gimple newstmt, def_stmt;
84956 + gimple_stmt_iterator gsi;
84957 +
84958 + newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, gimple_location(oldstmt));
84959 + if (TREE_CODE(arg) == SSA_NAME) {
84960 + def_stmt = get_def_stmt(arg);
84961 + if (gimple_code(def_stmt) != GIMPLE_NOP) {
84962 + gsi = gsi_for_stmt(def_stmt);
84963 + gsi_insert_after(&gsi, newstmt, GSI_NEW_STMT);
84964 + return newstmt;
84965 + }
84966 + }
84967 +
84968 + bb = gimple_phi_arg_edge(oldstmt, i)->src;
84969 + gsi = gsi_after_labels(bb);
84970 + gsi_insert_before(&gsi, newstmt, GSI_NEW_STMT);
84971 + return newstmt;
84972 +}
84973 +
84974 +static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs)
84975 +{
84976 + gimple newstmt;
84977 + gimple_stmt_iterator gsi;
84978 + void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
84979 + gimple def_newstmt = get_def_stmt(new_rhs);
84980 +
84981 + gsi_insert = gsi_insert_after;
84982 + gsi = gsi_for_stmt(def_newstmt);
84983 +
84984 + switch (gimple_code(get_def_stmt(arg))) {
84985 + case GIMPLE_PHI:
84986 + newstmt = gimple_build_assign(new_var, new_rhs);
84987 + gsi = gsi_after_labels(gimple_bb(def_newstmt));
84988 + gsi_insert = gsi_insert_before;
84989 + break;
84990 + case GIMPLE_ASM:
84991 + case GIMPLE_CALL:
84992 + newstmt = gimple_build_assign(new_var, new_rhs);
84993 + break;
84994 + case GIMPLE_ASSIGN:
84995 + newstmt = gimple_copy(def_newstmt);
84996 + break;
84997 + default:
84998 + /* unknown gimple_code (handle_build_new_phi_arg) */
84999 + gcc_unreachable();
85000 + }
85001 +
85002 + gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
85003 + gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
85004 + return newstmt;
85005 +}
85006 +
85007 +static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree arg, tree new_var)
85008 +{
85009 + gimple newstmt;
85010 + tree new_rhs;
85011 +
85012 + new_rhs = expand(visited, potentionally_overflowed, arg);
85013 +
85014 + if (new_rhs == NULL_TREE)
85015 + return NULL_TREE;
85016 +
85017 + newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
85018 + update_stmt(newstmt);
85019 + return gimple_get_lhs(newstmt);
85020 +}
85021 +
85022 +static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt)
85023 +{
85024 + gimple phi;
85025 + tree new_var = create_new_var(signed_size_overflow_type);
85026 + unsigned int i, n = gimple_phi_num_args(oldstmt);
85027 +
85028 + pointer_set_insert(visited, oldstmt);
85029 + phi = overflow_create_phi_node(oldstmt, new_var);
85030 + for (i = 0; i < n; i++) {
85031 + tree arg, lhs;
85032 +
85033 + arg = gimple_phi_arg_def(oldstmt, i);
85034 + if (is_gimple_constant(arg))
85035 + arg = signed_cast_constant(arg);
85036 + lhs = build_new_phi_arg(visited, potentionally_overflowed, arg, new_var);
85037 + if (lhs == NULL_TREE)
85038 + lhs = gimple_get_lhs(cast_old_phi_arg(oldstmt, arg, new_var, i));
85039 + add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
85040 + }
85041 +
85042 + update_stmt(phi);
85043 + return gimple_phi_result(phi);
85044 +}
85045 +
85046 +static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85047 +{
85048 + gimple def_stmt = get_def_stmt(var);
85049 + tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt);
85050 +
85051 + *potentionally_overflowed = true;
85052 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
85053 + if (new_rhs1 == NULL_TREE) {
85054 + if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
85055 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85056 + else
85057 + return create_assign(visited, potentionally_overflowed, def_stmt, rhs1, AFTER_STMT);
85058 + }
85059 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, NULL_TREE, NULL_TREE);
85060 +}
85061 +
85062 +static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85063 +{
85064 + gimple def_stmt = get_def_stmt(var);
85065 + tree rhs1 = gimple_assign_rhs1(def_stmt);
85066 +
85067 + if (is_gimple_constant(rhs1))
85068 + return dup_assign(visited, potentionally_overflowed, def_stmt, signed_cast_constant(rhs1), NULL_TREE, NULL_TREE);
85069 +
85070 + switch (TREE_CODE(rhs1)) {
85071 + case SSA_NAME:
85072 + return handle_unary_rhs(visited, potentionally_overflowed, var);
85073 +
85074 + case ARRAY_REF:
85075 + case ADDR_EXPR:
85076 + case COMPONENT_REF:
85077 + case COND_EXPR:
85078 + case INDIRECT_REF:
85079 +#if BUILDING_GCC_VERSION >= 4006
85080 + case MEM_REF:
85081 +#endif
85082 + case PARM_DECL:
85083 + case TARGET_MEM_REF:
85084 + case VAR_DECL:
85085 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85086 +
85087 + default:
85088 + debug_gimple_stmt(def_stmt);
85089 + debug_tree(rhs1);
85090 + gcc_unreachable();
85091 + }
85092 +}
85093 +
85094 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
85095 +{
85096 + gimple cond_stmt;
85097 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
85098 +
85099 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
85100 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
85101 + update_stmt(cond_stmt);
85102 +}
85103 +
85104 +static tree create_string_param(tree string)
85105 +{
85106 + tree array_ref = build4(ARRAY_REF, TREE_TYPE(string), string, integer_zero_node, NULL, NULL);
85107 +
85108 + return build1(ADDR_EXPR, ptr_type_node, array_ref);
85109 +}
85110 +
85111 +static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg)
85112 +{
85113 + gimple func_stmt, def_stmt;
85114 + tree current_func, loc_file, loc_line;
85115 + expanded_location xloc;
85116 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
85117 +
85118 + def_stmt = get_def_stmt(arg);
85119 + xloc = expand_location(gimple_location(def_stmt));
85120 +
85121 + if (!gimple_has_location(def_stmt)) {
85122 + xloc = expand_location(gimple_location(stmt));
85123 + if (!gimple_has_location(stmt))
85124 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
85125 + }
85126 +
85127 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
85128 +
85129 + loc_file = build_string(strlen(xloc.file), xloc.file);
85130 + TREE_TYPE(loc_file) = char_array_type_node;
85131 + loc_file = create_string_param(loc_file);
85132 +
85133 + current_func = build_string(IDENTIFIER_LENGTH(DECL_NAME(current_function_decl)), NAME(current_function_decl));
85134 + TREE_TYPE(current_func) = char_array_type_node;
85135 + current_func = create_string_param(current_func);
85136 +
85137 + // void report_size_overflow(const char *file, unsigned int line, const char *func)
85138 + func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func);
85139 +
85140 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
85141 +}
85142 +
85143 +static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value)
85144 +{
85145 + basic_block cond_bb, join_bb, bb_true;
85146 + edge e;
85147 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
85148 +// location_t loc = gimple_location(stmt);
85149 +
85150 + cond_bb = gimple_bb(stmt);
85151 + gsi_prev(&gsi);
85152 + if (gsi_end_p(gsi))
85153 + e = split_block_after_labels(cond_bb);
85154 + else
85155 + e = split_block(cond_bb, gsi_stmt(gsi));
85156 + cond_bb = e->src;
85157 + join_bb = e->dest;
85158 + e->flags = EDGE_FALSE_VALUE;
85159 + e->probability = REG_BR_PROB_BASE;
85160 +
85161 + bb_true = create_empty_bb(cond_bb);
85162 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
85163 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
85164 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
85165 +
85166 + if (dom_info_available_p(CDI_DOMINATORS)) {
85167 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
85168 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
85169 + }
85170 +
85171 + if (current_loops != NULL) {
85172 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
85173 + add_bb_to_loop(bb_true, cond_bb->loop_father);
85174 + }
85175 +
85176 + insert_cond(cond_bb, arg, cond_code, type_value);
85177 + insert_cond_result(bb_true, stmt, arg);
85178 +
85179 +// inform(loc, "Integer size_overflow check applied here.");
85180 +}
85181 +
85182 +static tree get_type_for_check(tree rhs)
85183 +{
85184 + tree def_rhs;
85185 + gimple def_stmt = get_def_stmt(rhs);
85186 +
85187 + if (!gimple_assign_cast_p(def_stmt))
85188 + return TREE_TYPE(rhs);
85189 + def_rhs = gimple_assign_rhs1(def_stmt);
85190 + if (TREE_CODE(TREE_TYPE(def_rhs)) == INTEGER_TYPE)
85191 + return TREE_TYPE(def_rhs);
85192 + return TREE_TYPE(rhs);
85193 +}
85194 +
85195 +static gimple cast_to_unsigned_size_overflow_type(gimple stmt, tree cast_rhs)
85196 +{
85197 + gimple ucast_stmt;
85198 + gimple_stmt_iterator gsi;
85199 + location_t loc = gimple_location(stmt);
85200 +
85201 + ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, cast_rhs, CREATE_NEW_VAR, loc);
85202 + gsi = gsi_for_stmt(stmt);
85203 + gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT);
85204 + return ucast_stmt;
85205 +}
85206 +
85207 +static void check_size_overflow(gimple stmt, tree cast_rhs, tree rhs, bool *potentionally_overflowed)
85208 +{
85209 + tree type_max, type_min, rhs_type;
85210 + gimple ucast_stmt;
85211 +
85212 + if (!*potentionally_overflowed)
85213 + return;
85214 +
85215 + rhs_type = get_type_for_check(rhs);
85216 +
85217 + if (TYPE_UNSIGNED(rhs_type)) {
85218 + ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, cast_rhs);
85219 + type_max = cast_a_tree(unsigned_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
85220 + insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
85221 + } else {
85222 + type_max = cast_a_tree(signed_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
85223 + insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max);
85224 +
85225 + type_min = cast_a_tree(signed_size_overflow_type, TYPE_MIN_VALUE(rhs_type));
85226 + insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min);
85227 + }
85228 +}
85229 +
85230 +static tree change_assign_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple stmt, tree orig_rhs)
85231 +{
85232 + gimple assign;
85233 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
85234 + tree new_rhs, origtype = TREE_TYPE(orig_rhs);
85235 +
85236 + gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
85237 +
85238 + new_rhs = expand(visited, potentionally_overflowed, orig_rhs);
85239 + if (new_rhs == NULL_TREE)
85240 + return NULL_TREE;
85241 +
85242 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, gimple_location(stmt));
85243 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
85244 + update_stmt(assign);
85245 + return gimple_get_lhs(assign);
85246 +}
85247 +
85248 +static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var, tree rhs, tree new_rhs1, tree new_rhs2, void (*gimple_assign_set_rhs)(gimple, tree))
85249 +{
85250 + tree new_rhs, cast_rhs;
85251 +
85252 + if (gimple_assign_rhs_code(def_stmt) == MIN_EXPR)
85253 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
85254 +
85255 + new_rhs = change_assign_rhs(visited, potentionally_overflowed, def_stmt, rhs);
85256 + if (new_rhs != NULL_TREE) {
85257 + gimple_assign_set_rhs(def_stmt, new_rhs);
85258 + update_stmt(def_stmt);
85259 +
85260 + cast_rhs = gimple_assign_rhs1(get_def_stmt(new_rhs));
85261 +
85262 + check_size_overflow(def_stmt, cast_rhs, rhs, potentionally_overflowed);
85263 + }
85264 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85265 +}
85266 +
85267 +static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85268 +{
85269 + tree rhs1, rhs2;
85270 + gimple def_stmt = get_def_stmt(var);
85271 + tree new_rhs1 = NULL_TREE;
85272 + tree new_rhs2 = NULL_TREE;
85273 +
85274 + rhs1 = gimple_assign_rhs1(def_stmt);
85275 + rhs2 = gimple_assign_rhs2(def_stmt);
85276 +
85277 + /* no DImode/TImode division in the 32/64 bit kernel */
85278 + switch (gimple_assign_rhs_code(def_stmt)) {
85279 + case RDIV_EXPR:
85280 + case TRUNC_DIV_EXPR:
85281 + case CEIL_DIV_EXPR:
85282 + case FLOOR_DIV_EXPR:
85283 + case ROUND_DIV_EXPR:
85284 + case TRUNC_MOD_EXPR:
85285 + case CEIL_MOD_EXPR:
85286 + case FLOOR_MOD_EXPR:
85287 + case ROUND_MOD_EXPR:
85288 + case EXACT_DIV_EXPR:
85289 + case POINTER_PLUS_EXPR:
85290 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85291 + default:
85292 + break;
85293 + }
85294 +
85295 + *potentionally_overflowed = true;
85296 +
85297 + if (TREE_CODE(rhs1) == SSA_NAME)
85298 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
85299 + if (TREE_CODE(rhs2) == SSA_NAME)
85300 + new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
85301 +
85302 + if (is_gimple_constant(rhs2))
85303 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs1, new_rhs1, signed_cast_constant(rhs2), &gimple_assign_set_rhs1);
85304 +
85305 + if (is_gimple_constant(rhs1))
85306 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs2, signed_cast_constant(rhs1), new_rhs2, &gimple_assign_set_rhs2);
85307 +
85308 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
85309 +}
85310 +
85311 +#if BUILDING_GCC_VERSION >= 4007
85312 +static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree rhs)
85313 +{
85314 + if (is_gimple_constant(rhs))
85315 + return signed_cast_constant(rhs);
85316 + if (TREE_CODE(rhs) != SSA_NAME)
85317 + return NULL_TREE;
85318 + return expand(visited, potentionally_overflowed, rhs);
85319 +}
85320 +
85321 +static tree handle_ternary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85322 +{
85323 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3;
85324 + gimple def_stmt = get_def_stmt(var);
85325 +
85326 + *potentionally_overflowed = true;
85327 +
85328 + rhs1 = gimple_assign_rhs1(def_stmt);
85329 + rhs2 = gimple_assign_rhs2(def_stmt);
85330 + rhs3 = gimple_assign_rhs3(def_stmt);
85331 + new_rhs1 = get_new_rhs(visited, potentionally_overflowed, rhs1);
85332 + new_rhs2 = get_new_rhs(visited, potentionally_overflowed, rhs2);
85333 + new_rhs3 = get_new_rhs(visited, potentionally_overflowed, rhs3);
85334 +
85335 + if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE)
85336 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, new_rhs3);
85337 + error("handle_ternary_ops: unknown rhs");
85338 + gcc_unreachable();
85339 +}
85340 +#endif
85341 +
85342 +static void set_size_overflow_type(tree node)
85343 +{
85344 + switch (TYPE_MODE(TREE_TYPE(node))) {
85345 + case SImode:
85346 + signed_size_overflow_type = intDI_type_node;
85347 + unsigned_size_overflow_type = unsigned_intDI_type_node;
85348 + break;
85349 + case DImode:
85350 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) {
85351 + signed_size_overflow_type = intDI_type_node;
85352 + unsigned_size_overflow_type = unsigned_intDI_type_node;
85353 + } else {
85354 + signed_size_overflow_type = intTI_type_node;
85355 + unsigned_size_overflow_type = unsigned_intTI_type_node;
85356 + }
85357 + break;
85358 + default:
85359 + error("set_size_overflow_type: unsupported gcc configuration.");
85360 + gcc_unreachable();
85361 + }
85362 +}
85363 +
85364 +static tree expand_visited(gimple def_stmt)
85365 +{
85366 + gimple tmp;
85367 + gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
85368 +
85369 + gsi_next(&gsi);
85370 + tmp = gsi_stmt(gsi);
85371 + switch (gimple_code(tmp)) {
85372 + case GIMPLE_ASSIGN:
85373 + return gimple_get_lhs(tmp);
85374 + case GIMPLE_PHI:
85375 + return gimple_phi_result(tmp);
85376 + case GIMPLE_CALL:
85377 + return gimple_call_lhs(tmp);
85378 + default:
85379 + return NULL_TREE;
85380 + }
85381 +}
85382 +
85383 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85384 +{
85385 + gimple def_stmt;
85386 + enum tree_code code = TREE_CODE(TREE_TYPE(var));
85387 +
85388 + if (is_gimple_constant(var))
85389 + return NULL_TREE;
85390 +
85391 + if (TREE_CODE(var) == ADDR_EXPR)
85392 + return NULL_TREE;
85393 +
85394 + gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE);
85395 + if (code != INTEGER_TYPE)
85396 + return NULL_TREE;
85397 +
85398 + if (SSA_NAME_IS_DEFAULT_DEF(var)) {
85399 + check_missing_attribute(var);
85400 + return NULL_TREE;
85401 + }
85402 +
85403 + def_stmt = get_def_stmt(var);
85404 +
85405 + if (!def_stmt)
85406 + return NULL_TREE;
85407 +
85408 + if (pointer_set_contains(visited, def_stmt))
85409 + return expand_visited(def_stmt);
85410 +
85411 + switch (gimple_code(def_stmt)) {
85412 + case GIMPLE_NOP:
85413 + check_missing_attribute(var);
85414 + return NULL_TREE;
85415 + case GIMPLE_PHI:
85416 + return build_new_phi(visited, potentionally_overflowed, def_stmt);
85417 + case GIMPLE_CALL:
85418 + case GIMPLE_ASM:
85419 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85420 + case GIMPLE_ASSIGN:
85421 + switch (gimple_num_ops(def_stmt)) {
85422 + case 2:
85423 + return handle_unary_ops(visited, potentionally_overflowed, var);
85424 + case 3:
85425 + return handle_binary_ops(visited, potentionally_overflowed, var);
85426 +#if BUILDING_GCC_VERSION >= 4007
85427 + case 4:
85428 + return handle_ternary_ops(visited, potentionally_overflowed, var);
85429 +#endif
85430 + }
85431 + default:
85432 + debug_gimple_stmt(def_stmt);
85433 + error("expand: unknown gimple code");
85434 + gcc_unreachable();
85435 + }
85436 +}
85437 +
85438 +static void change_function_arg(gimple stmt, tree origarg, unsigned int argnum, tree newarg)
85439 +{
85440 + gimple assign;
85441 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
85442 + tree origtype = TREE_TYPE(origarg);
85443 +
85444 + gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
85445 +
85446 + assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt));
85447 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
85448 + update_stmt(assign);
85449 +
85450 + gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
85451 + update_stmt(stmt);
85452 +}
85453 +
85454 +static tree get_function_arg(unsigned int argnum, gimple stmt, tree fndecl)
85455 +{
85456 + const char *origid;
85457 + tree arg, origarg;
85458 +
85459 + if (!DECL_ABSTRACT_ORIGIN(fndecl)) {
85460 + gcc_assert(gimple_call_num_args(stmt) > argnum);
85461 + return gimple_call_arg(stmt, argnum);
85462 + }
85463 +
85464 + origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
85465 + while (origarg && argnum) {
85466 + argnum--;
85467 + origarg = TREE_CHAIN(origarg);
85468 + }
85469 +
85470 + gcc_assert(argnum == 0);
85471 +
85472 + gcc_assert(origarg != NULL_TREE);
85473 + origid = NAME(origarg);
85474 + for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
85475 + if (!strcmp(origid, NAME(arg)))
85476 + return arg;
85477 + }
85478 + return NULL_TREE;
85479 +}
85480 +
85481 +static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
85482 +{
85483 + struct pointer_set_t *visited;
85484 + tree arg, newarg, type_max;
85485 + gimple ucast_stmt;
85486 + bool potentionally_overflowed;
85487 +
85488 + arg = get_function_arg(argnum, stmt, fndecl);
85489 + if (arg == NULL_TREE)
85490 + return;
85491 +
85492 + if (is_gimple_constant(arg))
85493 + return;
85494 + if (TREE_CODE(arg) != SSA_NAME)
85495 + return;
85496 +
85497 + check_arg_type(arg);
85498 +
85499 + set_size_overflow_type(arg);
85500 +
85501 + visited = pointer_set_create();
85502 + potentionally_overflowed = false;
85503 + newarg = expand(visited, &potentionally_overflowed, arg);
85504 + pointer_set_destroy(visited);
85505 +
85506 + if (newarg == NULL_TREE || !potentionally_overflowed)
85507 + return;
85508 +
85509 + change_function_arg(stmt, arg, argnum, newarg);
85510 +
85511 + ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, newarg);
85512 +
85513 + type_max = build_int_cstu(unsigned_size_overflow_type, 0x7fffffff);
85514 + insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
85515 +}
85516 +
85517 +static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl)
85518 +{
85519 + tree p = TREE_VALUE(attr);
85520 + do {
85521 + handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
85522 + p = TREE_CHAIN(p);
85523 + } while (p);
85524 +}
85525 +
85526 +static void handle_function_by_hash(gimple stmt, tree fndecl)
85527 +{
85528 + struct size_overflow_hash *hash;
85529 + expanded_location xloc;
85530 +
85531 + hash = get_function_hash(fndecl);
85532 + xloc = expand_location(DECL_SOURCE_LOCATION(fndecl));
85533 +
85534 + fndecl = get_original_function_decl(fndecl);
85535 + if (!hash->name || !hash->file)
85536 + return;
85537 + if (strcmp(hash->name, NAME(fndecl)) || strcmp(hash->file, xloc.file))
85538 + return;
85539 +
85540 +#define search_param(argnum) \
85541 + if (hash->param##argnum) \
85542 + handle_function_arg(stmt, fndecl, argnum - 1);
85543 +
85544 + search_param(1);
85545 + search_param(2);
85546 + search_param(3);
85547 + search_param(4);
85548 + search_param(5);
85549 + search_param(6);
85550 + search_param(7);
85551 + search_param(8);
85552 + search_param(9);
85553 +#undef search_param
85554 +}
85555 +
85556 +static unsigned int handle_function(void)
85557 +{
85558 + basic_block bb = ENTRY_BLOCK_PTR->next_bb;
85559 + int saved_last_basic_block = last_basic_block;
85560 +
85561 + do {
85562 + gimple_stmt_iterator gsi;
85563 + basic_block next = bb->next_bb;
85564 +
85565 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
85566 + tree fndecl, attr;
85567 + gimple stmt = gsi_stmt(gsi);
85568 +
85569 + if (!(is_gimple_call(stmt)))
85570 + continue;
85571 + fndecl = gimple_call_fndecl(stmt);
85572 + if (fndecl == NULL_TREE)
85573 + continue;
85574 + if (gimple_call_num_args(stmt) == 0)
85575 + continue;
85576 + attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl)));
85577 + if (!attr || !TREE_VALUE(attr))
85578 + handle_function_by_hash(stmt, fndecl);
85579 + else
85580 + handle_function_by_attribute(stmt, attr, fndecl);
85581 + gsi = gsi_for_stmt(stmt);
85582 + }
85583 + bb = next;
85584 + } while (bb && bb->index <= saved_last_basic_block);
85585 + return 0;
85586 +}
85587 +
85588 +static struct gimple_opt_pass size_overflow_pass = {
85589 + .pass = {
85590 + .type = GIMPLE_PASS,
85591 + .name = "size_overflow",
85592 + .gate = NULL,
85593 + .execute = handle_function,
85594 + .sub = NULL,
85595 + .next = NULL,
85596 + .static_pass_number = 0,
85597 + .tv_id = TV_NONE,
85598 + .properties_required = PROP_cfg | PROP_referenced_vars,
85599 + .properties_provided = 0,
85600 + .properties_destroyed = 0,
85601 + .todo_flags_start = 0,
85602 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
85603 + }
85604 +};
85605 +
85606 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
85607 +{
85608 + tree fntype;
85609 +
85610 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
85611 +
85612 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func)
85613 + fntype = build_function_type_list(void_type_node,
85614 + const_char_ptr_type_node,
85615 + unsigned_type_node,
85616 + const_char_ptr_type_node,
85617 + NULL_TREE);
85618 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
85619 +
85620 + TREE_PUBLIC(report_size_overflow_decl) = 1;
85621 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
85622 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
85623 +}
85624 +
85625 +extern struct gimple_opt_pass pass_dce;
85626 +
85627 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
85628 +{
85629 + int i;
85630 + const char * const plugin_name = plugin_info->base_name;
85631 + const int argc = plugin_info->argc;
85632 + const struct plugin_argument * const argv = plugin_info->argv;
85633 + bool enable = true;
85634 +
85635 + struct register_pass_info size_overflow_pass_info = {
85636 + .pass = &size_overflow_pass.pass,
85637 + .reference_pass_name = "ssa",
85638 + .ref_pass_instance_number = 1,
85639 + .pos_op = PASS_POS_INSERT_AFTER
85640 + };
85641 +
85642 + if (!plugin_default_version_check(version, &gcc_version)) {
85643 + error(G_("incompatible gcc/plugin versions"));
85644 + return 1;
85645 + }
85646 +
85647 + for (i = 0; i < argc; ++i) {
85648 + if (!(strcmp(argv[i].key, "no-size_overflow"))) {
85649 + enable = false;
85650 + continue;
85651 + }
85652 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
85653 + }
85654 +
85655 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
85656 + if (enable) {
85657 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
85658 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
85659 + }
85660 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
85661 +
85662 + return 0;
85663 +}
85664 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
85665 new file mode 100644
85666 index 0000000..b87ec9d
85667 --- /dev/null
85668 +++ b/tools/gcc/stackleak_plugin.c
85669 @@ -0,0 +1,313 @@
85670 +/*
85671 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
85672 + * Licensed under the GPL v2
85673 + *
85674 + * Note: the choice of the license means that the compilation process is
85675 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
85676 + * but for the kernel it doesn't matter since it doesn't link against
85677 + * any of the gcc libraries
85678 + *
85679 + * gcc plugin to help implement various PaX features
85680 + *
85681 + * - track lowest stack pointer
85682 + *
85683 + * TODO:
85684 + * - initialize all local variables
85685 + *
85686 + * BUGS:
85687 + * - none known
85688 + */
85689 +#include "gcc-plugin.h"
85690 +#include "config.h"
85691 +#include "system.h"
85692 +#include "coretypes.h"
85693 +#include "tree.h"
85694 +#include "tree-pass.h"
85695 +#include "flags.h"
85696 +#include "intl.h"
85697 +#include "toplev.h"
85698 +#include "plugin.h"
85699 +//#include "expr.h" where are you...
85700 +#include "diagnostic.h"
85701 +#include "plugin-version.h"
85702 +#include "tm.h"
85703 +#include "function.h"
85704 +#include "basic-block.h"
85705 +#include "gimple.h"
85706 +#include "rtl.h"
85707 +#include "emit-rtl.h"
85708 +
85709 +extern void print_gimple_stmt(FILE *, gimple, int, int);
85710 +
85711 +int plugin_is_GPL_compatible;
85712 +
85713 +static int track_frame_size = -1;
85714 +static const char track_function[] = "pax_track_stack";
85715 +static const char check_function[] = "pax_check_alloca";
85716 +static bool init_locals;
85717 +
85718 +static struct plugin_info stackleak_plugin_info = {
85719 + .version = "201203140940",
85720 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
85721 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
85722 +};
85723 +
85724 +static bool gate_stackleak_track_stack(void);
85725 +static unsigned int execute_stackleak_tree_instrument(void);
85726 +static unsigned int execute_stackleak_final(void);
85727 +
85728 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
85729 + .pass = {
85730 + .type = GIMPLE_PASS,
85731 + .name = "stackleak_tree_instrument",
85732 + .gate = gate_stackleak_track_stack,
85733 + .execute = execute_stackleak_tree_instrument,
85734 + .sub = NULL,
85735 + .next = NULL,
85736 + .static_pass_number = 0,
85737 + .tv_id = TV_NONE,
85738 + .properties_required = PROP_gimple_leh | PROP_cfg,
85739 + .properties_provided = 0,
85740 + .properties_destroyed = 0,
85741 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
85742 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
85743 + }
85744 +};
85745 +
85746 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
85747 + .pass = {
85748 + .type = RTL_PASS,
85749 + .name = "stackleak_final",
85750 + .gate = gate_stackleak_track_stack,
85751 + .execute = execute_stackleak_final,
85752 + .sub = NULL,
85753 + .next = NULL,
85754 + .static_pass_number = 0,
85755 + .tv_id = TV_NONE,
85756 + .properties_required = 0,
85757 + .properties_provided = 0,
85758 + .properties_destroyed = 0,
85759 + .todo_flags_start = 0,
85760 + .todo_flags_finish = TODO_dump_func
85761 + }
85762 +};
85763 +
85764 +static bool gate_stackleak_track_stack(void)
85765 +{
85766 + return track_frame_size >= 0;
85767 +}
85768 +
85769 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
85770 +{
85771 + gimple check_alloca;
85772 + tree fntype, fndecl, alloca_size;
85773 +
85774 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
85775 + fndecl = build_fn_decl(check_function, fntype);
85776 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
85777 +
85778 + // insert call to void pax_check_alloca(unsigned long size)
85779 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
85780 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
85781 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
85782 +}
85783 +
85784 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
85785 +{
85786 + gimple track_stack;
85787 + tree fntype, fndecl;
85788 +
85789 + fntype = build_function_type_list(void_type_node, NULL_TREE);
85790 + fndecl = build_fn_decl(track_function, fntype);
85791 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
85792 +
85793 + // insert call to void pax_track_stack(void)
85794 + track_stack = gimple_build_call(fndecl, 0);
85795 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
85796 +}
85797 +
85798 +#if BUILDING_GCC_VERSION == 4005
85799 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
85800 +{
85801 + tree fndecl;
85802 +
85803 + if (!is_gimple_call(stmt))
85804 + return false;
85805 + fndecl = gimple_call_fndecl(stmt);
85806 + if (!fndecl)
85807 + return false;
85808 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
85809 + return false;
85810 +// print_node(stderr, "pax", fndecl, 4);
85811 + return DECL_FUNCTION_CODE(fndecl) == code;
85812 +}
85813 +#endif
85814 +
85815 +static bool is_alloca(gimple stmt)
85816 +{
85817 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
85818 + return true;
85819 +
85820 +#if BUILDING_GCC_VERSION >= 4007
85821 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
85822 + return true;
85823 +#endif
85824 +
85825 + return false;
85826 +}
85827 +
85828 +static unsigned int execute_stackleak_tree_instrument(void)
85829 +{
85830 + basic_block bb, entry_bb;
85831 + bool prologue_instrumented = false, is_leaf = true;
85832 +
85833 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
85834 +
85835 + // 1. loop through BBs and GIMPLE statements
85836 + FOR_EACH_BB(bb) {
85837 + gimple_stmt_iterator gsi;
85838 +
85839 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
85840 + gimple stmt;
85841 +
85842 + stmt = gsi_stmt(gsi);
85843 +
85844 + if (is_gimple_call(stmt))
85845 + is_leaf = false;
85846 +
85847 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
85848 + if (!is_alloca(stmt))
85849 + continue;
85850 +
85851 + // 2. insert stack overflow check before each __builtin_alloca call
85852 + stackleak_check_alloca(&gsi);
85853 +
85854 + // 3. insert track call after each __builtin_alloca call
85855 + stackleak_add_instrumentation(&gsi);
85856 + if (bb == entry_bb)
85857 + prologue_instrumented = true;
85858 + }
85859 + }
85860 +
85861 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
85862 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
85863 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
85864 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
85865 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
85866 + return 0;
85867 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
85868 + return 0;
85869 +
85870 + // 4. insert track call at the beginning
85871 + if (!prologue_instrumented) {
85872 + gimple_stmt_iterator gsi;
85873 +
85874 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
85875 + if (dom_info_available_p(CDI_DOMINATORS))
85876 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
85877 + gsi = gsi_start_bb(bb);
85878 + stackleak_add_instrumentation(&gsi);
85879 + }
85880 +
85881 + return 0;
85882 +}
85883 +
85884 +static unsigned int execute_stackleak_final(void)
85885 +{
85886 + rtx insn;
85887 +
85888 + if (cfun->calls_alloca)
85889 + return 0;
85890 +
85891 + // keep calls only if function frame is big enough
85892 + if (get_frame_size() >= track_frame_size)
85893 + return 0;
85894 +
85895 + // 1. find pax_track_stack calls
85896 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
85897 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
85898 + rtx body;
85899 +
85900 + if (!CALL_P(insn))
85901 + continue;
85902 + body = PATTERN(insn);
85903 + if (GET_CODE(body) != CALL)
85904 + continue;
85905 + body = XEXP(body, 0);
85906 + if (GET_CODE(body) != MEM)
85907 + continue;
85908 + body = XEXP(body, 0);
85909 + if (GET_CODE(body) != SYMBOL_REF)
85910 + continue;
85911 + if (strcmp(XSTR(body, 0), track_function))
85912 + continue;
85913 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
85914 + // 2. delete call
85915 + insn = delete_insn_and_edges(insn);
85916 +#if BUILDING_GCC_VERSION >= 4007
85917 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
85918 + insn = delete_insn_and_edges(insn);
85919 +#endif
85920 + }
85921 +
85922 +// print_simple_rtl(stderr, get_insns());
85923 +// print_rtl(stderr, get_insns());
85924 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
85925 +
85926 + return 0;
85927 +}
85928 +
85929 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
85930 +{
85931 + const char * const plugin_name = plugin_info->base_name;
85932 + const int argc = plugin_info->argc;
85933 + const struct plugin_argument * const argv = plugin_info->argv;
85934 + int i;
85935 + struct register_pass_info stackleak_tree_instrument_pass_info = {
85936 + .pass = &stackleak_tree_instrument_pass.pass,
85937 +// .reference_pass_name = "tree_profile",
85938 + .reference_pass_name = "optimized",
85939 + .ref_pass_instance_number = 0,
85940 + .pos_op = PASS_POS_INSERT_BEFORE
85941 + };
85942 + struct register_pass_info stackleak_final_pass_info = {
85943 + .pass = &stackleak_final_rtl_opt_pass.pass,
85944 + .reference_pass_name = "final",
85945 + .ref_pass_instance_number = 0,
85946 + .pos_op = PASS_POS_INSERT_BEFORE
85947 + };
85948 +
85949 + if (!plugin_default_version_check(version, &gcc_version)) {
85950 + error(G_("incompatible gcc/plugin versions"));
85951 + return 1;
85952 + }
85953 +
85954 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
85955 +
85956 + for (i = 0; i < argc; ++i) {
85957 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
85958 + if (!argv[i].value) {
85959 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
85960 + continue;
85961 + }
85962 + track_frame_size = atoi(argv[i].value);
85963 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
85964 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
85965 + continue;
85966 + }
85967 + if (!strcmp(argv[i].key, "initialize-locals")) {
85968 + if (argv[i].value) {
85969 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
85970 + continue;
85971 + }
85972 + init_locals = true;
85973 + continue;
85974 + }
85975 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
85976 + }
85977 +
85978 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
85979 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
85980 +
85981 + return 0;
85982 +}
85983 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
85984 index 6789d78..4afd019 100644
85985 --- a/tools/perf/util/include/asm/alternative-asm.h
85986 +++ b/tools/perf/util/include/asm/alternative-asm.h
85987 @@ -5,4 +5,7 @@
85988
85989 #define altinstruction_entry #
85990
85991 + .macro pax_force_retaddr rip=0, reload=0
85992 + .endm
85993 +
85994 #endif
85995 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
85996 index af0f22f..9a7d479 100644
85997 --- a/usr/gen_init_cpio.c
85998 +++ b/usr/gen_init_cpio.c
85999 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
86000 int retval;
86001 int rc = -1;
86002 int namesize;
86003 - int i;
86004 + unsigned int i;
86005
86006 mode |= S_IFREG;
86007
86008 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
86009 *env_var = *expanded = '\0';
86010 strncat(env_var, start + 2, end - start - 2);
86011 strncat(expanded, new_location, start - new_location);
86012 - strncat(expanded, getenv(env_var), PATH_MAX);
86013 - strncat(expanded, end + 1, PATH_MAX);
86014 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
86015 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
86016 strncpy(new_location, expanded, PATH_MAX);
86017 + new_location[PATH_MAX] = 0;
86018 } else
86019 break;
86020 }
86021 diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
86022 index a457d21..fec1723 100644
86023 --- a/virt/kvm/iommu.c
86024 +++ b/virt/kvm/iommu.c
86025 @@ -310,6 +310,11 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
86026 }
86027 }
86028
86029 +void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
86030 +{
86031 + kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
86032 +}
86033 +
86034 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
86035 {
86036 int idx;
86037 @@ -320,7 +325,7 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
86038 slots = kvm_memslots(kvm);
86039
86040 kvm_for_each_memslot(memslot, slots)
86041 - kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
86042 + kvm_iommu_unmap_pages(kvm, memslot);
86043
86044 srcu_read_unlock(&kvm->srcu, idx);
86045
86046 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
86047 index a91f980..527711d 100644
86048 --- a/virt/kvm/kvm_main.c
86049 +++ b/virt/kvm/kvm_main.c
86050 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
86051
86052 static cpumask_var_t cpus_hardware_enabled;
86053 static int kvm_usage_count = 0;
86054 -static atomic_t hardware_enable_failed;
86055 +static atomic_unchecked_t hardware_enable_failed;
86056
86057 struct kmem_cache *kvm_vcpu_cache;
86058 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
86059 @@ -873,12 +873,13 @@ skip_lpage:
86060 if (r)
86061 goto out_free;
86062
86063 - /* map the pages in iommu page table */
86064 + /* map/unmap the pages in iommu page table */
86065 if (npages) {
86066 r = kvm_iommu_map_pages(kvm, &new);
86067 if (r)
86068 goto out_free;
86069 - }
86070 + } else
86071 + kvm_iommu_unmap_pages(kvm, &old);
86072
86073 r = -ENOMEM;
86074 slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
86075 @@ -2312,7 +2313,7 @@ static void hardware_enable_nolock(void *junk)
86076
86077 if (r) {
86078 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
86079 - atomic_inc(&hardware_enable_failed);
86080 + atomic_inc_unchecked(&hardware_enable_failed);
86081 printk(KERN_INFO "kvm: enabling virtualization on "
86082 "CPU%d failed\n", cpu);
86083 }
86084 @@ -2366,10 +2367,10 @@ static int hardware_enable_all(void)
86085
86086 kvm_usage_count++;
86087 if (kvm_usage_count == 1) {
86088 - atomic_set(&hardware_enable_failed, 0);
86089 + atomic_set_unchecked(&hardware_enable_failed, 0);
86090 on_each_cpu(hardware_enable_nolock, NULL, 1);
86091
86092 - if (atomic_read(&hardware_enable_failed)) {
86093 + if (atomic_read_unchecked(&hardware_enable_failed)) {
86094 hardware_disable_all_nolock();
86095 r = -EBUSY;
86096 }
86097 @@ -2732,7 +2733,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
86098 kvm_arch_vcpu_put(vcpu);
86099 }
86100
86101 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86102 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86103 struct module *module)
86104 {
86105 int r;
86106 @@ -2795,7 +2796,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86107 if (!vcpu_align)
86108 vcpu_align = __alignof__(struct kvm_vcpu);
86109 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
86110 - 0, NULL);
86111 + SLAB_USERCOPY, NULL);
86112 if (!kvm_vcpu_cache) {
86113 r = -ENOMEM;
86114 goto out_free_3;
86115 @@ -2805,9 +2806,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86116 if (r)
86117 goto out_free;
86118
86119 - kvm_chardev_ops.owner = module;
86120 - kvm_vm_fops.owner = module;
86121 - kvm_vcpu_fops.owner = module;
86122 + pax_open_kernel();
86123 + *(void **)&kvm_chardev_ops.owner = module;
86124 + *(void **)&kvm_vm_fops.owner = module;
86125 + *(void **)&kvm_vcpu_fops.owner = module;
86126 + pax_close_kernel();
86127
86128 r = misc_register(&kvm_dev);
86129 if (r) {