]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-3.2.9-201203022148.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-3.2.9-201203022148.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index dfa6fc6..0095943 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9 +*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13 @@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17 +*.gmo
18 *.grep
19 *.grp
20 *.gz
21 @@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25 +*.vim
26 *.xml
27 *.xz
28 *_MODULES
29 +*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33 @@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37 +PERF*
38 SCCS
39 System.map*
40 TAGS
41 @@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45 +builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51 +clut_vga16.c
52 +common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59 +config.c
60 config.mak
61 config.mak.autogen
62 +config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66 @@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70 +exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74 @@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78 +gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90 @@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103 -linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107 @@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111 -media
112 mconf
113 +mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120 +mkpiggy
121 mkprep
122 mkregtable
123 mktables
124 @@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128 +regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132 @@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152 +vmlinux.bin.bz2
153 vmlinux.lds
154 +vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158 @@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zconf.lex.c
169 zoffset.h
170 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171 index 81c287f..d456d02 100644
172 --- a/Documentation/kernel-parameters.txt
173 +++ b/Documentation/kernel-parameters.txt
174 @@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179 + virtualization environments that don't cope well with the
180 + expand down segment used by UDEREF on X86-32 or the frequent
181 + page table updates on X86-64.
182 +
183 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184 +
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188 diff --git a/Makefile b/Makefile
189 index 5f1739b..1831396 100644
190 --- a/Makefile
191 +++ b/Makefile
192 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197 -HOSTCXXFLAGS = -O2
198 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208 -PHONY += scripts_basic
209 -scripts_basic:
210 +PHONY += scripts_basic gcc-plugins
211 +scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215 @@ -564,6 +565,48 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219 +ifndef DISABLE_PAX_PLUGINS
220 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223 +endif
224 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
225 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227 +endif
228 +ifdef CONFIG_KALLOCSTAT_PLUGIN
229 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230 +endif
231 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
234 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
235 +endif
236 +ifdef CONFIG_CHECKER_PLUGIN
237 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
238 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
239 +endif
240 +endif
241 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS)
242 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
243 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
244 +ifeq ($(KBUILD_EXTMOD),)
245 +gcc-plugins:
246 + $(Q)$(MAKE) $(build)=tools/gcc
247 +else
248 +gcc-plugins: ;
249 +endif
250 +else
251 +gcc-plugins:
252 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
253 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
254 +else
255 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
256 +endif
257 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
258 +endif
259 +endif
260 +
261 include $(srctree)/arch/$(SRCARCH)/Makefile
262
263 ifneq ($(CONFIG_FRAME_WARN),0)
264 @@ -708,7 +751,7 @@ export mod_strip_cmd
265
266
267 ifeq ($(KBUILD_EXTMOD),)
268 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
269 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
270
271 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
272 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
273 @@ -932,6 +975,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
274
275 # The actual objects are generated when descending,
276 # make sure no implicit rule kicks in
277 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
278 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
279 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280
281 # Handle descending into subdirectories listed in $(vmlinux-dirs)
282 @@ -941,7 +986,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
283 # Error messages still appears in the original language
284
285 PHONY += $(vmlinux-dirs)
286 -$(vmlinux-dirs): prepare scripts
287 +$(vmlinux-dirs): gcc-plugins prepare scripts
288 $(Q)$(MAKE) $(build)=$@
289
290 # Store (new) KERNELRELASE string in include/config/kernel.release
291 @@ -985,6 +1030,7 @@ prepare0: archprepare FORCE
292 $(Q)$(MAKE) $(build)=.
293
294 # All the preparing..
295 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
296 prepare: prepare0
297
298 # Generate some files
299 @@ -1086,6 +1132,8 @@ all: modules
300 # using awk while concatenating to the final file.
301
302 PHONY += modules
303 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
304 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
305 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
306 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
307 @$(kecho) ' Building modules, stage 2.';
308 @@ -1101,7 +1149,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
309
310 # Target to prepare building external modules
311 PHONY += modules_prepare
312 -modules_prepare: prepare scripts
313 +modules_prepare: gcc-plugins prepare scripts
314
315 # Target to install modules
316 PHONY += modules_install
317 @@ -1198,6 +1246,7 @@ distclean: mrproper
318 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
319 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
320 -o -name '.*.rej' \
321 + -o -name '.*.rej' -o -name '*.so' \
322 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
323 -type f -print | xargs rm -f
324
325 @@ -1358,6 +1407,8 @@ PHONY += $(module-dirs) modules
326 $(module-dirs): crmodverdir $(objtree)/Module.symvers
327 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
328
329 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
330 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
331 modules: $(module-dirs)
332 @$(kecho) ' Building modules, stage 2.';
333 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
334 @@ -1484,17 +1535,21 @@ else
335 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
336 endif
337
338 -%.s: %.c prepare scripts FORCE
339 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
340 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
341 +%.s: %.c gcc-plugins prepare scripts FORCE
342 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
343 %.i: %.c prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345 -%.o: %.c prepare scripts FORCE
346 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
347 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
348 +%.o: %.c gcc-plugins prepare scripts FORCE
349 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
350 %.lst: %.c prepare scripts FORCE
351 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
352 -%.s: %.S prepare scripts FORCE
353 +%.s: %.S gcc-plugins prepare scripts FORCE
354 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
355 -%.o: %.S prepare scripts FORCE
356 +%.o: %.S gcc-plugins prepare scripts FORCE
357 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
358 %.symtypes: %.c prepare scripts FORCE
359 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
360 @@ -1504,11 +1559,15 @@ endif
361 $(cmd_crmodverdir)
362 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
363 $(build)=$(build-dir)
364 -%/: prepare scripts FORCE
365 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
366 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
367 +%/: gcc-plugins prepare scripts FORCE
368 $(cmd_crmodverdir)
369 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
370 $(build)=$(build-dir)
371 -%.ko: prepare scripts FORCE
372 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
373 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
374 +%.ko: gcc-plugins prepare scripts FORCE
375 $(cmd_crmodverdir)
376 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
377 $(build)=$(build-dir) $(@:.ko=.o)
378 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
379 index 640f909..48b6597 100644
380 --- a/arch/alpha/include/asm/atomic.h
381 +++ b/arch/alpha/include/asm/atomic.h
382 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
383 #define atomic_dec(v) atomic_sub(1,(v))
384 #define atomic64_dec(v) atomic64_sub(1,(v))
385
386 +#define atomic64_read_unchecked(v) atomic64_read(v)
387 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
388 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
389 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
390 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
391 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
392 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
393 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
394 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
395 +
396 #define smp_mb__before_atomic_dec() smp_mb()
397 #define smp_mb__after_atomic_dec() smp_mb()
398 #define smp_mb__before_atomic_inc() smp_mb()
399 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
400 index da5449e..7418343 100644
401 --- a/arch/alpha/include/asm/elf.h
402 +++ b/arch/alpha/include/asm/elf.h
403 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
404
405 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
406
407 +#ifdef CONFIG_PAX_ASLR
408 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
409 +
410 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
411 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
412 +#endif
413 +
414 /* $0 is set by ld.so to a pointer to a function which might be
415 registered using atexit. This provides a mean for the dynamic
416 linker to call DT_FINI functions for shared libraries that have
417 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
418 index de98a73..bd4f1f8 100644
419 --- a/arch/alpha/include/asm/pgtable.h
420 +++ b/arch/alpha/include/asm/pgtable.h
421 @@ -101,6 +101,17 @@ struct vm_area_struct;
422 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
423 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
424 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
425 +
426 +#ifdef CONFIG_PAX_PAGEEXEC
427 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
428 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
429 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
430 +#else
431 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
432 +# define PAGE_COPY_NOEXEC PAGE_COPY
433 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
434 +#endif
435 +
436 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
437
438 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
439 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
440 index 2fd00b7..cfd5069 100644
441 --- a/arch/alpha/kernel/module.c
442 +++ b/arch/alpha/kernel/module.c
443 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
444
445 /* The small sections were sorted to the end of the segment.
446 The following should definitely cover them. */
447 - gp = (u64)me->module_core + me->core_size - 0x8000;
448 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
449 got = sechdrs[me->arch.gotsecindex].sh_addr;
450
451 for (i = 0; i < n; i++) {
452 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
453 index 01e8715..be0e80f 100644
454 --- a/arch/alpha/kernel/osf_sys.c
455 +++ b/arch/alpha/kernel/osf_sys.c
456 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
457 /* At this point: (!vma || addr < vma->vm_end). */
458 if (limit - len < addr)
459 return -ENOMEM;
460 - if (!vma || addr + len <= vma->vm_start)
461 + if (check_heap_stack_gap(vma, addr, len))
462 return addr;
463 addr = vma->vm_end;
464 vma = vma->vm_next;
465 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
466 merely specific addresses, but regions of memory -- perhaps
467 this feature should be incorporated into all ports? */
468
469 +#ifdef CONFIG_PAX_RANDMMAP
470 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
471 +#endif
472 +
473 if (addr) {
474 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
475 if (addr != (unsigned long) -ENOMEM)
476 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
477 }
478
479 /* Next, try allocating at TASK_UNMAPPED_BASE. */
480 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
481 - len, limit);
482 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
483 +
484 if (addr != (unsigned long) -ENOMEM)
485 return addr;
486
487 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
488 index fadd5f8..904e73a 100644
489 --- a/arch/alpha/mm/fault.c
490 +++ b/arch/alpha/mm/fault.c
491 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
492 __reload_thread(pcb);
493 }
494
495 +#ifdef CONFIG_PAX_PAGEEXEC
496 +/*
497 + * PaX: decide what to do with offenders (regs->pc = fault address)
498 + *
499 + * returns 1 when task should be killed
500 + * 2 when patched PLT trampoline was detected
501 + * 3 when unpatched PLT trampoline was detected
502 + */
503 +static int pax_handle_fetch_fault(struct pt_regs *regs)
504 +{
505 +
506 +#ifdef CONFIG_PAX_EMUPLT
507 + int err;
508 +
509 + do { /* PaX: patched PLT emulation #1 */
510 + unsigned int ldah, ldq, jmp;
511 +
512 + err = get_user(ldah, (unsigned int *)regs->pc);
513 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
514 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
515 +
516 + if (err)
517 + break;
518 +
519 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
520 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
521 + jmp == 0x6BFB0000U)
522 + {
523 + unsigned long r27, addr;
524 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
525 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
526 +
527 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
528 + err = get_user(r27, (unsigned long *)addr);
529 + if (err)
530 + break;
531 +
532 + regs->r27 = r27;
533 + regs->pc = r27;
534 + return 2;
535 + }
536 + } while (0);
537 +
538 + do { /* PaX: patched PLT emulation #2 */
539 + unsigned int ldah, lda, br;
540 +
541 + err = get_user(ldah, (unsigned int *)regs->pc);
542 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
543 + err |= get_user(br, (unsigned int *)(regs->pc+8));
544 +
545 + if (err)
546 + break;
547 +
548 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
549 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
550 + (br & 0xFFE00000U) == 0xC3E00000U)
551 + {
552 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
553 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
554 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
555 +
556 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
557 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
558 + return 2;
559 + }
560 + } while (0);
561 +
562 + do { /* PaX: unpatched PLT emulation */
563 + unsigned int br;
564 +
565 + err = get_user(br, (unsigned int *)regs->pc);
566 +
567 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
568 + unsigned int br2, ldq, nop, jmp;
569 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
570 +
571 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
572 + err = get_user(br2, (unsigned int *)addr);
573 + err |= get_user(ldq, (unsigned int *)(addr+4));
574 + err |= get_user(nop, (unsigned int *)(addr+8));
575 + err |= get_user(jmp, (unsigned int *)(addr+12));
576 + err |= get_user(resolver, (unsigned long *)(addr+16));
577 +
578 + if (err)
579 + break;
580 +
581 + if (br2 == 0xC3600000U &&
582 + ldq == 0xA77B000CU &&
583 + nop == 0x47FF041FU &&
584 + jmp == 0x6B7B0000U)
585 + {
586 + regs->r28 = regs->pc+4;
587 + regs->r27 = addr+16;
588 + regs->pc = resolver;
589 + return 3;
590 + }
591 + }
592 + } while (0);
593 +#endif
594 +
595 + return 1;
596 +}
597 +
598 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
599 +{
600 + unsigned long i;
601 +
602 + printk(KERN_ERR "PAX: bytes at PC: ");
603 + for (i = 0; i < 5; i++) {
604 + unsigned int c;
605 + if (get_user(c, (unsigned int *)pc+i))
606 + printk(KERN_CONT "???????? ");
607 + else
608 + printk(KERN_CONT "%08x ", c);
609 + }
610 + printk("\n");
611 +}
612 +#endif
613
614 /*
615 * This routine handles page faults. It determines the address,
616 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
617 good_area:
618 si_code = SEGV_ACCERR;
619 if (cause < 0) {
620 - if (!(vma->vm_flags & VM_EXEC))
621 + if (!(vma->vm_flags & VM_EXEC)) {
622 +
623 +#ifdef CONFIG_PAX_PAGEEXEC
624 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
625 + goto bad_area;
626 +
627 + up_read(&mm->mmap_sem);
628 + switch (pax_handle_fetch_fault(regs)) {
629 +
630 +#ifdef CONFIG_PAX_EMUPLT
631 + case 2:
632 + case 3:
633 + return;
634 +#endif
635 +
636 + }
637 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
638 + do_group_exit(SIGKILL);
639 +#else
640 goto bad_area;
641 +#endif
642 +
643 + }
644 } else if (!cause) {
645 /* Allow reads even for write-only mappings */
646 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
647 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
648 index 86976d0..6610950 100644
649 --- a/arch/arm/include/asm/atomic.h
650 +++ b/arch/arm/include/asm/atomic.h
651 @@ -15,6 +15,10 @@
652 #include <linux/types.h>
653 #include <asm/system.h>
654
655 +#ifdef CONFIG_GENERIC_ATOMIC64
656 +#include <asm-generic/atomic64.h>
657 +#endif
658 +
659 #define ATOMIC_INIT(i) { (i) }
660
661 #ifdef __KERNEL__
662 @@ -239,6 +243,14 @@ typedef struct {
663 u64 __aligned(8) counter;
664 } atomic64_t;
665
666 +#ifdef CONFIG_PAX_REFCOUNT
667 +typedef struct {
668 + u64 __aligned(8) counter;
669 +} atomic64_unchecked_t;
670 +#else
671 +typedef atomic64_t atomic64_unchecked_t;
672 +#endif
673 +
674 #define ATOMIC64_INIT(i) { (i) }
675
676 static inline u64 atomic64_read(atomic64_t *v)
677 @@ -459,6 +471,16 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
678 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
679 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
680
681 +#define atomic64_read_unchecked(v) atomic64_read(v)
682 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
683 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
684 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
685 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
686 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
687 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
688 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
689 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
690 +
691 #endif /* !CONFIG_GENERIC_ATOMIC64 */
692 #endif
693 #endif
694 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
695 index 0e9ce8d..6ef1e03 100644
696 --- a/arch/arm/include/asm/elf.h
697 +++ b/arch/arm/include/asm/elf.h
698 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
699 the loader. We need to make sure that it is out of the way of the program
700 that it will "exec", and that there is sufficient room for the brk. */
701
702 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
703 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
704 +
705 +#ifdef CONFIG_PAX_ASLR
706 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
707 +
708 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
709 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
710 +#endif
711
712 /* When the program starts, a1 contains a pointer to a function to be
713 registered with atexit, as per the SVR4 ABI. A value of 0 means we
714 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
715 extern void elf_set_personality(const struct elf32_hdr *);
716 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
717
718 -struct mm_struct;
719 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
720 -#define arch_randomize_brk arch_randomize_brk
721 -
722 extern int vectors_user_mapping(void);
723 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
724 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
725 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
726 index e51b1e8..32a3113 100644
727 --- a/arch/arm/include/asm/kmap_types.h
728 +++ b/arch/arm/include/asm/kmap_types.h
729 @@ -21,6 +21,7 @@ enum km_type {
730 KM_L1_CACHE,
731 KM_L2_CACHE,
732 KM_KDB,
733 + KM_CLEARPAGE,
734 KM_TYPE_NR
735 };
736
737 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
738 index b293616..96310e5 100644
739 --- a/arch/arm/include/asm/uaccess.h
740 +++ b/arch/arm/include/asm/uaccess.h
741 @@ -22,6 +22,8 @@
742 #define VERIFY_READ 0
743 #define VERIFY_WRITE 1
744
745 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
746 +
747 /*
748 * The exception table consists of pairs of addresses: the first is the
749 * address of an instruction that is allowed to fault, and the second is
750 @@ -387,8 +389,23 @@ do { \
751
752
753 #ifdef CONFIG_MMU
754 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
755 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
756 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
757 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
758 +
759 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
760 +{
761 + if (!__builtin_constant_p(n))
762 + check_object_size(to, n, false);
763 + return ___copy_from_user(to, from, n);
764 +}
765 +
766 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
767 +{
768 + if (!__builtin_constant_p(n))
769 + check_object_size(from, n, true);
770 + return ___copy_to_user(to, from, n);
771 +}
772 +
773 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
774 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
775 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
776 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
777
778 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
779 {
780 + if ((long)n < 0)
781 + return n;
782 +
783 if (access_ok(VERIFY_READ, from, n))
784 n = __copy_from_user(to, from, n);
785 else /* security hole - plug it */
786 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
787
788 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
789 {
790 + if ((long)n < 0)
791 + return n;
792 +
793 if (access_ok(VERIFY_WRITE, to, n))
794 n = __copy_to_user(to, from, n);
795 return n;
796 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
797 index 5b0bce6..becd81c 100644
798 --- a/arch/arm/kernel/armksyms.c
799 +++ b/arch/arm/kernel/armksyms.c
800 @@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
801 #ifdef CONFIG_MMU
802 EXPORT_SYMBOL(copy_page);
803
804 -EXPORT_SYMBOL(__copy_from_user);
805 -EXPORT_SYMBOL(__copy_to_user);
806 +EXPORT_SYMBOL(___copy_from_user);
807 +EXPORT_SYMBOL(___copy_to_user);
808 EXPORT_SYMBOL(__clear_user);
809
810 EXPORT_SYMBOL(__get_user_1);
811 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
812 index 3d0c6fb..3dcae52 100644
813 --- a/arch/arm/kernel/process.c
814 +++ b/arch/arm/kernel/process.c
815 @@ -28,7 +28,6 @@
816 #include <linux/tick.h>
817 #include <linux/utsname.h>
818 #include <linux/uaccess.h>
819 -#include <linux/random.h>
820 #include <linux/hw_breakpoint.h>
821 #include <linux/cpuidle.h>
822
823 @@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
824 return 0;
825 }
826
827 -unsigned long arch_randomize_brk(struct mm_struct *mm)
828 -{
829 - unsigned long range_end = mm->brk + 0x02000000;
830 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
831 -}
832 -
833 #ifdef CONFIG_MMU
834 /*
835 * The vectors page is always readable from user space for the
836 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
837 index 99a5727..a3d5bb1 100644
838 --- a/arch/arm/kernel/traps.c
839 +++ b/arch/arm/kernel/traps.c
840 @@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
841
842 static DEFINE_RAW_SPINLOCK(die_lock);
843
844 +extern void gr_handle_kernel_exploit(void);
845 +
846 /*
847 * This function is protected against re-entrancy.
848 */
849 @@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
850 panic("Fatal exception in interrupt");
851 if (panic_on_oops)
852 panic("Fatal exception");
853 +
854 + gr_handle_kernel_exploit();
855 +
856 if (ret != NOTIFY_STOP)
857 do_exit(SIGSEGV);
858 }
859 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
860 index 66a477a..bee61d3 100644
861 --- a/arch/arm/lib/copy_from_user.S
862 +++ b/arch/arm/lib/copy_from_user.S
863 @@ -16,7 +16,7 @@
864 /*
865 * Prototype:
866 *
867 - * size_t __copy_from_user(void *to, const void *from, size_t n)
868 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
869 *
870 * Purpose:
871 *
872 @@ -84,11 +84,11 @@
873
874 .text
875
876 -ENTRY(__copy_from_user)
877 +ENTRY(___copy_from_user)
878
879 #include "copy_template.S"
880
881 -ENDPROC(__copy_from_user)
882 +ENDPROC(___copy_from_user)
883
884 .pushsection .fixup,"ax"
885 .align 0
886 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
887 index d066df6..df28194 100644
888 --- a/arch/arm/lib/copy_to_user.S
889 +++ b/arch/arm/lib/copy_to_user.S
890 @@ -16,7 +16,7 @@
891 /*
892 * Prototype:
893 *
894 - * size_t __copy_to_user(void *to, const void *from, size_t n)
895 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
896 *
897 * Purpose:
898 *
899 @@ -88,11 +88,11 @@
900 .text
901
902 ENTRY(__copy_to_user_std)
903 -WEAK(__copy_to_user)
904 +WEAK(___copy_to_user)
905
906 #include "copy_template.S"
907
908 -ENDPROC(__copy_to_user)
909 +ENDPROC(___copy_to_user)
910 ENDPROC(__copy_to_user_std)
911
912 .pushsection .fixup,"ax"
913 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
914 index d0ece2a..5ae2f39 100644
915 --- a/arch/arm/lib/uaccess.S
916 +++ b/arch/arm/lib/uaccess.S
917 @@ -20,7 +20,7 @@
918
919 #define PAGE_SHIFT 12
920
921 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
922 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
923 * Purpose : copy a block to user memory from kernel memory
924 * Params : to - user memory
925 * : from - kernel memory
926 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
927 sub r2, r2, ip
928 b .Lc2u_dest_aligned
929
930 -ENTRY(__copy_to_user)
931 +ENTRY(___copy_to_user)
932 stmfd sp!, {r2, r4 - r7, lr}
933 cmp r2, #4
934 blt .Lc2u_not_enough
935 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
936 ldrgtb r3, [r1], #0
937 USER( T(strgtb) r3, [r0], #1) @ May fault
938 b .Lc2u_finished
939 -ENDPROC(__copy_to_user)
940 +ENDPROC(___copy_to_user)
941
942 .pushsection .fixup,"ax"
943 .align 0
944 9001: ldmfd sp!, {r0, r4 - r7, pc}
945 .popsection
946
947 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
948 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
949 * Purpose : copy a block from user memory to kernel memory
950 * Params : to - kernel memory
951 * : from - user memory
952 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
953 sub r2, r2, ip
954 b .Lcfu_dest_aligned
955
956 -ENTRY(__copy_from_user)
957 +ENTRY(___copy_from_user)
958 stmfd sp!, {r0, r2, r4 - r7, lr}
959 cmp r2, #4
960 blt .Lcfu_not_enough
961 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
962 USER( T(ldrgtb) r3, [r1], #1) @ May fault
963 strgtb r3, [r0], #1
964 b .Lcfu_finished
965 -ENDPROC(__copy_from_user)
966 +ENDPROC(___copy_from_user)
967
968 .pushsection .fixup,"ax"
969 .align 0
970 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
971 index 025f742..8432b08 100644
972 --- a/arch/arm/lib/uaccess_with_memcpy.c
973 +++ b/arch/arm/lib/uaccess_with_memcpy.c
974 @@ -104,7 +104,7 @@ out:
975 }
976
977 unsigned long
978 -__copy_to_user(void __user *to, const void *from, unsigned long n)
979 +___copy_to_user(void __user *to, const void *from, unsigned long n)
980 {
981 /*
982 * This test is stubbed out of the main function above to keep
983 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
984 index 2b2d51c..0127490 100644
985 --- a/arch/arm/mach-ux500/mbox-db5500.c
986 +++ b/arch/arm/mach-ux500/mbox-db5500.c
987 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
988 return sprintf(buf, "0x%X\n", mbox_value);
989 }
990
991 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
992 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
993
994 static int mbox_show(struct seq_file *s, void *data)
995 {
996 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
997 index aa33949..b242a2f 100644
998 --- a/arch/arm/mm/fault.c
999 +++ b/arch/arm/mm/fault.c
1000 @@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1001 }
1002 #endif
1003
1004 +#ifdef CONFIG_PAX_PAGEEXEC
1005 + if (fsr & FSR_LNX_PF) {
1006 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1007 + do_group_exit(SIGKILL);
1008 + }
1009 +#endif
1010 +
1011 tsk->thread.address = addr;
1012 tsk->thread.error_code = fsr;
1013 tsk->thread.trap_no = 14;
1014 @@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1015 }
1016 #endif /* CONFIG_MMU */
1017
1018 +#ifdef CONFIG_PAX_PAGEEXEC
1019 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1020 +{
1021 + long i;
1022 +
1023 + printk(KERN_ERR "PAX: bytes at PC: ");
1024 + for (i = 0; i < 20; i++) {
1025 + unsigned char c;
1026 + if (get_user(c, (__force unsigned char __user *)pc+i))
1027 + printk(KERN_CONT "?? ");
1028 + else
1029 + printk(KERN_CONT "%02x ", c);
1030 + }
1031 + printk("\n");
1032 +
1033 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1034 + for (i = -1; i < 20; i++) {
1035 + unsigned long c;
1036 + if (get_user(c, (__force unsigned long __user *)sp+i))
1037 + printk(KERN_CONT "???????? ");
1038 + else
1039 + printk(KERN_CONT "%08lx ", c);
1040 + }
1041 + printk("\n");
1042 +}
1043 +#endif
1044 +
1045 /*
1046 * First Level Translation Fault Handler
1047 *
1048 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1049 index 44b628e..623ee2a 100644
1050 --- a/arch/arm/mm/mmap.c
1051 +++ b/arch/arm/mm/mmap.c
1052 @@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1053 if (len > TASK_SIZE)
1054 return -ENOMEM;
1055
1056 +#ifdef CONFIG_PAX_RANDMMAP
1057 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1058 +#endif
1059 +
1060 if (addr) {
1061 if (do_align)
1062 addr = COLOUR_ALIGN(addr, pgoff);
1063 @@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1064 addr = PAGE_ALIGN(addr);
1065
1066 vma = find_vma(mm, addr);
1067 - if (TASK_SIZE - len >= addr &&
1068 - (!vma || addr + len <= vma->vm_start))
1069 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1070 return addr;
1071 }
1072 if (len > mm->cached_hole_size) {
1073 - start_addr = addr = mm->free_area_cache;
1074 + start_addr = addr = mm->free_area_cache;
1075 } else {
1076 - start_addr = addr = TASK_UNMAPPED_BASE;
1077 - mm->cached_hole_size = 0;
1078 + start_addr = addr = mm->mmap_base;
1079 + mm->cached_hole_size = 0;
1080 }
1081 /* 8 bits of randomness in 20 address space bits */
1082 if ((current->flags & PF_RANDOMIZE) &&
1083 @@ -89,14 +92,14 @@ full_search:
1084 * Start a new search - just in case we missed
1085 * some holes.
1086 */
1087 - if (start_addr != TASK_UNMAPPED_BASE) {
1088 - start_addr = addr = TASK_UNMAPPED_BASE;
1089 + if (start_addr != mm->mmap_base) {
1090 + start_addr = addr = mm->mmap_base;
1091 mm->cached_hole_size = 0;
1092 goto full_search;
1093 }
1094 return -ENOMEM;
1095 }
1096 - if (!vma || addr + len <= vma->vm_start) {
1097 + if (check_heap_stack_gap(vma, addr, len)) {
1098 /*
1099 * Remember the place where we stopped the search:
1100 */
1101 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1102 index 3b3159b..425ea94 100644
1103 --- a/arch/avr32/include/asm/elf.h
1104 +++ b/arch/avr32/include/asm/elf.h
1105 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1106 the loader. We need to make sure that it is out of the way of the program
1107 that it will "exec", and that there is sufficient room for the brk. */
1108
1109 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1110 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1111
1112 +#ifdef CONFIG_PAX_ASLR
1113 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1114 +
1115 +#define PAX_DELTA_MMAP_LEN 15
1116 +#define PAX_DELTA_STACK_LEN 15
1117 +#endif
1118
1119 /* This yields a mask that user programs can use to figure out what
1120 instruction set this CPU supports. This could be done in user space,
1121 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1122 index b7f5c68..556135c 100644
1123 --- a/arch/avr32/include/asm/kmap_types.h
1124 +++ b/arch/avr32/include/asm/kmap_types.h
1125 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1126 D(11) KM_IRQ1,
1127 D(12) KM_SOFTIRQ0,
1128 D(13) KM_SOFTIRQ1,
1129 -D(14) KM_TYPE_NR
1130 +D(14) KM_CLEARPAGE,
1131 +D(15) KM_TYPE_NR
1132 };
1133
1134 #undef D
1135 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1136 index f7040a1..db9f300 100644
1137 --- a/arch/avr32/mm/fault.c
1138 +++ b/arch/avr32/mm/fault.c
1139 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1140
1141 int exception_trace = 1;
1142
1143 +#ifdef CONFIG_PAX_PAGEEXEC
1144 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1145 +{
1146 + unsigned long i;
1147 +
1148 + printk(KERN_ERR "PAX: bytes at PC: ");
1149 + for (i = 0; i < 20; i++) {
1150 + unsigned char c;
1151 + if (get_user(c, (unsigned char *)pc+i))
1152 + printk(KERN_CONT "?? ");
1153 + else
1154 + printk(KERN_CONT "%02x ", c);
1155 + }
1156 + printk("\n");
1157 +}
1158 +#endif
1159 +
1160 /*
1161 * This routine handles page faults. It determines the address and the
1162 * problem, and then passes it off to one of the appropriate routines.
1163 @@ -156,6 +173,16 @@ bad_area:
1164 up_read(&mm->mmap_sem);
1165
1166 if (user_mode(regs)) {
1167 +
1168 +#ifdef CONFIG_PAX_PAGEEXEC
1169 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1170 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1171 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1172 + do_group_exit(SIGKILL);
1173 + }
1174 + }
1175 +#endif
1176 +
1177 if (exception_trace && printk_ratelimit())
1178 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1179 "sp %08lx ecr %lu\n",
1180 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1181 index 0d8a7d6..d0c9ff5 100644
1182 --- a/arch/frv/include/asm/atomic.h
1183 +++ b/arch/frv/include/asm/atomic.h
1184 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1185 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1186 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1187
1188 +#define atomic64_read_unchecked(v) atomic64_read(v)
1189 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1190 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1191 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1192 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1193 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1194 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1195 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1196 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1197 +
1198 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
1199 {
1200 int c, old;
1201 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1202 index f8e16b2..c73ff79 100644
1203 --- a/arch/frv/include/asm/kmap_types.h
1204 +++ b/arch/frv/include/asm/kmap_types.h
1205 @@ -23,6 +23,7 @@ enum km_type {
1206 KM_IRQ1,
1207 KM_SOFTIRQ0,
1208 KM_SOFTIRQ1,
1209 + KM_CLEARPAGE,
1210 KM_TYPE_NR
1211 };
1212
1213 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1214 index 385fd30..6c3d97e 100644
1215 --- a/arch/frv/mm/elf-fdpic.c
1216 +++ b/arch/frv/mm/elf-fdpic.c
1217 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1218 if (addr) {
1219 addr = PAGE_ALIGN(addr);
1220 vma = find_vma(current->mm, addr);
1221 - if (TASK_SIZE - len >= addr &&
1222 - (!vma || addr + len <= vma->vm_start))
1223 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1224 goto success;
1225 }
1226
1227 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1228 for (; vma; vma = vma->vm_next) {
1229 if (addr > limit)
1230 break;
1231 - if (addr + len <= vma->vm_start)
1232 + if (check_heap_stack_gap(vma, addr, len))
1233 goto success;
1234 addr = vma->vm_end;
1235 }
1236 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1237 for (; vma; vma = vma->vm_next) {
1238 if (addr > limit)
1239 break;
1240 - if (addr + len <= vma->vm_start)
1241 + if (check_heap_stack_gap(vma, addr, len))
1242 goto success;
1243 addr = vma->vm_end;
1244 }
1245 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
1246 index 3fad89e..3047da5 100644
1247 --- a/arch/ia64/include/asm/atomic.h
1248 +++ b/arch/ia64/include/asm/atomic.h
1249 @@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
1250 #define atomic64_inc(v) atomic64_add(1, (v))
1251 #define atomic64_dec(v) atomic64_sub(1, (v))
1252
1253 +#define atomic64_read_unchecked(v) atomic64_read(v)
1254 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1255 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1256 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1257 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1258 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1259 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1260 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1261 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1262 +
1263 /* Atomic operations are already serializing */
1264 #define smp_mb__before_atomic_dec() barrier()
1265 #define smp_mb__after_atomic_dec() barrier()
1266 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1267 index b5298eb..67c6e62 100644
1268 --- a/arch/ia64/include/asm/elf.h
1269 +++ b/arch/ia64/include/asm/elf.h
1270 @@ -42,6 +42,13 @@
1271 */
1272 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1273
1274 +#ifdef CONFIG_PAX_ASLR
1275 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1276 +
1277 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1278 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1279 +#endif
1280 +
1281 #define PT_IA_64_UNWIND 0x70000001
1282
1283 /* IA-64 relocations: */
1284 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1285 index 1a97af3..7529d31 100644
1286 --- a/arch/ia64/include/asm/pgtable.h
1287 +++ b/arch/ia64/include/asm/pgtable.h
1288 @@ -12,7 +12,7 @@
1289 * David Mosberger-Tang <davidm@hpl.hp.com>
1290 */
1291
1292 -
1293 +#include <linux/const.h>
1294 #include <asm/mman.h>
1295 #include <asm/page.h>
1296 #include <asm/processor.h>
1297 @@ -143,6 +143,17 @@
1298 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1299 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1300 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1301 +
1302 +#ifdef CONFIG_PAX_PAGEEXEC
1303 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1304 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1305 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1306 +#else
1307 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1308 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1309 +# define PAGE_COPY_NOEXEC PAGE_COPY
1310 +#endif
1311 +
1312 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1313 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1314 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1315 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1316 index b77768d..e0795eb 100644
1317 --- a/arch/ia64/include/asm/spinlock.h
1318 +++ b/arch/ia64/include/asm/spinlock.h
1319 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1320 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1321
1322 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1323 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1324 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1325 }
1326
1327 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1328 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1329 index 449c8c0..432a3d2 100644
1330 --- a/arch/ia64/include/asm/uaccess.h
1331 +++ b/arch/ia64/include/asm/uaccess.h
1332 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1333 const void *__cu_from = (from); \
1334 long __cu_len = (n); \
1335 \
1336 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1337 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1338 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1339 __cu_len; \
1340 })
1341 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1342 long __cu_len = (n); \
1343 \
1344 __chk_user_ptr(__cu_from); \
1345 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1346 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1347 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1348 __cu_len; \
1349 })
1350 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1351 index 24603be..948052d 100644
1352 --- a/arch/ia64/kernel/module.c
1353 +++ b/arch/ia64/kernel/module.c
1354 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1355 void
1356 module_free (struct module *mod, void *module_region)
1357 {
1358 - if (mod && mod->arch.init_unw_table &&
1359 - module_region == mod->module_init) {
1360 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1361 unw_remove_unwind_table(mod->arch.init_unw_table);
1362 mod->arch.init_unw_table = NULL;
1363 }
1364 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1365 }
1366
1367 static inline int
1368 +in_init_rx (const struct module *mod, uint64_t addr)
1369 +{
1370 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1371 +}
1372 +
1373 +static inline int
1374 +in_init_rw (const struct module *mod, uint64_t addr)
1375 +{
1376 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1377 +}
1378 +
1379 +static inline int
1380 in_init (const struct module *mod, uint64_t addr)
1381 {
1382 - return addr - (uint64_t) mod->module_init < mod->init_size;
1383 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1384 +}
1385 +
1386 +static inline int
1387 +in_core_rx (const struct module *mod, uint64_t addr)
1388 +{
1389 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1390 +}
1391 +
1392 +static inline int
1393 +in_core_rw (const struct module *mod, uint64_t addr)
1394 +{
1395 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1396 }
1397
1398 static inline int
1399 in_core (const struct module *mod, uint64_t addr)
1400 {
1401 - return addr - (uint64_t) mod->module_core < mod->core_size;
1402 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1403 }
1404
1405 static inline int
1406 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1407 break;
1408
1409 case RV_BDREL:
1410 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1411 + if (in_init_rx(mod, val))
1412 + val -= (uint64_t) mod->module_init_rx;
1413 + else if (in_init_rw(mod, val))
1414 + val -= (uint64_t) mod->module_init_rw;
1415 + else if (in_core_rx(mod, val))
1416 + val -= (uint64_t) mod->module_core_rx;
1417 + else if (in_core_rw(mod, val))
1418 + val -= (uint64_t) mod->module_core_rw;
1419 break;
1420
1421 case RV_LTV:
1422 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1423 * addresses have been selected...
1424 */
1425 uint64_t gp;
1426 - if (mod->core_size > MAX_LTOFF)
1427 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1428 /*
1429 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1430 * at the end of the module.
1431 */
1432 - gp = mod->core_size - MAX_LTOFF / 2;
1433 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1434 else
1435 - gp = mod->core_size / 2;
1436 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1437 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1438 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1439 mod->arch.gp = gp;
1440 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1441 }
1442 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1443 index 609d500..7dde2a8 100644
1444 --- a/arch/ia64/kernel/sys_ia64.c
1445 +++ b/arch/ia64/kernel/sys_ia64.c
1446 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1447 if (REGION_NUMBER(addr) == RGN_HPAGE)
1448 addr = 0;
1449 #endif
1450 +
1451 +#ifdef CONFIG_PAX_RANDMMAP
1452 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1453 + addr = mm->free_area_cache;
1454 + else
1455 +#endif
1456 +
1457 if (!addr)
1458 addr = mm->free_area_cache;
1459
1460 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1461 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1462 /* At this point: (!vma || addr < vma->vm_end). */
1463 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1464 - if (start_addr != TASK_UNMAPPED_BASE) {
1465 + if (start_addr != mm->mmap_base) {
1466 /* Start a new search --- just in case we missed some holes. */
1467 - addr = TASK_UNMAPPED_BASE;
1468 + addr = mm->mmap_base;
1469 goto full_search;
1470 }
1471 return -ENOMEM;
1472 }
1473 - if (!vma || addr + len <= vma->vm_start) {
1474 + if (check_heap_stack_gap(vma, addr, len)) {
1475 /* Remember the address where we stopped this search: */
1476 mm->free_area_cache = addr + len;
1477 return addr;
1478 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1479 index 53c0ba0..2accdde 100644
1480 --- a/arch/ia64/kernel/vmlinux.lds.S
1481 +++ b/arch/ia64/kernel/vmlinux.lds.S
1482 @@ -199,7 +199,7 @@ SECTIONS {
1483 /* Per-cpu data: */
1484 . = ALIGN(PERCPU_PAGE_SIZE);
1485 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1486 - __phys_per_cpu_start = __per_cpu_load;
1487 + __phys_per_cpu_start = per_cpu_load;
1488 /*
1489 * ensure percpu data fits
1490 * into percpu page size
1491 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1492 index 20b3593..1ce77f0 100644
1493 --- a/arch/ia64/mm/fault.c
1494 +++ b/arch/ia64/mm/fault.c
1495 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1496 return pte_present(pte);
1497 }
1498
1499 +#ifdef CONFIG_PAX_PAGEEXEC
1500 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1501 +{
1502 + unsigned long i;
1503 +
1504 + printk(KERN_ERR "PAX: bytes at PC: ");
1505 + for (i = 0; i < 8; i++) {
1506 + unsigned int c;
1507 + if (get_user(c, (unsigned int *)pc+i))
1508 + printk(KERN_CONT "???????? ");
1509 + else
1510 + printk(KERN_CONT "%08x ", c);
1511 + }
1512 + printk("\n");
1513 +}
1514 +#endif
1515 +
1516 void __kprobes
1517 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1518 {
1519 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1520 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1521 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1522
1523 - if ((vma->vm_flags & mask) != mask)
1524 + if ((vma->vm_flags & mask) != mask) {
1525 +
1526 +#ifdef CONFIG_PAX_PAGEEXEC
1527 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1528 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1529 + goto bad_area;
1530 +
1531 + up_read(&mm->mmap_sem);
1532 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1533 + do_group_exit(SIGKILL);
1534 + }
1535 +#endif
1536 +
1537 goto bad_area;
1538
1539 + }
1540 +
1541 /*
1542 * If for any reason at all we couldn't handle the fault, make
1543 * sure we exit gracefully rather than endlessly redo the
1544 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1545 index 5ca674b..e0e1b70 100644
1546 --- a/arch/ia64/mm/hugetlbpage.c
1547 +++ b/arch/ia64/mm/hugetlbpage.c
1548 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1549 /* At this point: (!vmm || addr < vmm->vm_end). */
1550 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1551 return -ENOMEM;
1552 - if (!vmm || (addr + len) <= vmm->vm_start)
1553 + if (check_heap_stack_gap(vmm, addr, len))
1554 return addr;
1555 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1556 }
1557 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1558 index 00cb0e2..2ad8024 100644
1559 --- a/arch/ia64/mm/init.c
1560 +++ b/arch/ia64/mm/init.c
1561 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1562 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1563 vma->vm_end = vma->vm_start + PAGE_SIZE;
1564 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1565 +
1566 +#ifdef CONFIG_PAX_PAGEEXEC
1567 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1568 + vma->vm_flags &= ~VM_EXEC;
1569 +
1570 +#ifdef CONFIG_PAX_MPROTECT
1571 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1572 + vma->vm_flags &= ~VM_MAYEXEC;
1573 +#endif
1574 +
1575 + }
1576 +#endif
1577 +
1578 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1579 down_write(&current->mm->mmap_sem);
1580 if (insert_vm_struct(current->mm, vma)) {
1581 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1582 index 82abd15..d95ae5d 100644
1583 --- a/arch/m32r/lib/usercopy.c
1584 +++ b/arch/m32r/lib/usercopy.c
1585 @@ -14,6 +14,9 @@
1586 unsigned long
1587 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1588 {
1589 + if ((long)n < 0)
1590 + return n;
1591 +
1592 prefetch(from);
1593 if (access_ok(VERIFY_WRITE, to, n))
1594 __copy_user(to,from,n);
1595 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1596 unsigned long
1597 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1598 {
1599 + if ((long)n < 0)
1600 + return n;
1601 +
1602 prefetchw(to);
1603 if (access_ok(VERIFY_READ, from, n))
1604 __copy_user_zeroing(to,from,n);
1605 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
1606 index 1d93f81..67794d0 100644
1607 --- a/arch/mips/include/asm/atomic.h
1608 +++ b/arch/mips/include/asm/atomic.h
1609 @@ -21,6 +21,10 @@
1610 #include <asm/war.h>
1611 #include <asm/system.h>
1612
1613 +#ifdef CONFIG_GENERIC_ATOMIC64
1614 +#include <asm-generic/atomic64.h>
1615 +#endif
1616 +
1617 #define ATOMIC_INIT(i) { (i) }
1618
1619 /*
1620 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
1621 */
1622 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
1623
1624 +#define atomic64_read_unchecked(v) atomic64_read(v)
1625 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1626 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1627 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1628 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1629 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1630 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1631 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1632 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1633 +
1634 #endif /* CONFIG_64BIT */
1635
1636 /*
1637 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1638 index 455c0ac..ad65fbe 100644
1639 --- a/arch/mips/include/asm/elf.h
1640 +++ b/arch/mips/include/asm/elf.h
1641 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1642 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1643 #endif
1644
1645 +#ifdef CONFIG_PAX_ASLR
1646 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1647 +
1648 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1649 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1650 +#endif
1651 +
1652 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1653 struct linux_binprm;
1654 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1655 int uses_interp);
1656
1657 -struct mm_struct;
1658 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1659 -#define arch_randomize_brk arch_randomize_brk
1660 -
1661 #endif /* _ASM_ELF_H */
1662 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1663 index e59cd1a..8e329d6 100644
1664 --- a/arch/mips/include/asm/page.h
1665 +++ b/arch/mips/include/asm/page.h
1666 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1667 #ifdef CONFIG_CPU_MIPS32
1668 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1669 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1670 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1671 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1672 #else
1673 typedef struct { unsigned long long pte; } pte_t;
1674 #define pte_val(x) ((x).pte)
1675 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1676 index 6018c80..7c37203 100644
1677 --- a/arch/mips/include/asm/system.h
1678 +++ b/arch/mips/include/asm/system.h
1679 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1680 */
1681 #define __ARCH_WANT_UNLOCKED_CTXSW
1682
1683 -extern unsigned long arch_align_stack(unsigned long sp);
1684 +#define arch_align_stack(x) ((x) & ~0xfUL)
1685
1686 #endif /* _ASM_SYSTEM_H */
1687 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1688 index 9fdd8bc..4bd7f1a 100644
1689 --- a/arch/mips/kernel/binfmt_elfn32.c
1690 +++ b/arch/mips/kernel/binfmt_elfn32.c
1691 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1692 #undef ELF_ET_DYN_BASE
1693 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1694
1695 +#ifdef CONFIG_PAX_ASLR
1696 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1697 +
1698 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1699 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1700 +#endif
1701 +
1702 #include <asm/processor.h>
1703 #include <linux/module.h>
1704 #include <linux/elfcore.h>
1705 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1706 index ff44823..97f8906 100644
1707 --- a/arch/mips/kernel/binfmt_elfo32.c
1708 +++ b/arch/mips/kernel/binfmt_elfo32.c
1709 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1710 #undef ELF_ET_DYN_BASE
1711 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1712
1713 +#ifdef CONFIG_PAX_ASLR
1714 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1715 +
1716 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1717 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1718 +#endif
1719 +
1720 #include <asm/processor.h>
1721
1722 /*
1723 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1724 index c47f96e..661d418 100644
1725 --- a/arch/mips/kernel/process.c
1726 +++ b/arch/mips/kernel/process.c
1727 @@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1728 out:
1729 return pc;
1730 }
1731 -
1732 -/*
1733 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1734 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1735 - */
1736 -unsigned long arch_align_stack(unsigned long sp)
1737 -{
1738 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1739 - sp -= get_random_int() & ~PAGE_MASK;
1740 -
1741 - return sp & ALMASK;
1742 -}
1743 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1744 index 937cf33..adb39bb 100644
1745 --- a/arch/mips/mm/fault.c
1746 +++ b/arch/mips/mm/fault.c
1747 @@ -28,6 +28,23 @@
1748 #include <asm/highmem.h> /* For VMALLOC_END */
1749 #include <linux/kdebug.h>
1750
1751 +#ifdef CONFIG_PAX_PAGEEXEC
1752 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1753 +{
1754 + unsigned long i;
1755 +
1756 + printk(KERN_ERR "PAX: bytes at PC: ");
1757 + for (i = 0; i < 5; i++) {
1758 + unsigned int c;
1759 + if (get_user(c, (unsigned int *)pc+i))
1760 + printk(KERN_CONT "???????? ");
1761 + else
1762 + printk(KERN_CONT "%08x ", c);
1763 + }
1764 + printk("\n");
1765 +}
1766 +#endif
1767 +
1768 /*
1769 * This routine handles page faults. It determines the address,
1770 * and the problem, and then passes it off to one of the appropriate
1771 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1772 index 302d779..7d35bf8 100644
1773 --- a/arch/mips/mm/mmap.c
1774 +++ b/arch/mips/mm/mmap.c
1775 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1776 do_color_align = 1;
1777
1778 /* requesting a specific address */
1779 +
1780 +#ifdef CONFIG_PAX_RANDMMAP
1781 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1782 +#endif
1783 +
1784 if (addr) {
1785 if (do_color_align)
1786 addr = COLOUR_ALIGN(addr, pgoff);
1787 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1788 addr = PAGE_ALIGN(addr);
1789
1790 vma = find_vma(mm, addr);
1791 - if (TASK_SIZE - len >= addr &&
1792 - (!vma || addr + len <= vma->vm_start))
1793 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1794 return addr;
1795 }
1796
1797 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1798 /* At this point: (!vma || addr < vma->vm_end). */
1799 if (TASK_SIZE - len < addr)
1800 return -ENOMEM;
1801 - if (!vma || addr + len <= vma->vm_start)
1802 + if (check_heap_stack_gap(vmm, addr, len))
1803 return addr;
1804 addr = vma->vm_end;
1805 if (do_color_align)
1806 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1807 /* make sure it can fit in the remaining address space */
1808 if (likely(addr > len)) {
1809 vma = find_vma(mm, addr - len);
1810 - if (!vma || addr <= vma->vm_start) {
1811 + if (check_heap_stack_gap(vmm, addr - len, len))
1812 /* cache the address as a hint for next time */
1813 return mm->free_area_cache = addr - len;
1814 }
1815 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1816 * return with success:
1817 */
1818 vma = find_vma(mm, addr);
1819 - if (likely(!vma || addr + len <= vma->vm_start)) {
1820 + if (check_heap_stack_gap(vmm, addr, len)) {
1821 /* cache the address as a hint for next time */
1822 return mm->free_area_cache = addr;
1823 }
1824 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1825 mm->unmap_area = arch_unmap_area_topdown;
1826 }
1827 }
1828 -
1829 -static inline unsigned long brk_rnd(void)
1830 -{
1831 - unsigned long rnd = get_random_int();
1832 -
1833 - rnd = rnd << PAGE_SHIFT;
1834 - /* 8MB for 32bit, 256MB for 64bit */
1835 - if (TASK_IS_32BIT_ADDR)
1836 - rnd = rnd & 0x7ffffful;
1837 - else
1838 - rnd = rnd & 0xffffffful;
1839 -
1840 - return rnd;
1841 -}
1842 -
1843 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1844 -{
1845 - unsigned long base = mm->brk;
1846 - unsigned long ret;
1847 -
1848 - ret = PAGE_ALIGN(base + brk_rnd());
1849 -
1850 - if (ret < mm->brk)
1851 - return mm->brk;
1852 -
1853 - return ret;
1854 -}
1855 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
1856 index 4054b31..a10c105 100644
1857 --- a/arch/parisc/include/asm/atomic.h
1858 +++ b/arch/parisc/include/asm/atomic.h
1859 @@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
1860
1861 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
1862
1863 +#define atomic64_read_unchecked(v) atomic64_read(v)
1864 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1865 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1866 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1867 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1868 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
1869 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1870 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
1871 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1872 +
1873 #endif /* !CONFIG_64BIT */
1874
1875
1876 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1877 index 19f6cb1..6c78cf2 100644
1878 --- a/arch/parisc/include/asm/elf.h
1879 +++ b/arch/parisc/include/asm/elf.h
1880 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1881
1882 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1883
1884 +#ifdef CONFIG_PAX_ASLR
1885 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1886 +
1887 +#define PAX_DELTA_MMAP_LEN 16
1888 +#define PAX_DELTA_STACK_LEN 16
1889 +#endif
1890 +
1891 /* This yields a mask that user programs can use to figure out what
1892 instruction set this CPU supports. This could be done in user space,
1893 but it's not easy, and we've already done it here. */
1894 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1895 index 22dadeb..f6c2be4 100644
1896 --- a/arch/parisc/include/asm/pgtable.h
1897 +++ b/arch/parisc/include/asm/pgtable.h
1898 @@ -210,6 +210,17 @@ struct vm_area_struct;
1899 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1900 #define PAGE_COPY PAGE_EXECREAD
1901 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1902 +
1903 +#ifdef CONFIG_PAX_PAGEEXEC
1904 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1905 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1906 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1907 +#else
1908 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1909 +# define PAGE_COPY_NOEXEC PAGE_COPY
1910 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1911 +#endif
1912 +
1913 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1914 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1915 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1916 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1917 index 5e34ccf..672bc9c 100644
1918 --- a/arch/parisc/kernel/module.c
1919 +++ b/arch/parisc/kernel/module.c
1920 @@ -98,16 +98,38 @@
1921
1922 /* three functions to determine where in the module core
1923 * or init pieces the location is */
1924 +static inline int in_init_rx(struct module *me, void *loc)
1925 +{
1926 + return (loc >= me->module_init_rx &&
1927 + loc < (me->module_init_rx + me->init_size_rx));
1928 +}
1929 +
1930 +static inline int in_init_rw(struct module *me, void *loc)
1931 +{
1932 + return (loc >= me->module_init_rw &&
1933 + loc < (me->module_init_rw + me->init_size_rw));
1934 +}
1935 +
1936 static inline int in_init(struct module *me, void *loc)
1937 {
1938 - return (loc >= me->module_init &&
1939 - loc <= (me->module_init + me->init_size));
1940 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1941 +}
1942 +
1943 +static inline int in_core_rx(struct module *me, void *loc)
1944 +{
1945 + return (loc >= me->module_core_rx &&
1946 + loc < (me->module_core_rx + me->core_size_rx));
1947 +}
1948 +
1949 +static inline int in_core_rw(struct module *me, void *loc)
1950 +{
1951 + return (loc >= me->module_core_rw &&
1952 + loc < (me->module_core_rw + me->core_size_rw));
1953 }
1954
1955 static inline int in_core(struct module *me, void *loc)
1956 {
1957 - return (loc >= me->module_core &&
1958 - loc <= (me->module_core + me->core_size));
1959 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1960 }
1961
1962 static inline int in_local(struct module *me, void *loc)
1963 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1964 }
1965
1966 /* align things a bit */
1967 - me->core_size = ALIGN(me->core_size, 16);
1968 - me->arch.got_offset = me->core_size;
1969 - me->core_size += gots * sizeof(struct got_entry);
1970 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1971 + me->arch.got_offset = me->core_size_rw;
1972 + me->core_size_rw += gots * sizeof(struct got_entry);
1973
1974 - me->core_size = ALIGN(me->core_size, 16);
1975 - me->arch.fdesc_offset = me->core_size;
1976 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1977 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1978 + me->arch.fdesc_offset = me->core_size_rw;
1979 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1980
1981 me->arch.got_max = gots;
1982 me->arch.fdesc_max = fdescs;
1983 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1984
1985 BUG_ON(value == 0);
1986
1987 - got = me->module_core + me->arch.got_offset;
1988 + got = me->module_core_rw + me->arch.got_offset;
1989 for (i = 0; got[i].addr; i++)
1990 if (got[i].addr == value)
1991 goto out;
1992 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1993 #ifdef CONFIG_64BIT
1994 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1995 {
1996 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1997 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1998
1999 if (!value) {
2000 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2001 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2002
2003 /* Create new one */
2004 fdesc->addr = value;
2005 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2006 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2007 return (Elf_Addr)fdesc;
2008 }
2009 #endif /* CONFIG_64BIT */
2010 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
2011
2012 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2013 end = table + sechdrs[me->arch.unwind_section].sh_size;
2014 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2015 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2016
2017 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2018 me->arch.unwind_section, table, end, gp);
2019 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2020 index c9b9322..02d8940 100644
2021 --- a/arch/parisc/kernel/sys_parisc.c
2022 +++ b/arch/parisc/kernel/sys_parisc.c
2023 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2024 /* At this point: (!vma || addr < vma->vm_end). */
2025 if (TASK_SIZE - len < addr)
2026 return -ENOMEM;
2027 - if (!vma || addr + len <= vma->vm_start)
2028 + if (check_heap_stack_gap(vma, addr, len))
2029 return addr;
2030 addr = vma->vm_end;
2031 }
2032 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2033 /* At this point: (!vma || addr < vma->vm_end). */
2034 if (TASK_SIZE - len < addr)
2035 return -ENOMEM;
2036 - if (!vma || addr + len <= vma->vm_start)
2037 + if (check_heap_stack_gap(vma, addr, len))
2038 return addr;
2039 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2040 if (addr < vma->vm_end) /* handle wraparound */
2041 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2042 if (flags & MAP_FIXED)
2043 return addr;
2044 if (!addr)
2045 - addr = TASK_UNMAPPED_BASE;
2046 + addr = current->mm->mmap_base;
2047
2048 if (filp) {
2049 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2050 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2051 index f19e660..414fe24 100644
2052 --- a/arch/parisc/kernel/traps.c
2053 +++ b/arch/parisc/kernel/traps.c
2054 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2055
2056 down_read(&current->mm->mmap_sem);
2057 vma = find_vma(current->mm,regs->iaoq[0]);
2058 - if (vma && (regs->iaoq[0] >= vma->vm_start)
2059 - && (vma->vm_flags & VM_EXEC)) {
2060 -
2061 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2062 fault_address = regs->iaoq[0];
2063 fault_space = regs->iasq[0];
2064
2065 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2066 index 18162ce..94de376 100644
2067 --- a/arch/parisc/mm/fault.c
2068 +++ b/arch/parisc/mm/fault.c
2069 @@ -15,6 +15,7 @@
2070 #include <linux/sched.h>
2071 #include <linux/interrupt.h>
2072 #include <linux/module.h>
2073 +#include <linux/unistd.h>
2074
2075 #include <asm/uaccess.h>
2076 #include <asm/traps.h>
2077 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2078 static unsigned long
2079 parisc_acctyp(unsigned long code, unsigned int inst)
2080 {
2081 - if (code == 6 || code == 16)
2082 + if (code == 6 || code == 7 || code == 16)
2083 return VM_EXEC;
2084
2085 switch (inst & 0xf0000000) {
2086 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2087 }
2088 #endif
2089
2090 +#ifdef CONFIG_PAX_PAGEEXEC
2091 +/*
2092 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2093 + *
2094 + * returns 1 when task should be killed
2095 + * 2 when rt_sigreturn trampoline was detected
2096 + * 3 when unpatched PLT trampoline was detected
2097 + */
2098 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2099 +{
2100 +
2101 +#ifdef CONFIG_PAX_EMUPLT
2102 + int err;
2103 +
2104 + do { /* PaX: unpatched PLT emulation */
2105 + unsigned int bl, depwi;
2106 +
2107 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2108 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2109 +
2110 + if (err)
2111 + break;
2112 +
2113 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2114 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2115 +
2116 + err = get_user(ldw, (unsigned int *)addr);
2117 + err |= get_user(bv, (unsigned int *)(addr+4));
2118 + err |= get_user(ldw2, (unsigned int *)(addr+8));
2119 +
2120 + if (err)
2121 + break;
2122 +
2123 + if (ldw == 0x0E801096U &&
2124 + bv == 0xEAC0C000U &&
2125 + ldw2 == 0x0E881095U)
2126 + {
2127 + unsigned int resolver, map;
2128 +
2129 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2130 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2131 + if (err)
2132 + break;
2133 +
2134 + regs->gr[20] = instruction_pointer(regs)+8;
2135 + regs->gr[21] = map;
2136 + regs->gr[22] = resolver;
2137 + regs->iaoq[0] = resolver | 3UL;
2138 + regs->iaoq[1] = regs->iaoq[0] + 4;
2139 + return 3;
2140 + }
2141 + }
2142 + } while (0);
2143 +#endif
2144 +
2145 +#ifdef CONFIG_PAX_EMUTRAMP
2146 +
2147 +#ifndef CONFIG_PAX_EMUSIGRT
2148 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2149 + return 1;
2150 +#endif
2151 +
2152 + do { /* PaX: rt_sigreturn emulation */
2153 + unsigned int ldi1, ldi2, bel, nop;
2154 +
2155 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2156 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2157 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2158 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2159 +
2160 + if (err)
2161 + break;
2162 +
2163 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2164 + ldi2 == 0x3414015AU &&
2165 + bel == 0xE4008200U &&
2166 + nop == 0x08000240U)
2167 + {
2168 + regs->gr[25] = (ldi1 & 2) >> 1;
2169 + regs->gr[20] = __NR_rt_sigreturn;
2170 + regs->gr[31] = regs->iaoq[1] + 16;
2171 + regs->sr[0] = regs->iasq[1];
2172 + regs->iaoq[0] = 0x100UL;
2173 + regs->iaoq[1] = regs->iaoq[0] + 4;
2174 + regs->iasq[0] = regs->sr[2];
2175 + regs->iasq[1] = regs->sr[2];
2176 + return 2;
2177 + }
2178 + } while (0);
2179 +#endif
2180 +
2181 + return 1;
2182 +}
2183 +
2184 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2185 +{
2186 + unsigned long i;
2187 +
2188 + printk(KERN_ERR "PAX: bytes at PC: ");
2189 + for (i = 0; i < 5; i++) {
2190 + unsigned int c;
2191 + if (get_user(c, (unsigned int *)pc+i))
2192 + printk(KERN_CONT "???????? ");
2193 + else
2194 + printk(KERN_CONT "%08x ", c);
2195 + }
2196 + printk("\n");
2197 +}
2198 +#endif
2199 +
2200 int fixup_exception(struct pt_regs *regs)
2201 {
2202 const struct exception_table_entry *fix;
2203 @@ -192,8 +303,33 @@ good_area:
2204
2205 acc_type = parisc_acctyp(code,regs->iir);
2206
2207 - if ((vma->vm_flags & acc_type) != acc_type)
2208 + if ((vma->vm_flags & acc_type) != acc_type) {
2209 +
2210 +#ifdef CONFIG_PAX_PAGEEXEC
2211 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2212 + (address & ~3UL) == instruction_pointer(regs))
2213 + {
2214 + up_read(&mm->mmap_sem);
2215 + switch (pax_handle_fetch_fault(regs)) {
2216 +
2217 +#ifdef CONFIG_PAX_EMUPLT
2218 + case 3:
2219 + return;
2220 +#endif
2221 +
2222 +#ifdef CONFIG_PAX_EMUTRAMP
2223 + case 2:
2224 + return;
2225 +#endif
2226 +
2227 + }
2228 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2229 + do_group_exit(SIGKILL);
2230 + }
2231 +#endif
2232 +
2233 goto bad_area;
2234 + }
2235
2236 /*
2237 * If for any reason at all we couldn't handle the fault, make
2238 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
2239 index 02e41b5..ec6e26c 100644
2240 --- a/arch/powerpc/include/asm/atomic.h
2241 +++ b/arch/powerpc/include/asm/atomic.h
2242 @@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2243
2244 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2245
2246 +#define atomic64_read_unchecked(v) atomic64_read(v)
2247 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2248 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2249 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2250 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2251 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2252 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2253 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2254 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2255 +
2256 #endif /* __powerpc64__ */
2257
2258 #endif /* __KERNEL__ */
2259 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2260 index 3bf9cca..e7457d0 100644
2261 --- a/arch/powerpc/include/asm/elf.h
2262 +++ b/arch/powerpc/include/asm/elf.h
2263 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2264 the loader. We need to make sure that it is out of the way of the program
2265 that it will "exec", and that there is sufficient room for the brk. */
2266
2267 -extern unsigned long randomize_et_dyn(unsigned long base);
2268 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2269 +#define ELF_ET_DYN_BASE (0x20000000)
2270 +
2271 +#ifdef CONFIG_PAX_ASLR
2272 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2273 +
2274 +#ifdef __powerpc64__
2275 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2276 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2277 +#else
2278 +#define PAX_DELTA_MMAP_LEN 15
2279 +#define PAX_DELTA_STACK_LEN 15
2280 +#endif
2281 +#endif
2282
2283 /*
2284 * Our registers are always unsigned longs, whether we're a 32 bit
2285 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2286 (0x7ff >> (PAGE_SHIFT - 12)) : \
2287 (0x3ffff >> (PAGE_SHIFT - 12)))
2288
2289 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2290 -#define arch_randomize_brk arch_randomize_brk
2291 -
2292 #endif /* __KERNEL__ */
2293
2294 /*
2295 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2296 index bca8fdc..61e9580 100644
2297 --- a/arch/powerpc/include/asm/kmap_types.h
2298 +++ b/arch/powerpc/include/asm/kmap_types.h
2299 @@ -27,6 +27,7 @@ enum km_type {
2300 KM_PPC_SYNC_PAGE,
2301 KM_PPC_SYNC_ICACHE,
2302 KM_KDB,
2303 + KM_CLEARPAGE,
2304 KM_TYPE_NR
2305 };
2306
2307 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2308 index d4a7f64..451de1c 100644
2309 --- a/arch/powerpc/include/asm/mman.h
2310 +++ b/arch/powerpc/include/asm/mman.h
2311 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2312 }
2313 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2314
2315 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2316 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2317 {
2318 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2319 }
2320 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2321 index dd9c4fd..a2ced87 100644
2322 --- a/arch/powerpc/include/asm/page.h
2323 +++ b/arch/powerpc/include/asm/page.h
2324 @@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
2325 * and needs to be executable. This means the whole heap ends
2326 * up being executable.
2327 */
2328 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2329 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2330 +#define VM_DATA_DEFAULT_FLAGS32 \
2331 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2332 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2333
2334 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2335 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2336 @@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
2337 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2338 #endif
2339
2340 +#define ktla_ktva(addr) (addr)
2341 +#define ktva_ktla(addr) (addr)
2342 +
2343 /*
2344 * Use the top bit of the higher-level page table entries to indicate whether
2345 * the entries we point to contain hugepages. This works because we know that
2346 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2347 index fb40ede..d3ce956 100644
2348 --- a/arch/powerpc/include/asm/page_64.h
2349 +++ b/arch/powerpc/include/asm/page_64.h
2350 @@ -144,15 +144,18 @@ do { \
2351 * stack by default, so in the absence of a PT_GNU_STACK program header
2352 * we turn execute permission off.
2353 */
2354 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2355 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2356 +#define VM_STACK_DEFAULT_FLAGS32 \
2357 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2358 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2359
2360 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2361 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2362
2363 +#ifndef CONFIG_PAX_PAGEEXEC
2364 #define VM_STACK_DEFAULT_FLAGS \
2365 (is_32bit_task() ? \
2366 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2367 +#endif
2368
2369 #include <asm-generic/getorder.h>
2370
2371 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2372 index 88b0bd9..e32bc67 100644
2373 --- a/arch/powerpc/include/asm/pgtable.h
2374 +++ b/arch/powerpc/include/asm/pgtable.h
2375 @@ -2,6 +2,7 @@
2376 #define _ASM_POWERPC_PGTABLE_H
2377 #ifdef __KERNEL__
2378
2379 +#include <linux/const.h>
2380 #ifndef __ASSEMBLY__
2381 #include <asm/processor.h> /* For TASK_SIZE */
2382 #include <asm/mmu.h>
2383 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2384 index 4aad413..85d86bf 100644
2385 --- a/arch/powerpc/include/asm/pte-hash32.h
2386 +++ b/arch/powerpc/include/asm/pte-hash32.h
2387 @@ -21,6 +21,7 @@
2388 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2389 #define _PAGE_USER 0x004 /* usermode access allowed */
2390 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2391 +#define _PAGE_EXEC _PAGE_GUARDED
2392 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2393 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2394 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2395 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2396 index 559da19..7e5835c 100644
2397 --- a/arch/powerpc/include/asm/reg.h
2398 +++ b/arch/powerpc/include/asm/reg.h
2399 @@ -212,6 +212,7 @@
2400 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2401 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2402 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2403 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2404 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2405 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2406 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2407 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2408 index e30a13d..2b7d994 100644
2409 --- a/arch/powerpc/include/asm/system.h
2410 +++ b/arch/powerpc/include/asm/system.h
2411 @@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2412 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2413 #endif
2414
2415 -extern unsigned long arch_align_stack(unsigned long sp);
2416 +#define arch_align_stack(x) ((x) & ~0xfUL)
2417
2418 /* Used in very early kernel initialization. */
2419 extern unsigned long reloc_offset(void);
2420 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2421 index bd0fb84..a42a14b 100644
2422 --- a/arch/powerpc/include/asm/uaccess.h
2423 +++ b/arch/powerpc/include/asm/uaccess.h
2424 @@ -13,6 +13,8 @@
2425 #define VERIFY_READ 0
2426 #define VERIFY_WRITE 1
2427
2428 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2429 +
2430 /*
2431 * The fs value determines whether argument validity checking should be
2432 * performed or not. If get_fs() == USER_DS, checking is performed, with
2433 @@ -327,52 +329,6 @@ do { \
2434 extern unsigned long __copy_tofrom_user(void __user *to,
2435 const void __user *from, unsigned long size);
2436
2437 -#ifndef __powerpc64__
2438 -
2439 -static inline unsigned long copy_from_user(void *to,
2440 - const void __user *from, unsigned long n)
2441 -{
2442 - unsigned long over;
2443 -
2444 - if (access_ok(VERIFY_READ, from, n))
2445 - return __copy_tofrom_user((__force void __user *)to, from, n);
2446 - if ((unsigned long)from < TASK_SIZE) {
2447 - over = (unsigned long)from + n - TASK_SIZE;
2448 - return __copy_tofrom_user((__force void __user *)to, from,
2449 - n - over) + over;
2450 - }
2451 - return n;
2452 -}
2453 -
2454 -static inline unsigned long copy_to_user(void __user *to,
2455 - const void *from, unsigned long n)
2456 -{
2457 - unsigned long over;
2458 -
2459 - if (access_ok(VERIFY_WRITE, to, n))
2460 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2461 - if ((unsigned long)to < TASK_SIZE) {
2462 - over = (unsigned long)to + n - TASK_SIZE;
2463 - return __copy_tofrom_user(to, (__force void __user *)from,
2464 - n - over) + over;
2465 - }
2466 - return n;
2467 -}
2468 -
2469 -#else /* __powerpc64__ */
2470 -
2471 -#define __copy_in_user(to, from, size) \
2472 - __copy_tofrom_user((to), (from), (size))
2473 -
2474 -extern unsigned long copy_from_user(void *to, const void __user *from,
2475 - unsigned long n);
2476 -extern unsigned long copy_to_user(void __user *to, const void *from,
2477 - unsigned long n);
2478 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2479 - unsigned long n);
2480 -
2481 -#endif /* __powerpc64__ */
2482 -
2483 static inline unsigned long __copy_from_user_inatomic(void *to,
2484 const void __user *from, unsigned long n)
2485 {
2486 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2487 if (ret == 0)
2488 return 0;
2489 }
2490 +
2491 + if (!__builtin_constant_p(n))
2492 + check_object_size(to, n, false);
2493 +
2494 return __copy_tofrom_user((__force void __user *)to, from, n);
2495 }
2496
2497 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2498 if (ret == 0)
2499 return 0;
2500 }
2501 +
2502 + if (!__builtin_constant_p(n))
2503 + check_object_size(from, n, true);
2504 +
2505 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2506 }
2507
2508 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2509 return __copy_to_user_inatomic(to, from, size);
2510 }
2511
2512 +#ifndef __powerpc64__
2513 +
2514 +static inline unsigned long __must_check copy_from_user(void *to,
2515 + const void __user *from, unsigned long n)
2516 +{
2517 + unsigned long over;
2518 +
2519 + if ((long)n < 0)
2520 + return n;
2521 +
2522 + if (access_ok(VERIFY_READ, from, n)) {
2523 + if (!__builtin_constant_p(n))
2524 + check_object_size(to, n, false);
2525 + return __copy_tofrom_user((__force void __user *)to, from, n);
2526 + }
2527 + if ((unsigned long)from < TASK_SIZE) {
2528 + over = (unsigned long)from + n - TASK_SIZE;
2529 + if (!__builtin_constant_p(n - over))
2530 + check_object_size(to, n - over, false);
2531 + return __copy_tofrom_user((__force void __user *)to, from,
2532 + n - over) + over;
2533 + }
2534 + return n;
2535 +}
2536 +
2537 +static inline unsigned long __must_check copy_to_user(void __user *to,
2538 + const void *from, unsigned long n)
2539 +{
2540 + unsigned long over;
2541 +
2542 + if ((long)n < 0)
2543 + return n;
2544 +
2545 + if (access_ok(VERIFY_WRITE, to, n)) {
2546 + if (!__builtin_constant_p(n))
2547 + check_object_size(from, n, true);
2548 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2549 + }
2550 + if ((unsigned long)to < TASK_SIZE) {
2551 + over = (unsigned long)to + n - TASK_SIZE;
2552 + if (!__builtin_constant_p(n))
2553 + check_object_size(from, n - over, true);
2554 + return __copy_tofrom_user(to, (__force void __user *)from,
2555 + n - over) + over;
2556 + }
2557 + return n;
2558 +}
2559 +
2560 +#else /* __powerpc64__ */
2561 +
2562 +#define __copy_in_user(to, from, size) \
2563 + __copy_tofrom_user((to), (from), (size))
2564 +
2565 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2566 +{
2567 + if ((long)n < 0 || n > INT_MAX)
2568 + return n;
2569 +
2570 + if (!__builtin_constant_p(n))
2571 + check_object_size(to, n, false);
2572 +
2573 + if (likely(access_ok(VERIFY_READ, from, n)))
2574 + n = __copy_from_user(to, from, n);
2575 + else
2576 + memset(to, 0, n);
2577 + return n;
2578 +}
2579 +
2580 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2581 +{
2582 + if ((long)n < 0 || n > INT_MAX)
2583 + return n;
2584 +
2585 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2586 + if (!__builtin_constant_p(n))
2587 + check_object_size(from, n, true);
2588 + n = __copy_to_user(to, from, n);
2589 + }
2590 + return n;
2591 +}
2592 +
2593 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2594 + unsigned long n);
2595 +
2596 +#endif /* __powerpc64__ */
2597 +
2598 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2599
2600 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2601 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2602 index 429983c..7af363b 100644
2603 --- a/arch/powerpc/kernel/exceptions-64e.S
2604 +++ b/arch/powerpc/kernel/exceptions-64e.S
2605 @@ -587,6 +587,7 @@ storage_fault_common:
2606 std r14,_DAR(r1)
2607 std r15,_DSISR(r1)
2608 addi r3,r1,STACK_FRAME_OVERHEAD
2609 + bl .save_nvgprs
2610 mr r4,r14
2611 mr r5,r15
2612 ld r14,PACA_EXGEN+EX_R14(r13)
2613 @@ -596,8 +597,7 @@ storage_fault_common:
2614 cmpdi r3,0
2615 bne- 1f
2616 b .ret_from_except_lite
2617 -1: bl .save_nvgprs
2618 - mr r5,r3
2619 +1: mr r5,r3
2620 addi r3,r1,STACK_FRAME_OVERHEAD
2621 ld r4,_DAR(r1)
2622 bl .bad_page_fault
2623 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2624 index cf9c69b..ebc9640 100644
2625 --- a/arch/powerpc/kernel/exceptions-64s.S
2626 +++ b/arch/powerpc/kernel/exceptions-64s.S
2627 @@ -1004,10 +1004,10 @@ handle_page_fault:
2628 11: ld r4,_DAR(r1)
2629 ld r5,_DSISR(r1)
2630 addi r3,r1,STACK_FRAME_OVERHEAD
2631 + bl .save_nvgprs
2632 bl .do_page_fault
2633 cmpdi r3,0
2634 beq+ 13f
2635 - bl .save_nvgprs
2636 mr r5,r3
2637 addi r3,r1,STACK_FRAME_OVERHEAD
2638 lwz r4,_DAR(r1)
2639 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2640 index 0b6d796..d760ddb 100644
2641 --- a/arch/powerpc/kernel/module_32.c
2642 +++ b/arch/powerpc/kernel/module_32.c
2643 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2644 me->arch.core_plt_section = i;
2645 }
2646 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2647 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2648 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2649 return -ENOEXEC;
2650 }
2651
2652 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2653
2654 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2655 /* Init, or core PLT? */
2656 - if (location >= mod->module_core
2657 - && location < mod->module_core + mod->core_size)
2658 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2659 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2660 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2661 - else
2662 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2663 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2664 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2665 + else {
2666 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2667 + return ~0UL;
2668 + }
2669
2670 /* Find this entry, or if that fails, the next avail. entry */
2671 while (entry->jump[0]) {
2672 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2673 index 6457574..08b28d3 100644
2674 --- a/arch/powerpc/kernel/process.c
2675 +++ b/arch/powerpc/kernel/process.c
2676 @@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
2677 * Lookup NIP late so we have the best change of getting the
2678 * above info out without failing
2679 */
2680 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2681 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2682 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2683 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2684 #endif
2685 show_stack(current, (unsigned long *) regs->gpr[1]);
2686 if (!user_mode(regs))
2687 @@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2688 newsp = stack[0];
2689 ip = stack[STACK_FRAME_LR_SAVE];
2690 if (!firstframe || ip != lr) {
2691 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2692 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2693 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2694 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2695 - printk(" (%pS)",
2696 + printk(" (%pA)",
2697 (void *)current->ret_stack[curr_frame].ret);
2698 curr_frame--;
2699 }
2700 @@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2701 struct pt_regs *regs = (struct pt_regs *)
2702 (sp + STACK_FRAME_OVERHEAD);
2703 lr = regs->link;
2704 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2705 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2706 regs->trap, (void *)regs->nip, (void *)lr);
2707 firstframe = 1;
2708 }
2709 @@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
2710 }
2711
2712 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2713 -
2714 -unsigned long arch_align_stack(unsigned long sp)
2715 -{
2716 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2717 - sp -= get_random_int() & ~PAGE_MASK;
2718 - return sp & ~0xf;
2719 -}
2720 -
2721 -static inline unsigned long brk_rnd(void)
2722 -{
2723 - unsigned long rnd = 0;
2724 -
2725 - /* 8MB for 32bit, 1GB for 64bit */
2726 - if (is_32bit_task())
2727 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2728 - else
2729 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2730 -
2731 - return rnd << PAGE_SHIFT;
2732 -}
2733 -
2734 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2735 -{
2736 - unsigned long base = mm->brk;
2737 - unsigned long ret;
2738 -
2739 -#ifdef CONFIG_PPC_STD_MMU_64
2740 - /*
2741 - * If we are using 1TB segments and we are allowed to randomise
2742 - * the heap, we can put it above 1TB so it is backed by a 1TB
2743 - * segment. Otherwise the heap will be in the bottom 1TB
2744 - * which always uses 256MB segments and this may result in a
2745 - * performance penalty.
2746 - */
2747 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2748 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2749 -#endif
2750 -
2751 - ret = PAGE_ALIGN(base + brk_rnd());
2752 -
2753 - if (ret < mm->brk)
2754 - return mm->brk;
2755 -
2756 - return ret;
2757 -}
2758 -
2759 -unsigned long randomize_et_dyn(unsigned long base)
2760 -{
2761 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2762 -
2763 - if (ret < base)
2764 - return base;
2765 -
2766 - return ret;
2767 -}
2768 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2769 index 836a5a1..27289a3 100644
2770 --- a/arch/powerpc/kernel/signal_32.c
2771 +++ b/arch/powerpc/kernel/signal_32.c
2772 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2773 /* Save user registers on the stack */
2774 frame = &rt_sf->uc.uc_mcontext;
2775 addr = frame;
2776 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2777 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2778 if (save_user_regs(regs, frame, 0, 1))
2779 goto badframe;
2780 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2781 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2782 index a50b5ec..547078a 100644
2783 --- a/arch/powerpc/kernel/signal_64.c
2784 +++ b/arch/powerpc/kernel/signal_64.c
2785 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2786 current->thread.fpscr.val = 0;
2787
2788 /* Set up to return from userspace. */
2789 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2790 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2791 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2792 } else {
2793 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2794 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2795 index 5459d14..10f8070 100644
2796 --- a/arch/powerpc/kernel/traps.c
2797 +++ b/arch/powerpc/kernel/traps.c
2798 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2799 static inline void pmac_backlight_unblank(void) { }
2800 #endif
2801
2802 +extern void gr_handle_kernel_exploit(void);
2803 +
2804 int die(const char *str, struct pt_regs *regs, long err)
2805 {
2806 static struct {
2807 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2808 if (panic_on_oops)
2809 panic("Fatal exception");
2810
2811 + gr_handle_kernel_exploit();
2812 +
2813 oops_exit();
2814 do_exit(err);
2815
2816 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2817 index 7d14bb6..1305601 100644
2818 --- a/arch/powerpc/kernel/vdso.c
2819 +++ b/arch/powerpc/kernel/vdso.c
2820 @@ -35,6 +35,7 @@
2821 #include <asm/firmware.h>
2822 #include <asm/vdso.h>
2823 #include <asm/vdso_datapage.h>
2824 +#include <asm/mman.h>
2825
2826 #include "setup.h"
2827
2828 @@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2829 vdso_base = VDSO32_MBASE;
2830 #endif
2831
2832 - current->mm->context.vdso_base = 0;
2833 + current->mm->context.vdso_base = ~0UL;
2834
2835 /* vDSO has a problem and was disabled, just don't "enable" it for the
2836 * process
2837 @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2838 vdso_base = get_unmapped_area(NULL, vdso_base,
2839 (vdso_pages << PAGE_SHIFT) +
2840 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2841 - 0, 0);
2842 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2843 if (IS_ERR_VALUE(vdso_base)) {
2844 rc = vdso_base;
2845 goto fail_mmapsem;
2846 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2847 index 5eea6f3..5d10396 100644
2848 --- a/arch/powerpc/lib/usercopy_64.c
2849 +++ b/arch/powerpc/lib/usercopy_64.c
2850 @@ -9,22 +9,6 @@
2851 #include <linux/module.h>
2852 #include <asm/uaccess.h>
2853
2854 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2855 -{
2856 - if (likely(access_ok(VERIFY_READ, from, n)))
2857 - n = __copy_from_user(to, from, n);
2858 - else
2859 - memset(to, 0, n);
2860 - return n;
2861 -}
2862 -
2863 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2864 -{
2865 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2866 - n = __copy_to_user(to, from, n);
2867 - return n;
2868 -}
2869 -
2870 unsigned long copy_in_user(void __user *to, const void __user *from,
2871 unsigned long n)
2872 {
2873 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2874 return n;
2875 }
2876
2877 -EXPORT_SYMBOL(copy_from_user);
2878 -EXPORT_SYMBOL(copy_to_user);
2879 EXPORT_SYMBOL(copy_in_user);
2880
2881 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2882 index 5efe8c9..db9ceef 100644
2883 --- a/arch/powerpc/mm/fault.c
2884 +++ b/arch/powerpc/mm/fault.c
2885 @@ -32,6 +32,10 @@
2886 #include <linux/perf_event.h>
2887 #include <linux/magic.h>
2888 #include <linux/ratelimit.h>
2889 +#include <linux/slab.h>
2890 +#include <linux/pagemap.h>
2891 +#include <linux/compiler.h>
2892 +#include <linux/unistd.h>
2893
2894 #include <asm/firmware.h>
2895 #include <asm/page.h>
2896 @@ -43,6 +47,7 @@
2897 #include <asm/tlbflush.h>
2898 #include <asm/siginfo.h>
2899 #include <mm/mmu_decl.h>
2900 +#include <asm/ptrace.h>
2901
2902 #ifdef CONFIG_KPROBES
2903 static inline int notify_page_fault(struct pt_regs *regs)
2904 @@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2905 }
2906 #endif
2907
2908 +#ifdef CONFIG_PAX_PAGEEXEC
2909 +/*
2910 + * PaX: decide what to do with offenders (regs->nip = fault address)
2911 + *
2912 + * returns 1 when task should be killed
2913 + */
2914 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2915 +{
2916 + return 1;
2917 +}
2918 +
2919 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2920 +{
2921 + unsigned long i;
2922 +
2923 + printk(KERN_ERR "PAX: bytes at PC: ");
2924 + for (i = 0; i < 5; i++) {
2925 + unsigned int c;
2926 + if (get_user(c, (unsigned int __user *)pc+i))
2927 + printk(KERN_CONT "???????? ");
2928 + else
2929 + printk(KERN_CONT "%08x ", c);
2930 + }
2931 + printk("\n");
2932 +}
2933 +#endif
2934 +
2935 /*
2936 * Check whether the instruction at regs->nip is a store using
2937 * an update addressing form which will update r1.
2938 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2939 * indicate errors in DSISR but can validly be set in SRR1.
2940 */
2941 if (trap == 0x400)
2942 - error_code &= 0x48200000;
2943 + error_code &= 0x58200000;
2944 else
2945 is_write = error_code & DSISR_ISSTORE;
2946 #else
2947 @@ -259,7 +291,7 @@ good_area:
2948 * "undefined". Of those that can be set, this is the only
2949 * one which seems bad.
2950 */
2951 - if (error_code & 0x10000000)
2952 + if (error_code & DSISR_GUARDED)
2953 /* Guarded storage error. */
2954 goto bad_area;
2955 #endif /* CONFIG_8xx */
2956 @@ -274,7 +306,7 @@ good_area:
2957 * processors use the same I/D cache coherency mechanism
2958 * as embedded.
2959 */
2960 - if (error_code & DSISR_PROTFAULT)
2961 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2962 goto bad_area;
2963 #endif /* CONFIG_PPC_STD_MMU */
2964
2965 @@ -343,6 +375,23 @@ bad_area:
2966 bad_area_nosemaphore:
2967 /* User mode accesses cause a SIGSEGV */
2968 if (user_mode(regs)) {
2969 +
2970 +#ifdef CONFIG_PAX_PAGEEXEC
2971 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2972 +#ifdef CONFIG_PPC_STD_MMU
2973 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2974 +#else
2975 + if (is_exec && regs->nip == address) {
2976 +#endif
2977 + switch (pax_handle_fetch_fault(regs)) {
2978 + }
2979 +
2980 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2981 + do_group_exit(SIGKILL);
2982 + }
2983 + }
2984 +#endif
2985 +
2986 _exception(SIGSEGV, regs, code, address);
2987 return 0;
2988 }
2989 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2990 index 5a783d8..c23e14b 100644
2991 --- a/arch/powerpc/mm/mmap_64.c
2992 +++ b/arch/powerpc/mm/mmap_64.c
2993 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2994 */
2995 if (mmap_is_legacy()) {
2996 mm->mmap_base = TASK_UNMAPPED_BASE;
2997 +
2998 +#ifdef CONFIG_PAX_RANDMMAP
2999 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3000 + mm->mmap_base += mm->delta_mmap;
3001 +#endif
3002 +
3003 mm->get_unmapped_area = arch_get_unmapped_area;
3004 mm->unmap_area = arch_unmap_area;
3005 } else {
3006 mm->mmap_base = mmap_base();
3007 +
3008 +#ifdef CONFIG_PAX_RANDMMAP
3009 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3010 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3011 +#endif
3012 +
3013 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3014 mm->unmap_area = arch_unmap_area_topdown;
3015 }
3016 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3017 index 73709f7..6b90313 100644
3018 --- a/arch/powerpc/mm/slice.c
3019 +++ b/arch/powerpc/mm/slice.c
3020 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3021 if ((mm->task_size - len) < addr)
3022 return 0;
3023 vma = find_vma(mm, addr);
3024 - return (!vma || (addr + len) <= vma->vm_start);
3025 + return check_heap_stack_gap(vma, addr, len);
3026 }
3027
3028 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3029 @@ -256,7 +256,7 @@ full_search:
3030 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3031 continue;
3032 }
3033 - if (!vma || addr + len <= vma->vm_start) {
3034 + if (check_heap_stack_gap(vma, addr, len)) {
3035 /*
3036 * Remember the place where we stopped the search:
3037 */
3038 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3039 }
3040 }
3041
3042 - addr = mm->mmap_base;
3043 - while (addr > len) {
3044 + if (mm->mmap_base < len)
3045 + addr = -ENOMEM;
3046 + else
3047 + addr = mm->mmap_base - len;
3048 +
3049 + while (!IS_ERR_VALUE(addr)) {
3050 /* Go down by chunk size */
3051 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3052 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3053
3054 /* Check for hit with different page size */
3055 mask = slice_range_to_mask(addr, len);
3056 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3057 * return with success:
3058 */
3059 vma = find_vma(mm, addr);
3060 - if (!vma || (addr + len) <= vma->vm_start) {
3061 + if (check_heap_stack_gap(vma, addr, len)) {
3062 /* remember the address as a hint for next time */
3063 if (use_cache)
3064 mm->free_area_cache = addr;
3065 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3066 mm->cached_hole_size = vma->vm_start - addr;
3067
3068 /* try just below the current vma->vm_start */
3069 - addr = vma->vm_start;
3070 + addr = skip_heap_stack_gap(vma, len);
3071 }
3072
3073 /*
3074 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3075 if (fixed && addr > (mm->task_size - len))
3076 return -EINVAL;
3077
3078 +#ifdef CONFIG_PAX_RANDMMAP
3079 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3080 + addr = 0;
3081 +#endif
3082 +
3083 /* If hint, make sure it matches our alignment restrictions */
3084 if (!fixed && addr) {
3085 addr = _ALIGN_UP(addr, 1ul << pshift);
3086 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
3087 index 8517d2a..d2738d4 100644
3088 --- a/arch/s390/include/asm/atomic.h
3089 +++ b/arch/s390/include/asm/atomic.h
3090 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
3091 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
3092 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3093
3094 +#define atomic64_read_unchecked(v) atomic64_read(v)
3095 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3096 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3097 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3098 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3099 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3100 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3101 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3102 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3103 +
3104 #define smp_mb__before_atomic_dec() smp_mb()
3105 #define smp_mb__after_atomic_dec() smp_mb()
3106 #define smp_mb__before_atomic_inc() smp_mb()
3107 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
3108 index 547f1a6..0b22b53 100644
3109 --- a/arch/s390/include/asm/elf.h
3110 +++ b/arch/s390/include/asm/elf.h
3111 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
3112 the loader. We need to make sure that it is out of the way of the program
3113 that it will "exec", and that there is sufficient room for the brk. */
3114
3115 -extern unsigned long randomize_et_dyn(unsigned long base);
3116 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
3117 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3118 +
3119 +#ifdef CONFIG_PAX_ASLR
3120 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3121 +
3122 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
3123 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
3124 +#endif
3125
3126 /* This yields a mask that user programs can use to figure out what
3127 instruction set this CPU supports. */
3128 @@ -211,7 +217,4 @@ struct linux_binprm;
3129 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
3130 int arch_setup_additional_pages(struct linux_binprm *, int);
3131
3132 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3133 -#define arch_randomize_brk arch_randomize_brk
3134 -
3135 #endif
3136 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
3137 index ef573c1..75a1ce6 100644
3138 --- a/arch/s390/include/asm/system.h
3139 +++ b/arch/s390/include/asm/system.h
3140 @@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
3141 extern void (*_machine_halt)(void);
3142 extern void (*_machine_power_off)(void);
3143
3144 -extern unsigned long arch_align_stack(unsigned long sp);
3145 +#define arch_align_stack(x) ((x) & ~0xfUL)
3146
3147 static inline int tprot(unsigned long addr)
3148 {
3149 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
3150 index 2b23885..e136e31 100644
3151 --- a/arch/s390/include/asm/uaccess.h
3152 +++ b/arch/s390/include/asm/uaccess.h
3153 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
3154 copy_to_user(void __user *to, const void *from, unsigned long n)
3155 {
3156 might_fault();
3157 +
3158 + if ((long)n < 0)
3159 + return n;
3160 +
3161 if (access_ok(VERIFY_WRITE, to, n))
3162 n = __copy_to_user(to, from, n);
3163 return n;
3164 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
3165 static inline unsigned long __must_check
3166 __copy_from_user(void *to, const void __user *from, unsigned long n)
3167 {
3168 + if ((long)n < 0)
3169 + return n;
3170 +
3171 if (__builtin_constant_p(n) && (n <= 256))
3172 return uaccess.copy_from_user_small(n, from, to);
3173 else
3174 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
3175 unsigned int sz = __compiletime_object_size(to);
3176
3177 might_fault();
3178 +
3179 + if ((long)n < 0)
3180 + return n;
3181 +
3182 if (unlikely(sz != -1 && sz < n)) {
3183 copy_from_user_overflow();
3184 return n;
3185 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
3186 index dfcb343..eda788a 100644
3187 --- a/arch/s390/kernel/module.c
3188 +++ b/arch/s390/kernel/module.c
3189 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
3190
3191 /* Increase core size by size of got & plt and set start
3192 offsets for got and plt. */
3193 - me->core_size = ALIGN(me->core_size, 4);
3194 - me->arch.got_offset = me->core_size;
3195 - me->core_size += me->arch.got_size;
3196 - me->arch.plt_offset = me->core_size;
3197 - me->core_size += me->arch.plt_size;
3198 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3199 + me->arch.got_offset = me->core_size_rw;
3200 + me->core_size_rw += me->arch.got_size;
3201 + me->arch.plt_offset = me->core_size_rx;
3202 + me->core_size_rx += me->arch.plt_size;
3203 return 0;
3204 }
3205
3206 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3207 if (info->got_initialized == 0) {
3208 Elf_Addr *gotent;
3209
3210 - gotent = me->module_core + me->arch.got_offset +
3211 + gotent = me->module_core_rw + me->arch.got_offset +
3212 info->got_offset;
3213 *gotent = val;
3214 info->got_initialized = 1;
3215 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3216 else if (r_type == R_390_GOTENT ||
3217 r_type == R_390_GOTPLTENT)
3218 *(unsigned int *) loc =
3219 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3220 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3221 else if (r_type == R_390_GOT64 ||
3222 r_type == R_390_GOTPLT64)
3223 *(unsigned long *) loc = val;
3224 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3225 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3226 if (info->plt_initialized == 0) {
3227 unsigned int *ip;
3228 - ip = me->module_core + me->arch.plt_offset +
3229 + ip = me->module_core_rx + me->arch.plt_offset +
3230 info->plt_offset;
3231 #ifndef CONFIG_64BIT
3232 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3233 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3234 val - loc + 0xffffUL < 0x1ffffeUL) ||
3235 (r_type == R_390_PLT32DBL &&
3236 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3237 - val = (Elf_Addr) me->module_core +
3238 + val = (Elf_Addr) me->module_core_rx +
3239 me->arch.plt_offset +
3240 info->plt_offset;
3241 val += rela->r_addend - loc;
3242 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3243 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3244 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3245 val = val + rela->r_addend -
3246 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3247 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3248 if (r_type == R_390_GOTOFF16)
3249 *(unsigned short *) loc = val;
3250 else if (r_type == R_390_GOTOFF32)
3251 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3252 break;
3253 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3254 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3255 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3256 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3257 rela->r_addend - loc;
3258 if (r_type == R_390_GOTPC)
3259 *(unsigned int *) loc = val;
3260 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3261 index 9451b21..ed8956f 100644
3262 --- a/arch/s390/kernel/process.c
3263 +++ b/arch/s390/kernel/process.c
3264 @@ -321,39 +321,3 @@ unsigned long get_wchan(struct task_struct *p)
3265 }
3266 return 0;
3267 }
3268 -
3269 -unsigned long arch_align_stack(unsigned long sp)
3270 -{
3271 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3272 - sp -= get_random_int() & ~PAGE_MASK;
3273 - return sp & ~0xf;
3274 -}
3275 -
3276 -static inline unsigned long brk_rnd(void)
3277 -{
3278 - /* 8MB for 32bit, 1GB for 64bit */
3279 - if (is_32bit_task())
3280 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3281 - else
3282 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3283 -}
3284 -
3285 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3286 -{
3287 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3288 -
3289 - if (ret < mm->brk)
3290 - return mm->brk;
3291 - return ret;
3292 -}
3293 -
3294 -unsigned long randomize_et_dyn(unsigned long base)
3295 -{
3296 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3297 -
3298 - if (!(current->flags & PF_RANDOMIZE))
3299 - return base;
3300 - if (ret < base)
3301 - return base;
3302 - return ret;
3303 -}
3304 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3305 index f09c748..cf9ec1d 100644
3306 --- a/arch/s390/mm/mmap.c
3307 +++ b/arch/s390/mm/mmap.c
3308 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3309 */
3310 if (mmap_is_legacy()) {
3311 mm->mmap_base = TASK_UNMAPPED_BASE;
3312 +
3313 +#ifdef CONFIG_PAX_RANDMMAP
3314 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3315 + mm->mmap_base += mm->delta_mmap;
3316 +#endif
3317 +
3318 mm->get_unmapped_area = arch_get_unmapped_area;
3319 mm->unmap_area = arch_unmap_area;
3320 } else {
3321 mm->mmap_base = mmap_base();
3322 +
3323 +#ifdef CONFIG_PAX_RANDMMAP
3324 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3325 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3326 +#endif
3327 +
3328 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3329 mm->unmap_area = arch_unmap_area_topdown;
3330 }
3331 @@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3332 */
3333 if (mmap_is_legacy()) {
3334 mm->mmap_base = TASK_UNMAPPED_BASE;
3335 +
3336 +#ifdef CONFIG_PAX_RANDMMAP
3337 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3338 + mm->mmap_base += mm->delta_mmap;
3339 +#endif
3340 +
3341 mm->get_unmapped_area = s390_get_unmapped_area;
3342 mm->unmap_area = arch_unmap_area;
3343 } else {
3344 mm->mmap_base = mmap_base();
3345 +
3346 +#ifdef CONFIG_PAX_RANDMMAP
3347 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3348 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3349 +#endif
3350 +
3351 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3352 mm->unmap_area = arch_unmap_area_topdown;
3353 }
3354 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3355 index 589d5c7..669e274 100644
3356 --- a/arch/score/include/asm/system.h
3357 +++ b/arch/score/include/asm/system.h
3358 @@ -17,7 +17,7 @@ do { \
3359 #define finish_arch_switch(prev) do {} while (0)
3360
3361 typedef void (*vi_handler_t)(void);
3362 -extern unsigned long arch_align_stack(unsigned long sp);
3363 +#define arch_align_stack(x) (x)
3364
3365 #define mb() barrier()
3366 #define rmb() barrier()
3367 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3368 index 25d0803..d6c8e36 100644
3369 --- a/arch/score/kernel/process.c
3370 +++ b/arch/score/kernel/process.c
3371 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3372
3373 return task_pt_regs(task)->cp0_epc;
3374 }
3375 -
3376 -unsigned long arch_align_stack(unsigned long sp)
3377 -{
3378 - return sp;
3379 -}
3380 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3381 index afeb710..d1d1289 100644
3382 --- a/arch/sh/mm/mmap.c
3383 +++ b/arch/sh/mm/mmap.c
3384 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3385 addr = PAGE_ALIGN(addr);
3386
3387 vma = find_vma(mm, addr);
3388 - if (TASK_SIZE - len >= addr &&
3389 - (!vma || addr + len <= vma->vm_start))
3390 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3391 return addr;
3392 }
3393
3394 @@ -106,7 +105,7 @@ full_search:
3395 }
3396 return -ENOMEM;
3397 }
3398 - if (likely(!vma || addr + len <= vma->vm_start)) {
3399 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3400 /*
3401 * Remember the place where we stopped the search:
3402 */
3403 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3404 addr = PAGE_ALIGN(addr);
3405
3406 vma = find_vma(mm, addr);
3407 - if (TASK_SIZE - len >= addr &&
3408 - (!vma || addr + len <= vma->vm_start))
3409 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3410 return addr;
3411 }
3412
3413 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3414 /* make sure it can fit in the remaining address space */
3415 if (likely(addr > len)) {
3416 vma = find_vma(mm, addr-len);
3417 - if (!vma || addr <= vma->vm_start) {
3418 + if (check_heap_stack_gap(vma, addr - len, len)) {
3419 /* remember the address as a hint for next time */
3420 return (mm->free_area_cache = addr-len);
3421 }
3422 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3423 if (unlikely(mm->mmap_base < len))
3424 goto bottomup;
3425
3426 - addr = mm->mmap_base-len;
3427 - if (do_colour_align)
3428 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3429 + addr = mm->mmap_base - len;
3430
3431 do {
3432 + if (do_colour_align)
3433 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3434 /*
3435 * Lookup failure means no vma is above this address,
3436 * else if new region fits below vma->vm_start,
3437 * return with success:
3438 */
3439 vma = find_vma(mm, addr);
3440 - if (likely(!vma || addr+len <= vma->vm_start)) {
3441 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3442 /* remember the address as a hint for next time */
3443 return (mm->free_area_cache = addr);
3444 }
3445 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3446 mm->cached_hole_size = vma->vm_start - addr;
3447
3448 /* try just below the current vma->vm_start */
3449 - addr = vma->vm_start-len;
3450 - if (do_colour_align)
3451 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3452 - } while (likely(len < vma->vm_start));
3453 + addr = skip_heap_stack_gap(vma, len);
3454 + } while (!IS_ERR_VALUE(addr));
3455
3456 bottomup:
3457 /*
3458 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
3459 index f92602e..27060b2 100644
3460 --- a/arch/sparc/Kconfig
3461 +++ b/arch/sparc/Kconfig
3462 @@ -31,6 +31,7 @@ config SPARC
3463
3464 config SPARC32
3465 def_bool !64BIT
3466 + select GENERIC_ATOMIC64
3467
3468 config SPARC64
3469 def_bool 64BIT
3470 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3471 index ad1fb5d..fc5315b 100644
3472 --- a/arch/sparc/Makefile
3473 +++ b/arch/sparc/Makefile
3474 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3475 # Export what is needed by arch/sparc/boot/Makefile
3476 export VMLINUX_INIT VMLINUX_MAIN
3477 VMLINUX_INIT := $(head-y) $(init-y)
3478 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3479 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3480 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3481 VMLINUX_MAIN += $(drivers-y) $(net-y)
3482
3483 diff --git a/arch/sparc/include/asm/atomic.h b/arch/sparc/include/asm/atomic.h
3484 index 8ff83d8..4a459c2 100644
3485 --- a/arch/sparc/include/asm/atomic.h
3486 +++ b/arch/sparc/include/asm/atomic.h
3487 @@ -4,5 +4,6 @@
3488 #include <asm/atomic_64.h>
3489 #else
3490 #include <asm/atomic_32.h>
3491 +#include <asm-generic/atomic64.h>
3492 #endif
3493 #endif
3494 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3495 index 9f421df..b81fc12 100644
3496 --- a/arch/sparc/include/asm/atomic_64.h
3497 +++ b/arch/sparc/include/asm/atomic_64.h
3498 @@ -14,18 +14,40 @@
3499 #define ATOMIC64_INIT(i) { (i) }
3500
3501 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3502 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3503 +{
3504 + return v->counter;
3505 +}
3506 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3507 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3508 +{
3509 + return v->counter;
3510 +}
3511
3512 #define atomic_set(v, i) (((v)->counter) = i)
3513 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3514 +{
3515 + v->counter = i;
3516 +}
3517 #define atomic64_set(v, i) (((v)->counter) = i)
3518 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3519 +{
3520 + v->counter = i;
3521 +}
3522
3523 extern void atomic_add(int, atomic_t *);
3524 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3525 extern void atomic64_add(long, atomic64_t *);
3526 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3527 extern void atomic_sub(int, atomic_t *);
3528 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3529 extern void atomic64_sub(long, atomic64_t *);
3530 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3531
3532 extern int atomic_add_ret(int, atomic_t *);
3533 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3534 extern long atomic64_add_ret(long, atomic64_t *);
3535 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3536 extern int atomic_sub_ret(int, atomic_t *);
3537 extern long atomic64_sub_ret(long, atomic64_t *);
3538
3539 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3540 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3541
3542 #define atomic_inc_return(v) atomic_add_ret(1, v)
3543 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3544 +{
3545 + return atomic_add_ret_unchecked(1, v);
3546 +}
3547 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3548 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3549 +{
3550 + return atomic64_add_ret_unchecked(1, v);
3551 +}
3552
3553 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3554 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3555
3556 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3557 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3558 +{
3559 + return atomic_add_ret_unchecked(i, v);
3560 +}
3561 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3562 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3563 +{
3564 + return atomic64_add_ret_unchecked(i, v);
3565 +}
3566
3567 /*
3568 * atomic_inc_and_test - increment and test
3569 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3570 * other cases.
3571 */
3572 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3573 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3574 +{
3575 + return atomic_inc_return_unchecked(v) == 0;
3576 +}
3577 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3578
3579 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3580 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3581 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3582
3583 #define atomic_inc(v) atomic_add(1, v)
3584 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3585 +{
3586 + atomic_add_unchecked(1, v);
3587 +}
3588 #define atomic64_inc(v) atomic64_add(1, v)
3589 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3590 +{
3591 + atomic64_add_unchecked(1, v);
3592 +}
3593
3594 #define atomic_dec(v) atomic_sub(1, v)
3595 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3596 +{
3597 + atomic_sub_unchecked(1, v);
3598 +}
3599 #define atomic64_dec(v) atomic64_sub(1, v)
3600 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3601 +{
3602 + atomic64_sub_unchecked(1, v);
3603 +}
3604
3605 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3606 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3607
3608 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3609 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3610 +{
3611 + return cmpxchg(&v->counter, old, new);
3612 +}
3613 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3614 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3615 +{
3616 + return xchg(&v->counter, new);
3617 +}
3618
3619 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3620 {
3621 - int c, old;
3622 + int c, old, new;
3623 c = atomic_read(v);
3624 for (;;) {
3625 - if (unlikely(c == (u)))
3626 + if (unlikely(c == u))
3627 break;
3628 - old = atomic_cmpxchg((v), c, c + (a));
3629 +
3630 + asm volatile("addcc %2, %0, %0\n"
3631 +
3632 +#ifdef CONFIG_PAX_REFCOUNT
3633 + "tvs %%icc, 6\n"
3634 +#endif
3635 +
3636 + : "=r" (new)
3637 + : "0" (c), "ir" (a)
3638 + : "cc");
3639 +
3640 + old = atomic_cmpxchg(v, c, new);
3641 if (likely(old == c))
3642 break;
3643 c = old;
3644 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3645 #define atomic64_cmpxchg(v, o, n) \
3646 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3647 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3648 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3649 +{
3650 + return xchg(&v->counter, new);
3651 +}
3652
3653 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3654 {
3655 - long c, old;
3656 + long c, old, new;
3657 c = atomic64_read(v);
3658 for (;;) {
3659 - if (unlikely(c == (u)))
3660 + if (unlikely(c == u))
3661 break;
3662 - old = atomic64_cmpxchg((v), c, c + (a));
3663 +
3664 + asm volatile("addcc %2, %0, %0\n"
3665 +
3666 +#ifdef CONFIG_PAX_REFCOUNT
3667 + "tvs %%xcc, 6\n"
3668 +#endif
3669 +
3670 + : "=r" (new)
3671 + : "0" (c), "ir" (a)
3672 + : "cc");
3673 +
3674 + old = atomic64_cmpxchg(v, c, new);
3675 if (likely(old == c))
3676 break;
3677 c = old;
3678 }
3679 - return c != (u);
3680 + return c != u;
3681 }
3682
3683 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3684 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3685 index 69358b5..17b4745 100644
3686 --- a/arch/sparc/include/asm/cache.h
3687 +++ b/arch/sparc/include/asm/cache.h
3688 @@ -10,7 +10,7 @@
3689 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3690
3691 #define L1_CACHE_SHIFT 5
3692 -#define L1_CACHE_BYTES 32
3693 +#define L1_CACHE_BYTES 32UL
3694
3695 #ifdef CONFIG_SPARC32
3696 #define SMP_CACHE_BYTES_SHIFT 5
3697 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3698 index 4269ca6..e3da77f 100644
3699 --- a/arch/sparc/include/asm/elf_32.h
3700 +++ b/arch/sparc/include/asm/elf_32.h
3701 @@ -114,6 +114,13 @@ typedef struct {
3702
3703 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3704
3705 +#ifdef CONFIG_PAX_ASLR
3706 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3707 +
3708 +#define PAX_DELTA_MMAP_LEN 16
3709 +#define PAX_DELTA_STACK_LEN 16
3710 +#endif
3711 +
3712 /* This yields a mask that user programs can use to figure out what
3713 instruction set this cpu supports. This can NOT be done in userspace
3714 on Sparc. */
3715 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3716 index 7df8b7f..4946269 100644
3717 --- a/arch/sparc/include/asm/elf_64.h
3718 +++ b/arch/sparc/include/asm/elf_64.h
3719 @@ -180,6 +180,13 @@ typedef struct {
3720 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3721 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3722
3723 +#ifdef CONFIG_PAX_ASLR
3724 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3725 +
3726 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3727 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3728 +#endif
3729 +
3730 extern unsigned long sparc64_elf_hwcap;
3731 #define ELF_HWCAP sparc64_elf_hwcap
3732
3733 diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
3734 index 156707b..aefa786 100644
3735 --- a/arch/sparc/include/asm/page_32.h
3736 +++ b/arch/sparc/include/asm/page_32.h
3737 @@ -8,6 +8,8 @@
3738 #ifndef _SPARC_PAGE_H
3739 #define _SPARC_PAGE_H
3740
3741 +#include <linux/const.h>
3742 +
3743 #define PAGE_SHIFT 12
3744
3745 #ifndef __ASSEMBLY__
3746 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3747 index a790cc6..091ed94 100644
3748 --- a/arch/sparc/include/asm/pgtable_32.h
3749 +++ b/arch/sparc/include/asm/pgtable_32.h
3750 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3751 BTFIXUPDEF_INT(page_none)
3752 BTFIXUPDEF_INT(page_copy)
3753 BTFIXUPDEF_INT(page_readonly)
3754 +
3755 +#ifdef CONFIG_PAX_PAGEEXEC
3756 +BTFIXUPDEF_INT(page_shared_noexec)
3757 +BTFIXUPDEF_INT(page_copy_noexec)
3758 +BTFIXUPDEF_INT(page_readonly_noexec)
3759 +#endif
3760 +
3761 BTFIXUPDEF_INT(page_kernel)
3762
3763 #define PMD_SHIFT SUN4C_PMD_SHIFT
3764 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3765 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3766 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3767
3768 +#ifdef CONFIG_PAX_PAGEEXEC
3769 +extern pgprot_t PAGE_SHARED_NOEXEC;
3770 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3771 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3772 +#else
3773 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3774 +# define PAGE_COPY_NOEXEC PAGE_COPY
3775 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3776 +#endif
3777 +
3778 extern unsigned long page_kernel;
3779
3780 #ifdef MODULE
3781 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3782 index f6ae2b2..b03ffc7 100644
3783 --- a/arch/sparc/include/asm/pgtsrmmu.h
3784 +++ b/arch/sparc/include/asm/pgtsrmmu.h
3785 @@ -115,6 +115,13 @@
3786 SRMMU_EXEC | SRMMU_REF)
3787 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3788 SRMMU_EXEC | SRMMU_REF)
3789 +
3790 +#ifdef CONFIG_PAX_PAGEEXEC
3791 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3792 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3793 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3794 +#endif
3795 +
3796 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3797 SRMMU_DIRTY | SRMMU_REF)
3798
3799 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3800 index 9689176..63c18ea 100644
3801 --- a/arch/sparc/include/asm/spinlock_64.h
3802 +++ b/arch/sparc/include/asm/spinlock_64.h
3803 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3804
3805 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3806
3807 -static void inline arch_read_lock(arch_rwlock_t *lock)
3808 +static inline void arch_read_lock(arch_rwlock_t *lock)
3809 {
3810 unsigned long tmp1, tmp2;
3811
3812 __asm__ __volatile__ (
3813 "1: ldsw [%2], %0\n"
3814 " brlz,pn %0, 2f\n"
3815 -"4: add %0, 1, %1\n"
3816 +"4: addcc %0, 1, %1\n"
3817 +
3818 +#ifdef CONFIG_PAX_REFCOUNT
3819 +" tvs %%icc, 6\n"
3820 +#endif
3821 +
3822 " cas [%2], %0, %1\n"
3823 " cmp %0, %1\n"
3824 " bne,pn %%icc, 1b\n"
3825 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3826 " .previous"
3827 : "=&r" (tmp1), "=&r" (tmp2)
3828 : "r" (lock)
3829 - : "memory");
3830 + : "memory", "cc");
3831 }
3832
3833 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3834 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3835 {
3836 int tmp1, tmp2;
3837
3838 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3839 "1: ldsw [%2], %0\n"
3840 " brlz,a,pn %0, 2f\n"
3841 " mov 0, %0\n"
3842 -" add %0, 1, %1\n"
3843 +" addcc %0, 1, %1\n"
3844 +
3845 +#ifdef CONFIG_PAX_REFCOUNT
3846 +" tvs %%icc, 6\n"
3847 +#endif
3848 +
3849 " cas [%2], %0, %1\n"
3850 " cmp %0, %1\n"
3851 " bne,pn %%icc, 1b\n"
3852 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3853 return tmp1;
3854 }
3855
3856 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3857 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3858 {
3859 unsigned long tmp1, tmp2;
3860
3861 __asm__ __volatile__(
3862 "1: lduw [%2], %0\n"
3863 -" sub %0, 1, %1\n"
3864 +" subcc %0, 1, %1\n"
3865 +
3866 +#ifdef CONFIG_PAX_REFCOUNT
3867 +" tvs %%icc, 6\n"
3868 +#endif
3869 +
3870 " cas [%2], %0, %1\n"
3871 " cmp %0, %1\n"
3872 " bne,pn %%xcc, 1b\n"
3873 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3874 : "memory");
3875 }
3876
3877 -static void inline arch_write_lock(arch_rwlock_t *lock)
3878 +static inline void arch_write_lock(arch_rwlock_t *lock)
3879 {
3880 unsigned long mask, tmp1, tmp2;
3881
3882 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3883 : "memory");
3884 }
3885
3886 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3887 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3888 {
3889 __asm__ __volatile__(
3890 " stw %%g0, [%0]"
3891 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3892 : "memory");
3893 }
3894
3895 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3896 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3897 {
3898 unsigned long mask, tmp1, tmp2, result;
3899
3900 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3901 index fa57532..e1a4c53 100644
3902 --- a/arch/sparc/include/asm/thread_info_32.h
3903 +++ b/arch/sparc/include/asm/thread_info_32.h
3904 @@ -50,6 +50,8 @@ struct thread_info {
3905 unsigned long w_saved;
3906
3907 struct restart_block restart_block;
3908 +
3909 + unsigned long lowest_stack;
3910 };
3911
3912 /*
3913 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3914 index 60d86be..952dea1 100644
3915 --- a/arch/sparc/include/asm/thread_info_64.h
3916 +++ b/arch/sparc/include/asm/thread_info_64.h
3917 @@ -63,6 +63,8 @@ struct thread_info {
3918 struct pt_regs *kern_una_regs;
3919 unsigned int kern_una_insn;
3920
3921 + unsigned long lowest_stack;
3922 +
3923 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3924 };
3925
3926 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3927 index e88fbe5..96b0ce5 100644
3928 --- a/arch/sparc/include/asm/uaccess.h
3929 +++ b/arch/sparc/include/asm/uaccess.h
3930 @@ -1,5 +1,13 @@
3931 #ifndef ___ASM_SPARC_UACCESS_H
3932 #define ___ASM_SPARC_UACCESS_H
3933 +
3934 +#ifdef __KERNEL__
3935 +#ifndef __ASSEMBLY__
3936 +#include <linux/types.h>
3937 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3938 +#endif
3939 +#endif
3940 +
3941 #if defined(__sparc__) && defined(__arch64__)
3942 #include <asm/uaccess_64.h>
3943 #else
3944 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3945 index 8303ac4..07f333d 100644
3946 --- a/arch/sparc/include/asm/uaccess_32.h
3947 +++ b/arch/sparc/include/asm/uaccess_32.h
3948 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3949
3950 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3951 {
3952 - if (n && __access_ok((unsigned long) to, n))
3953 + if ((long)n < 0)
3954 + return n;
3955 +
3956 + if (n && __access_ok((unsigned long) to, n)) {
3957 + if (!__builtin_constant_p(n))
3958 + check_object_size(from, n, true);
3959 return __copy_user(to, (__force void __user *) from, n);
3960 - else
3961 + } else
3962 return n;
3963 }
3964
3965 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3966 {
3967 + if ((long)n < 0)
3968 + return n;
3969 +
3970 + if (!__builtin_constant_p(n))
3971 + check_object_size(from, n, true);
3972 +
3973 return __copy_user(to, (__force void __user *) from, n);
3974 }
3975
3976 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3977 {
3978 - if (n && __access_ok((unsigned long) from, n))
3979 + if ((long)n < 0)
3980 + return n;
3981 +
3982 + if (n && __access_ok((unsigned long) from, n)) {
3983 + if (!__builtin_constant_p(n))
3984 + check_object_size(to, n, false);
3985 return __copy_user((__force void __user *) to, from, n);
3986 - else
3987 + } else
3988 return n;
3989 }
3990
3991 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3992 {
3993 + if ((long)n < 0)
3994 + return n;
3995 +
3996 return __copy_user((__force void __user *) to, from, n);
3997 }
3998
3999 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
4000 index 3e1449f..5293a0e 100644
4001 --- a/arch/sparc/include/asm/uaccess_64.h
4002 +++ b/arch/sparc/include/asm/uaccess_64.h
4003 @@ -10,6 +10,7 @@
4004 #include <linux/compiler.h>
4005 #include <linux/string.h>
4006 #include <linux/thread_info.h>
4007 +#include <linux/kernel.h>
4008 #include <asm/asi.h>
4009 #include <asm/system.h>
4010 #include <asm/spitfire.h>
4011 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
4012 static inline unsigned long __must_check
4013 copy_from_user(void *to, const void __user *from, unsigned long size)
4014 {
4015 - unsigned long ret = ___copy_from_user(to, from, size);
4016 + unsigned long ret;
4017
4018 + if ((long)size < 0 || size > INT_MAX)
4019 + return size;
4020 +
4021 + if (!__builtin_constant_p(size))
4022 + check_object_size(to, size, false);
4023 +
4024 + ret = ___copy_from_user(to, from, size);
4025 if (unlikely(ret))
4026 ret = copy_from_user_fixup(to, from, size);
4027
4028 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
4029 static inline unsigned long __must_check
4030 copy_to_user(void __user *to, const void *from, unsigned long size)
4031 {
4032 - unsigned long ret = ___copy_to_user(to, from, size);
4033 + unsigned long ret;
4034
4035 + if ((long)size < 0 || size > INT_MAX)
4036 + return size;
4037 +
4038 + if (!__builtin_constant_p(size))
4039 + check_object_size(from, size, true);
4040 +
4041 + ret = ___copy_to_user(to, from, size);
4042 if (unlikely(ret))
4043 ret = copy_to_user_fixup(to, from, size);
4044 return ret;
4045 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
4046 index cb85458..e063f17 100644
4047 --- a/arch/sparc/kernel/Makefile
4048 +++ b/arch/sparc/kernel/Makefile
4049 @@ -3,7 +3,7 @@
4050 #
4051
4052 asflags-y := -ansi
4053 -ccflags-y := -Werror
4054 +#ccflags-y := -Werror
4055
4056 extra-y := head_$(BITS).o
4057 extra-y += init_task.o
4058 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
4059 index f793742..4d880af 100644
4060 --- a/arch/sparc/kernel/process_32.c
4061 +++ b/arch/sparc/kernel/process_32.c
4062 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
4063 rw->ins[4], rw->ins[5],
4064 rw->ins[6],
4065 rw->ins[7]);
4066 - printk("%pS\n", (void *) rw->ins[7]);
4067 + printk("%pA\n", (void *) rw->ins[7]);
4068 rw = (struct reg_window32 *) rw->ins[6];
4069 }
4070 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4071 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
4072
4073 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4074 r->psr, r->pc, r->npc, r->y, print_tainted());
4075 - printk("PC: <%pS>\n", (void *) r->pc);
4076 + printk("PC: <%pA>\n", (void *) r->pc);
4077 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4078 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4079 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4080 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4081 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4082 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4083 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4084 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4085
4086 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4087 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4088 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4089 rw = (struct reg_window32 *) fp;
4090 pc = rw->ins[7];
4091 printk("[%08lx : ", pc);
4092 - printk("%pS ] ", (void *) pc);
4093 + printk("%pA ] ", (void *) pc);
4094 fp = rw->ins[6];
4095 } while (++count < 16);
4096 printk("\n");
4097 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
4098 index 3739a06..48b2ff0 100644
4099 --- a/arch/sparc/kernel/process_64.c
4100 +++ b/arch/sparc/kernel/process_64.c
4101 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
4102 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4103 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4104 if (regs->tstate & TSTATE_PRIV)
4105 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4106 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4107 }
4108
4109 void show_regs(struct pt_regs *regs)
4110 {
4111 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4112 regs->tpc, regs->tnpc, regs->y, print_tainted());
4113 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4114 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4115 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4116 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4117 regs->u_regs[3]);
4118 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4119 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4120 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4121 regs->u_regs[15]);
4122 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4123 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4124 show_regwindow(regs);
4125 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
4126 }
4127 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
4128 ((tp && tp->task) ? tp->task->pid : -1));
4129
4130 if (gp->tstate & TSTATE_PRIV) {
4131 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4132 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4133 (void *) gp->tpc,
4134 (void *) gp->o7,
4135 (void *) gp->i7,
4136 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
4137 index 42b282f..28ce9f2 100644
4138 --- a/arch/sparc/kernel/sys_sparc_32.c
4139 +++ b/arch/sparc/kernel/sys_sparc_32.c
4140 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4141 if (ARCH_SUN4C && len > 0x20000000)
4142 return -ENOMEM;
4143 if (!addr)
4144 - addr = TASK_UNMAPPED_BASE;
4145 + addr = current->mm->mmap_base;
4146
4147 if (flags & MAP_SHARED)
4148 addr = COLOUR_ALIGN(addr);
4149 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4150 }
4151 if (TASK_SIZE - PAGE_SIZE - len < addr)
4152 return -ENOMEM;
4153 - if (!vmm || addr + len <= vmm->vm_start)
4154 + if (check_heap_stack_gap(vmm, addr, len))
4155 return addr;
4156 addr = vmm->vm_end;
4157 if (flags & MAP_SHARED)
4158 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
4159 index 441521a..b767073 100644
4160 --- a/arch/sparc/kernel/sys_sparc_64.c
4161 +++ b/arch/sparc/kernel/sys_sparc_64.c
4162 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4163 /* We do not accept a shared mapping if it would violate
4164 * cache aliasing constraints.
4165 */
4166 - if ((flags & MAP_SHARED) &&
4167 + if ((filp || (flags & MAP_SHARED)) &&
4168 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4169 return -EINVAL;
4170 return addr;
4171 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4172 if (filp || (flags & MAP_SHARED))
4173 do_color_align = 1;
4174
4175 +#ifdef CONFIG_PAX_RANDMMAP
4176 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4177 +#endif
4178 +
4179 if (addr) {
4180 if (do_color_align)
4181 addr = COLOUR_ALIGN(addr, pgoff);
4182 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4183 addr = PAGE_ALIGN(addr);
4184
4185 vma = find_vma(mm, addr);
4186 - if (task_size - len >= addr &&
4187 - (!vma || addr + len <= vma->vm_start))
4188 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4189 return addr;
4190 }
4191
4192 if (len > mm->cached_hole_size) {
4193 - start_addr = addr = mm->free_area_cache;
4194 + start_addr = addr = mm->free_area_cache;
4195 } else {
4196 - start_addr = addr = TASK_UNMAPPED_BASE;
4197 + start_addr = addr = mm->mmap_base;
4198 mm->cached_hole_size = 0;
4199 }
4200
4201 @@ -174,14 +177,14 @@ full_search:
4202 vma = find_vma(mm, VA_EXCLUDE_END);
4203 }
4204 if (unlikely(task_size < addr)) {
4205 - if (start_addr != TASK_UNMAPPED_BASE) {
4206 - start_addr = addr = TASK_UNMAPPED_BASE;
4207 + if (start_addr != mm->mmap_base) {
4208 + start_addr = addr = mm->mmap_base;
4209 mm->cached_hole_size = 0;
4210 goto full_search;
4211 }
4212 return -ENOMEM;
4213 }
4214 - if (likely(!vma || addr + len <= vma->vm_start)) {
4215 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4216 /*
4217 * Remember the place where we stopped the search:
4218 */
4219 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4220 /* We do not accept a shared mapping if it would violate
4221 * cache aliasing constraints.
4222 */
4223 - if ((flags & MAP_SHARED) &&
4224 + if ((filp || (flags & MAP_SHARED)) &&
4225 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4226 return -EINVAL;
4227 return addr;
4228 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4229 addr = PAGE_ALIGN(addr);
4230
4231 vma = find_vma(mm, addr);
4232 - if (task_size - len >= addr &&
4233 - (!vma || addr + len <= vma->vm_start))
4234 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4235 return addr;
4236 }
4237
4238 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4239 /* make sure it can fit in the remaining address space */
4240 if (likely(addr > len)) {
4241 vma = find_vma(mm, addr-len);
4242 - if (!vma || addr <= vma->vm_start) {
4243 + if (check_heap_stack_gap(vma, addr - len, len)) {
4244 /* remember the address as a hint for next time */
4245 return (mm->free_area_cache = addr-len);
4246 }
4247 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4248 if (unlikely(mm->mmap_base < len))
4249 goto bottomup;
4250
4251 - addr = mm->mmap_base-len;
4252 - if (do_color_align)
4253 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4254 + addr = mm->mmap_base - len;
4255
4256 do {
4257 + if (do_color_align)
4258 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4259 /*
4260 * Lookup failure means no vma is above this address,
4261 * else if new region fits below vma->vm_start,
4262 * return with success:
4263 */
4264 vma = find_vma(mm, addr);
4265 - if (likely(!vma || addr+len <= vma->vm_start)) {
4266 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4267 /* remember the address as a hint for next time */
4268 return (mm->free_area_cache = addr);
4269 }
4270 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4271 mm->cached_hole_size = vma->vm_start - addr;
4272
4273 /* try just below the current vma->vm_start */
4274 - addr = vma->vm_start-len;
4275 - if (do_color_align)
4276 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4277 - } while (likely(len < vma->vm_start));
4278 + addr = skip_heap_stack_gap(vma, len);
4279 + } while (!IS_ERR_VALUE(addr));
4280
4281 bottomup:
4282 /*
4283 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4284 gap == RLIM_INFINITY ||
4285 sysctl_legacy_va_layout) {
4286 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4287 +
4288 +#ifdef CONFIG_PAX_RANDMMAP
4289 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4290 + mm->mmap_base += mm->delta_mmap;
4291 +#endif
4292 +
4293 mm->get_unmapped_area = arch_get_unmapped_area;
4294 mm->unmap_area = arch_unmap_area;
4295 } else {
4296 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4297 gap = (task_size / 6 * 5);
4298
4299 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4300 +
4301 +#ifdef CONFIG_PAX_RANDMMAP
4302 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4303 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4304 +#endif
4305 +
4306 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4307 mm->unmap_area = arch_unmap_area_topdown;
4308 }
4309 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4310 index 591f20c..0f1b925 100644
4311 --- a/arch/sparc/kernel/traps_32.c
4312 +++ b/arch/sparc/kernel/traps_32.c
4313 @@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
4314 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4315 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4316
4317 +extern void gr_handle_kernel_exploit(void);
4318 +
4319 void die_if_kernel(char *str, struct pt_regs *regs)
4320 {
4321 static int die_counter;
4322 @@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4323 count++ < 30 &&
4324 (((unsigned long) rw) >= PAGE_OFFSET) &&
4325 !(((unsigned long) rw) & 0x7)) {
4326 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4327 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4328 (void *) rw->ins[7]);
4329 rw = (struct reg_window32 *)rw->ins[6];
4330 }
4331 }
4332 printk("Instruction DUMP:");
4333 instruction_dump ((unsigned long *) regs->pc);
4334 - if(regs->psr & PSR_PS)
4335 + if(regs->psr & PSR_PS) {
4336 + gr_handle_kernel_exploit();
4337 do_exit(SIGKILL);
4338 + }
4339 do_exit(SIGSEGV);
4340 }
4341
4342 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4343 index 0cbdaa4..438e4c9 100644
4344 --- a/arch/sparc/kernel/traps_64.c
4345 +++ b/arch/sparc/kernel/traps_64.c
4346 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4347 i + 1,
4348 p->trapstack[i].tstate, p->trapstack[i].tpc,
4349 p->trapstack[i].tnpc, p->trapstack[i].tt);
4350 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4351 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4352 }
4353 }
4354
4355 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4356
4357 lvl -= 0x100;
4358 if (regs->tstate & TSTATE_PRIV) {
4359 +
4360 +#ifdef CONFIG_PAX_REFCOUNT
4361 + if (lvl == 6)
4362 + pax_report_refcount_overflow(regs);
4363 +#endif
4364 +
4365 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4366 die_if_kernel(buffer, regs);
4367 }
4368 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4369 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4370 {
4371 char buffer[32];
4372 -
4373 +
4374 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4375 0, lvl, SIGTRAP) == NOTIFY_STOP)
4376 return;
4377
4378 +#ifdef CONFIG_PAX_REFCOUNT
4379 + if (lvl == 6)
4380 + pax_report_refcount_overflow(regs);
4381 +#endif
4382 +
4383 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4384
4385 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4386 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4387 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4388 printk("%s" "ERROR(%d): ",
4389 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4390 - printk("TPC<%pS>\n", (void *) regs->tpc);
4391 + printk("TPC<%pA>\n", (void *) regs->tpc);
4392 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4393 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4394 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4395 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4396 smp_processor_id(),
4397 (type & 0x1) ? 'I' : 'D',
4398 regs->tpc);
4399 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4400 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4401 panic("Irrecoverable Cheetah+ parity error.");
4402 }
4403
4404 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4405 smp_processor_id(),
4406 (type & 0x1) ? 'I' : 'D',
4407 regs->tpc);
4408 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4409 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4410 }
4411
4412 struct sun4v_error_entry {
4413 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4414
4415 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4416 regs->tpc, tl);
4417 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4418 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4419 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4420 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4421 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4422 (void *) regs->u_regs[UREG_I7]);
4423 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4424 "pte[%lx] error[%lx]\n",
4425 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4426
4427 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4428 regs->tpc, tl);
4429 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4430 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4431 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4432 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4433 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4434 (void *) regs->u_regs[UREG_I7]);
4435 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4436 "pte[%lx] error[%lx]\n",
4437 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4438 fp = (unsigned long)sf->fp + STACK_BIAS;
4439 }
4440
4441 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4442 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4443 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4444 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4445 int index = tsk->curr_ret_stack;
4446 if (tsk->ret_stack && index >= graph) {
4447 pc = tsk->ret_stack[index - graph].ret;
4448 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4449 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4450 graph++;
4451 }
4452 }
4453 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4454 return (struct reg_window *) (fp + STACK_BIAS);
4455 }
4456
4457 +extern void gr_handle_kernel_exploit(void);
4458 +
4459 void die_if_kernel(char *str, struct pt_regs *regs)
4460 {
4461 static int die_counter;
4462 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4463 while (rw &&
4464 count++ < 30 &&
4465 kstack_valid(tp, (unsigned long) rw)) {
4466 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4467 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4468 (void *) rw->ins[7]);
4469
4470 rw = kernel_stack_up(rw);
4471 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4472 }
4473 user_instruction_dump ((unsigned int __user *) regs->tpc);
4474 }
4475 - if (regs->tstate & TSTATE_PRIV)
4476 + if (regs->tstate & TSTATE_PRIV) {
4477 + gr_handle_kernel_exploit();
4478 do_exit(SIGKILL);
4479 + }
4480 do_exit(SIGSEGV);
4481 }
4482 EXPORT_SYMBOL(die_if_kernel);
4483 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4484 index 76e4ac1..78f8bb1 100644
4485 --- a/arch/sparc/kernel/unaligned_64.c
4486 +++ b/arch/sparc/kernel/unaligned_64.c
4487 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4488 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4489
4490 if (__ratelimit(&ratelimit)) {
4491 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4492 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4493 regs->tpc, (void *) regs->tpc);
4494 }
4495 }
4496 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4497 index a3fc437..fea9957 100644
4498 --- a/arch/sparc/lib/Makefile
4499 +++ b/arch/sparc/lib/Makefile
4500 @@ -2,7 +2,7 @@
4501 #
4502
4503 asflags-y := -ansi -DST_DIV0=0x02
4504 -ccflags-y := -Werror
4505 +#ccflags-y := -Werror
4506
4507 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4508 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4509 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4510 index 59186e0..f747d7a 100644
4511 --- a/arch/sparc/lib/atomic_64.S
4512 +++ b/arch/sparc/lib/atomic_64.S
4513 @@ -18,7 +18,12 @@
4514 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4515 BACKOFF_SETUP(%o2)
4516 1: lduw [%o1], %g1
4517 - add %g1, %o0, %g7
4518 + addcc %g1, %o0, %g7
4519 +
4520 +#ifdef CONFIG_PAX_REFCOUNT
4521 + tvs %icc, 6
4522 +#endif
4523 +
4524 cas [%o1], %g1, %g7
4525 cmp %g1, %g7
4526 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4527 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4528 2: BACKOFF_SPIN(%o2, %o3, 1b)
4529 .size atomic_add, .-atomic_add
4530
4531 + .globl atomic_add_unchecked
4532 + .type atomic_add_unchecked,#function
4533 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4534 + BACKOFF_SETUP(%o2)
4535 +1: lduw [%o1], %g1
4536 + add %g1, %o0, %g7
4537 + cas [%o1], %g1, %g7
4538 + cmp %g1, %g7
4539 + bne,pn %icc, 2f
4540 + nop
4541 + retl
4542 + nop
4543 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4544 + .size atomic_add_unchecked, .-atomic_add_unchecked
4545 +
4546 .globl atomic_sub
4547 .type atomic_sub,#function
4548 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4549 BACKOFF_SETUP(%o2)
4550 1: lduw [%o1], %g1
4551 - sub %g1, %o0, %g7
4552 + subcc %g1, %o0, %g7
4553 +
4554 +#ifdef CONFIG_PAX_REFCOUNT
4555 + tvs %icc, 6
4556 +#endif
4557 +
4558 cas [%o1], %g1, %g7
4559 cmp %g1, %g7
4560 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4561 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4562 2: BACKOFF_SPIN(%o2, %o3, 1b)
4563 .size atomic_sub, .-atomic_sub
4564
4565 + .globl atomic_sub_unchecked
4566 + .type atomic_sub_unchecked,#function
4567 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4568 + BACKOFF_SETUP(%o2)
4569 +1: lduw [%o1], %g1
4570 + sub %g1, %o0, %g7
4571 + cas [%o1], %g1, %g7
4572 + cmp %g1, %g7
4573 + bne,pn %icc, 2f
4574 + nop
4575 + retl
4576 + nop
4577 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4578 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4579 +
4580 .globl atomic_add_ret
4581 .type atomic_add_ret,#function
4582 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4583 BACKOFF_SETUP(%o2)
4584 1: lduw [%o1], %g1
4585 - add %g1, %o0, %g7
4586 + addcc %g1, %o0, %g7
4587 +
4588 +#ifdef CONFIG_PAX_REFCOUNT
4589 + tvs %icc, 6
4590 +#endif
4591 +
4592 cas [%o1], %g1, %g7
4593 cmp %g1, %g7
4594 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4595 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4596 2: BACKOFF_SPIN(%o2, %o3, 1b)
4597 .size atomic_add_ret, .-atomic_add_ret
4598
4599 + .globl atomic_add_ret_unchecked
4600 + .type atomic_add_ret_unchecked,#function
4601 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4602 + BACKOFF_SETUP(%o2)
4603 +1: lduw [%o1], %g1
4604 + addcc %g1, %o0, %g7
4605 + cas [%o1], %g1, %g7
4606 + cmp %g1, %g7
4607 + bne,pn %icc, 2f
4608 + add %g7, %o0, %g7
4609 + sra %g7, 0, %o0
4610 + retl
4611 + nop
4612 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4613 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4614 +
4615 .globl atomic_sub_ret
4616 .type atomic_sub_ret,#function
4617 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4618 BACKOFF_SETUP(%o2)
4619 1: lduw [%o1], %g1
4620 - sub %g1, %o0, %g7
4621 + subcc %g1, %o0, %g7
4622 +
4623 +#ifdef CONFIG_PAX_REFCOUNT
4624 + tvs %icc, 6
4625 +#endif
4626 +
4627 cas [%o1], %g1, %g7
4628 cmp %g1, %g7
4629 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4630 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4631 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4632 BACKOFF_SETUP(%o2)
4633 1: ldx [%o1], %g1
4634 - add %g1, %o0, %g7
4635 + addcc %g1, %o0, %g7
4636 +
4637 +#ifdef CONFIG_PAX_REFCOUNT
4638 + tvs %xcc, 6
4639 +#endif
4640 +
4641 casx [%o1], %g1, %g7
4642 cmp %g1, %g7
4643 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4644 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4645 2: BACKOFF_SPIN(%o2, %o3, 1b)
4646 .size atomic64_add, .-atomic64_add
4647
4648 + .globl atomic64_add_unchecked
4649 + .type atomic64_add_unchecked,#function
4650 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4651 + BACKOFF_SETUP(%o2)
4652 +1: ldx [%o1], %g1
4653 + addcc %g1, %o0, %g7
4654 + casx [%o1], %g1, %g7
4655 + cmp %g1, %g7
4656 + bne,pn %xcc, 2f
4657 + nop
4658 + retl
4659 + nop
4660 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4661 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4662 +
4663 .globl atomic64_sub
4664 .type atomic64_sub,#function
4665 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4666 BACKOFF_SETUP(%o2)
4667 1: ldx [%o1], %g1
4668 - sub %g1, %o0, %g7
4669 + subcc %g1, %o0, %g7
4670 +
4671 +#ifdef CONFIG_PAX_REFCOUNT
4672 + tvs %xcc, 6
4673 +#endif
4674 +
4675 casx [%o1], %g1, %g7
4676 cmp %g1, %g7
4677 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4678 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4679 2: BACKOFF_SPIN(%o2, %o3, 1b)
4680 .size atomic64_sub, .-atomic64_sub
4681
4682 + .globl atomic64_sub_unchecked
4683 + .type atomic64_sub_unchecked,#function
4684 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4685 + BACKOFF_SETUP(%o2)
4686 +1: ldx [%o1], %g1
4687 + subcc %g1, %o0, %g7
4688 + casx [%o1], %g1, %g7
4689 + cmp %g1, %g7
4690 + bne,pn %xcc, 2f
4691 + nop
4692 + retl
4693 + nop
4694 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4695 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4696 +
4697 .globl atomic64_add_ret
4698 .type atomic64_add_ret,#function
4699 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4700 BACKOFF_SETUP(%o2)
4701 1: ldx [%o1], %g1
4702 - add %g1, %o0, %g7
4703 + addcc %g1, %o0, %g7
4704 +
4705 +#ifdef CONFIG_PAX_REFCOUNT
4706 + tvs %xcc, 6
4707 +#endif
4708 +
4709 casx [%o1], %g1, %g7
4710 cmp %g1, %g7
4711 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4712 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4713 2: BACKOFF_SPIN(%o2, %o3, 1b)
4714 .size atomic64_add_ret, .-atomic64_add_ret
4715
4716 + .globl atomic64_add_ret_unchecked
4717 + .type atomic64_add_ret_unchecked,#function
4718 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4719 + BACKOFF_SETUP(%o2)
4720 +1: ldx [%o1], %g1
4721 + addcc %g1, %o0, %g7
4722 + casx [%o1], %g1, %g7
4723 + cmp %g1, %g7
4724 + bne,pn %xcc, 2f
4725 + add %g7, %o0, %g7
4726 + mov %g7, %o0
4727 + retl
4728 + nop
4729 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4730 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4731 +
4732 .globl atomic64_sub_ret
4733 .type atomic64_sub_ret,#function
4734 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4735 BACKOFF_SETUP(%o2)
4736 1: ldx [%o1], %g1
4737 - sub %g1, %o0, %g7
4738 + subcc %g1, %o0, %g7
4739 +
4740 +#ifdef CONFIG_PAX_REFCOUNT
4741 + tvs %xcc, 6
4742 +#endif
4743 +
4744 casx [%o1], %g1, %g7
4745 cmp %g1, %g7
4746 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4747 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4748 index 1b30bb3..b4a16c7 100644
4749 --- a/arch/sparc/lib/ksyms.c
4750 +++ b/arch/sparc/lib/ksyms.c
4751 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4752
4753 /* Atomic counter implementation. */
4754 EXPORT_SYMBOL(atomic_add);
4755 +EXPORT_SYMBOL(atomic_add_unchecked);
4756 EXPORT_SYMBOL(atomic_add_ret);
4757 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4758 EXPORT_SYMBOL(atomic_sub);
4759 +EXPORT_SYMBOL(atomic_sub_unchecked);
4760 EXPORT_SYMBOL(atomic_sub_ret);
4761 EXPORT_SYMBOL(atomic64_add);
4762 +EXPORT_SYMBOL(atomic64_add_unchecked);
4763 EXPORT_SYMBOL(atomic64_add_ret);
4764 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4765 EXPORT_SYMBOL(atomic64_sub);
4766 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4767 EXPORT_SYMBOL(atomic64_sub_ret);
4768
4769 /* Atomic bit operations. */
4770 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4771 index 301421c..e2535d1 100644
4772 --- a/arch/sparc/mm/Makefile
4773 +++ b/arch/sparc/mm/Makefile
4774 @@ -2,7 +2,7 @@
4775 #
4776
4777 asflags-y := -ansi
4778 -ccflags-y := -Werror
4779 +#ccflags-y := -Werror
4780
4781 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4782 obj-y += fault_$(BITS).o
4783 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4784 index 8023fd7..c8e89e9 100644
4785 --- a/arch/sparc/mm/fault_32.c
4786 +++ b/arch/sparc/mm/fault_32.c
4787 @@ -21,6 +21,9 @@
4788 #include <linux/perf_event.h>
4789 #include <linux/interrupt.h>
4790 #include <linux/kdebug.h>
4791 +#include <linux/slab.h>
4792 +#include <linux/pagemap.h>
4793 +#include <linux/compiler.h>
4794
4795 #include <asm/system.h>
4796 #include <asm/page.h>
4797 @@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4798 return safe_compute_effective_address(regs, insn);
4799 }
4800
4801 +#ifdef CONFIG_PAX_PAGEEXEC
4802 +#ifdef CONFIG_PAX_DLRESOLVE
4803 +static void pax_emuplt_close(struct vm_area_struct *vma)
4804 +{
4805 + vma->vm_mm->call_dl_resolve = 0UL;
4806 +}
4807 +
4808 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4809 +{
4810 + unsigned int *kaddr;
4811 +
4812 + vmf->page = alloc_page(GFP_HIGHUSER);
4813 + if (!vmf->page)
4814 + return VM_FAULT_OOM;
4815 +
4816 + kaddr = kmap(vmf->page);
4817 + memset(kaddr, 0, PAGE_SIZE);
4818 + kaddr[0] = 0x9DE3BFA8U; /* save */
4819 + flush_dcache_page(vmf->page);
4820 + kunmap(vmf->page);
4821 + return VM_FAULT_MAJOR;
4822 +}
4823 +
4824 +static const struct vm_operations_struct pax_vm_ops = {
4825 + .close = pax_emuplt_close,
4826 + .fault = pax_emuplt_fault
4827 +};
4828 +
4829 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4830 +{
4831 + int ret;
4832 +
4833 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4834 + vma->vm_mm = current->mm;
4835 + vma->vm_start = addr;
4836 + vma->vm_end = addr + PAGE_SIZE;
4837 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4838 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4839 + vma->vm_ops = &pax_vm_ops;
4840 +
4841 + ret = insert_vm_struct(current->mm, vma);
4842 + if (ret)
4843 + return ret;
4844 +
4845 + ++current->mm->total_vm;
4846 + return 0;
4847 +}
4848 +#endif
4849 +
4850 +/*
4851 + * PaX: decide what to do with offenders (regs->pc = fault address)
4852 + *
4853 + * returns 1 when task should be killed
4854 + * 2 when patched PLT trampoline was detected
4855 + * 3 when unpatched PLT trampoline was detected
4856 + */
4857 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4858 +{
4859 +
4860 +#ifdef CONFIG_PAX_EMUPLT
4861 + int err;
4862 +
4863 + do { /* PaX: patched PLT emulation #1 */
4864 + unsigned int sethi1, sethi2, jmpl;
4865 +
4866 + err = get_user(sethi1, (unsigned int *)regs->pc);
4867 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4868 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4869 +
4870 + if (err)
4871 + break;
4872 +
4873 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4874 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4875 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4876 + {
4877 + unsigned int addr;
4878 +
4879 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4880 + addr = regs->u_regs[UREG_G1];
4881 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4882 + regs->pc = addr;
4883 + regs->npc = addr+4;
4884 + return 2;
4885 + }
4886 + } while (0);
4887 +
4888 + { /* PaX: patched PLT emulation #2 */
4889 + unsigned int ba;
4890 +
4891 + err = get_user(ba, (unsigned int *)regs->pc);
4892 +
4893 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4894 + unsigned int addr;
4895 +
4896 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4897 + regs->pc = addr;
4898 + regs->npc = addr+4;
4899 + return 2;
4900 + }
4901 + }
4902 +
4903 + do { /* PaX: patched PLT emulation #3 */
4904 + unsigned int sethi, jmpl, nop;
4905 +
4906 + err = get_user(sethi, (unsigned int *)regs->pc);
4907 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4908 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4909 +
4910 + if (err)
4911 + break;
4912 +
4913 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4914 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4915 + nop == 0x01000000U)
4916 + {
4917 + unsigned int addr;
4918 +
4919 + addr = (sethi & 0x003FFFFFU) << 10;
4920 + regs->u_regs[UREG_G1] = addr;
4921 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4922 + regs->pc = addr;
4923 + regs->npc = addr+4;
4924 + return 2;
4925 + }
4926 + } while (0);
4927 +
4928 + do { /* PaX: unpatched PLT emulation step 1 */
4929 + unsigned int sethi, ba, nop;
4930 +
4931 + err = get_user(sethi, (unsigned int *)regs->pc);
4932 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4933 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4934 +
4935 + if (err)
4936 + break;
4937 +
4938 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4939 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4940 + nop == 0x01000000U)
4941 + {
4942 + unsigned int addr, save, call;
4943 +
4944 + if ((ba & 0xFFC00000U) == 0x30800000U)
4945 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4946 + else
4947 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4948 +
4949 + err = get_user(save, (unsigned int *)addr);
4950 + err |= get_user(call, (unsigned int *)(addr+4));
4951 + err |= get_user(nop, (unsigned int *)(addr+8));
4952 + if (err)
4953 + break;
4954 +
4955 +#ifdef CONFIG_PAX_DLRESOLVE
4956 + if (save == 0x9DE3BFA8U &&
4957 + (call & 0xC0000000U) == 0x40000000U &&
4958 + nop == 0x01000000U)
4959 + {
4960 + struct vm_area_struct *vma;
4961 + unsigned long call_dl_resolve;
4962 +
4963 + down_read(&current->mm->mmap_sem);
4964 + call_dl_resolve = current->mm->call_dl_resolve;
4965 + up_read(&current->mm->mmap_sem);
4966 + if (likely(call_dl_resolve))
4967 + goto emulate;
4968 +
4969 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4970 +
4971 + down_write(&current->mm->mmap_sem);
4972 + if (current->mm->call_dl_resolve) {
4973 + call_dl_resolve = current->mm->call_dl_resolve;
4974 + up_write(&current->mm->mmap_sem);
4975 + if (vma)
4976 + kmem_cache_free(vm_area_cachep, vma);
4977 + goto emulate;
4978 + }
4979 +
4980 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4981 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4982 + up_write(&current->mm->mmap_sem);
4983 + if (vma)
4984 + kmem_cache_free(vm_area_cachep, vma);
4985 + return 1;
4986 + }
4987 +
4988 + if (pax_insert_vma(vma, call_dl_resolve)) {
4989 + up_write(&current->mm->mmap_sem);
4990 + kmem_cache_free(vm_area_cachep, vma);
4991 + return 1;
4992 + }
4993 +
4994 + current->mm->call_dl_resolve = call_dl_resolve;
4995 + up_write(&current->mm->mmap_sem);
4996 +
4997 +emulate:
4998 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4999 + regs->pc = call_dl_resolve;
5000 + regs->npc = addr+4;
5001 + return 3;
5002 + }
5003 +#endif
5004 +
5005 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5006 + if ((save & 0xFFC00000U) == 0x05000000U &&
5007 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5008 + nop == 0x01000000U)
5009 + {
5010 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5011 + regs->u_regs[UREG_G2] = addr + 4;
5012 + addr = (save & 0x003FFFFFU) << 10;
5013 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5014 + regs->pc = addr;
5015 + regs->npc = addr+4;
5016 + return 3;
5017 + }
5018 + }
5019 + } while (0);
5020 +
5021 + do { /* PaX: unpatched PLT emulation step 2 */
5022 + unsigned int save, call, nop;
5023 +
5024 + err = get_user(save, (unsigned int *)(regs->pc-4));
5025 + err |= get_user(call, (unsigned int *)regs->pc);
5026 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5027 + if (err)
5028 + break;
5029 +
5030 + if (save == 0x9DE3BFA8U &&
5031 + (call & 0xC0000000U) == 0x40000000U &&
5032 + nop == 0x01000000U)
5033 + {
5034 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5035 +
5036 + regs->u_regs[UREG_RETPC] = regs->pc;
5037 + regs->pc = dl_resolve;
5038 + regs->npc = dl_resolve+4;
5039 + return 3;
5040 + }
5041 + } while (0);
5042 +#endif
5043 +
5044 + return 1;
5045 +}
5046 +
5047 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5048 +{
5049 + unsigned long i;
5050 +
5051 + printk(KERN_ERR "PAX: bytes at PC: ");
5052 + for (i = 0; i < 8; i++) {
5053 + unsigned int c;
5054 + if (get_user(c, (unsigned int *)pc+i))
5055 + printk(KERN_CONT "???????? ");
5056 + else
5057 + printk(KERN_CONT "%08x ", c);
5058 + }
5059 + printk("\n");
5060 +}
5061 +#endif
5062 +
5063 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
5064 int text_fault)
5065 {
5066 @@ -280,6 +545,24 @@ good_area:
5067 if(!(vma->vm_flags & VM_WRITE))
5068 goto bad_area;
5069 } else {
5070 +
5071 +#ifdef CONFIG_PAX_PAGEEXEC
5072 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5073 + up_read(&mm->mmap_sem);
5074 + switch (pax_handle_fetch_fault(regs)) {
5075 +
5076 +#ifdef CONFIG_PAX_EMUPLT
5077 + case 2:
5078 + case 3:
5079 + return;
5080 +#endif
5081 +
5082 + }
5083 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5084 + do_group_exit(SIGKILL);
5085 + }
5086 +#endif
5087 +
5088 /* Allow reads even for write-only mappings */
5089 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5090 goto bad_area;
5091 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
5092 index 504c062..6fcb9c6 100644
5093 --- a/arch/sparc/mm/fault_64.c
5094 +++ b/arch/sparc/mm/fault_64.c
5095 @@ -21,6 +21,9 @@
5096 #include <linux/kprobes.h>
5097 #include <linux/kdebug.h>
5098 #include <linux/percpu.h>
5099 +#include <linux/slab.h>
5100 +#include <linux/pagemap.h>
5101 +#include <linux/compiler.h>
5102
5103 #include <asm/page.h>
5104 #include <asm/pgtable.h>
5105 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
5106 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5107 regs->tpc);
5108 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5109 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5110 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5111 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5112 dump_stack();
5113 unhandled_fault(regs->tpc, current, regs);
5114 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
5115 show_regs(regs);
5116 }
5117
5118 +#ifdef CONFIG_PAX_PAGEEXEC
5119 +#ifdef CONFIG_PAX_DLRESOLVE
5120 +static void pax_emuplt_close(struct vm_area_struct *vma)
5121 +{
5122 + vma->vm_mm->call_dl_resolve = 0UL;
5123 +}
5124 +
5125 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5126 +{
5127 + unsigned int *kaddr;
5128 +
5129 + vmf->page = alloc_page(GFP_HIGHUSER);
5130 + if (!vmf->page)
5131 + return VM_FAULT_OOM;
5132 +
5133 + kaddr = kmap(vmf->page);
5134 + memset(kaddr, 0, PAGE_SIZE);
5135 + kaddr[0] = 0x9DE3BFA8U; /* save */
5136 + flush_dcache_page(vmf->page);
5137 + kunmap(vmf->page);
5138 + return VM_FAULT_MAJOR;
5139 +}
5140 +
5141 +static const struct vm_operations_struct pax_vm_ops = {
5142 + .close = pax_emuplt_close,
5143 + .fault = pax_emuplt_fault
5144 +};
5145 +
5146 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5147 +{
5148 + int ret;
5149 +
5150 + INIT_LIST_HEAD(&vma->anon_vma_chain);
5151 + vma->vm_mm = current->mm;
5152 + vma->vm_start = addr;
5153 + vma->vm_end = addr + PAGE_SIZE;
5154 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5155 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5156 + vma->vm_ops = &pax_vm_ops;
5157 +
5158 + ret = insert_vm_struct(current->mm, vma);
5159 + if (ret)
5160 + return ret;
5161 +
5162 + ++current->mm->total_vm;
5163 + return 0;
5164 +}
5165 +#endif
5166 +
5167 +/*
5168 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5169 + *
5170 + * returns 1 when task should be killed
5171 + * 2 when patched PLT trampoline was detected
5172 + * 3 when unpatched PLT trampoline was detected
5173 + */
5174 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5175 +{
5176 +
5177 +#ifdef CONFIG_PAX_EMUPLT
5178 + int err;
5179 +
5180 + do { /* PaX: patched PLT emulation #1 */
5181 + unsigned int sethi1, sethi2, jmpl;
5182 +
5183 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5184 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5185 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5186 +
5187 + if (err)
5188 + break;
5189 +
5190 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5191 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5192 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5193 + {
5194 + unsigned long addr;
5195 +
5196 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5197 + addr = regs->u_regs[UREG_G1];
5198 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5199 +
5200 + if (test_thread_flag(TIF_32BIT))
5201 + addr &= 0xFFFFFFFFUL;
5202 +
5203 + regs->tpc = addr;
5204 + regs->tnpc = addr+4;
5205 + return 2;
5206 + }
5207 + } while (0);
5208 +
5209 + { /* PaX: patched PLT emulation #2 */
5210 + unsigned int ba;
5211 +
5212 + err = get_user(ba, (unsigned int *)regs->tpc);
5213 +
5214 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5215 + unsigned long addr;
5216 +
5217 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5218 +
5219 + if (test_thread_flag(TIF_32BIT))
5220 + addr &= 0xFFFFFFFFUL;
5221 +
5222 + regs->tpc = addr;
5223 + regs->tnpc = addr+4;
5224 + return 2;
5225 + }
5226 + }
5227 +
5228 + do { /* PaX: patched PLT emulation #3 */
5229 + unsigned int sethi, jmpl, nop;
5230 +
5231 + err = get_user(sethi, (unsigned int *)regs->tpc);
5232 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5233 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5234 +
5235 + if (err)
5236 + break;
5237 +
5238 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5239 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5240 + nop == 0x01000000U)
5241 + {
5242 + unsigned long addr;
5243 +
5244 + addr = (sethi & 0x003FFFFFU) << 10;
5245 + regs->u_regs[UREG_G1] = addr;
5246 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5247 +
5248 + if (test_thread_flag(TIF_32BIT))
5249 + addr &= 0xFFFFFFFFUL;
5250 +
5251 + regs->tpc = addr;
5252 + regs->tnpc = addr+4;
5253 + return 2;
5254 + }
5255 + } while (0);
5256 +
5257 + do { /* PaX: patched PLT emulation #4 */
5258 + unsigned int sethi, mov1, call, mov2;
5259 +
5260 + err = get_user(sethi, (unsigned int *)regs->tpc);
5261 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5262 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5263 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5264 +
5265 + if (err)
5266 + break;
5267 +
5268 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5269 + mov1 == 0x8210000FU &&
5270 + (call & 0xC0000000U) == 0x40000000U &&
5271 + mov2 == 0x9E100001U)
5272 + {
5273 + unsigned long addr;
5274 +
5275 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5276 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5277 +
5278 + if (test_thread_flag(TIF_32BIT))
5279 + addr &= 0xFFFFFFFFUL;
5280 +
5281 + regs->tpc = addr;
5282 + regs->tnpc = addr+4;
5283 + return 2;
5284 + }
5285 + } while (0);
5286 +
5287 + do { /* PaX: patched PLT emulation #5 */
5288 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5289 +
5290 + err = get_user(sethi, (unsigned int *)regs->tpc);
5291 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5292 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5293 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5294 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5295 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5296 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5297 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5298 +
5299 + if (err)
5300 + break;
5301 +
5302 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5303 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5304 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5305 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5306 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5307 + sllx == 0x83287020U &&
5308 + jmpl == 0x81C04005U &&
5309 + nop == 0x01000000U)
5310 + {
5311 + unsigned long addr;
5312 +
5313 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5314 + regs->u_regs[UREG_G1] <<= 32;
5315 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5316 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5317 + regs->tpc = addr;
5318 + regs->tnpc = addr+4;
5319 + return 2;
5320 + }
5321 + } while (0);
5322 +
5323 + do { /* PaX: patched PLT emulation #6 */
5324 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5325 +
5326 + err = get_user(sethi, (unsigned int *)regs->tpc);
5327 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5328 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5329 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5330 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5331 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5332 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5333 +
5334 + if (err)
5335 + break;
5336 +
5337 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5338 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5339 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5340 + sllx == 0x83287020U &&
5341 + (or & 0xFFFFE000U) == 0x8A116000U &&
5342 + jmpl == 0x81C04005U &&
5343 + nop == 0x01000000U)
5344 + {
5345 + unsigned long addr;
5346 +
5347 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5348 + regs->u_regs[UREG_G1] <<= 32;
5349 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5350 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5351 + regs->tpc = addr;
5352 + regs->tnpc = addr+4;
5353 + return 2;
5354 + }
5355 + } while (0);
5356 +
5357 + do { /* PaX: unpatched PLT emulation step 1 */
5358 + unsigned int sethi, ba, nop;
5359 +
5360 + err = get_user(sethi, (unsigned int *)regs->tpc);
5361 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5362 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5363 +
5364 + if (err)
5365 + break;
5366 +
5367 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5368 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5369 + nop == 0x01000000U)
5370 + {
5371 + unsigned long addr;
5372 + unsigned int save, call;
5373 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5374 +
5375 + if ((ba & 0xFFC00000U) == 0x30800000U)
5376 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5377 + else
5378 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5379 +
5380 + if (test_thread_flag(TIF_32BIT))
5381 + addr &= 0xFFFFFFFFUL;
5382 +
5383 + err = get_user(save, (unsigned int *)addr);
5384 + err |= get_user(call, (unsigned int *)(addr+4));
5385 + err |= get_user(nop, (unsigned int *)(addr+8));
5386 + if (err)
5387 + break;
5388 +
5389 +#ifdef CONFIG_PAX_DLRESOLVE
5390 + if (save == 0x9DE3BFA8U &&
5391 + (call & 0xC0000000U) == 0x40000000U &&
5392 + nop == 0x01000000U)
5393 + {
5394 + struct vm_area_struct *vma;
5395 + unsigned long call_dl_resolve;
5396 +
5397 + down_read(&current->mm->mmap_sem);
5398 + call_dl_resolve = current->mm->call_dl_resolve;
5399 + up_read(&current->mm->mmap_sem);
5400 + if (likely(call_dl_resolve))
5401 + goto emulate;
5402 +
5403 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5404 +
5405 + down_write(&current->mm->mmap_sem);
5406 + if (current->mm->call_dl_resolve) {
5407 + call_dl_resolve = current->mm->call_dl_resolve;
5408 + up_write(&current->mm->mmap_sem);
5409 + if (vma)
5410 + kmem_cache_free(vm_area_cachep, vma);
5411 + goto emulate;
5412 + }
5413 +
5414 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5415 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5416 + up_write(&current->mm->mmap_sem);
5417 + if (vma)
5418 + kmem_cache_free(vm_area_cachep, vma);
5419 + return 1;
5420 + }
5421 +
5422 + if (pax_insert_vma(vma, call_dl_resolve)) {
5423 + up_write(&current->mm->mmap_sem);
5424 + kmem_cache_free(vm_area_cachep, vma);
5425 + return 1;
5426 + }
5427 +
5428 + current->mm->call_dl_resolve = call_dl_resolve;
5429 + up_write(&current->mm->mmap_sem);
5430 +
5431 +emulate:
5432 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5433 + regs->tpc = call_dl_resolve;
5434 + regs->tnpc = addr+4;
5435 + return 3;
5436 + }
5437 +#endif
5438 +
5439 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5440 + if ((save & 0xFFC00000U) == 0x05000000U &&
5441 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5442 + nop == 0x01000000U)
5443 + {
5444 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5445 + regs->u_regs[UREG_G2] = addr + 4;
5446 + addr = (save & 0x003FFFFFU) << 10;
5447 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5448 +
5449 + if (test_thread_flag(TIF_32BIT))
5450 + addr &= 0xFFFFFFFFUL;
5451 +
5452 + regs->tpc = addr;
5453 + regs->tnpc = addr+4;
5454 + return 3;
5455 + }
5456 +
5457 + /* PaX: 64-bit PLT stub */
5458 + err = get_user(sethi1, (unsigned int *)addr);
5459 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5460 + err |= get_user(or1, (unsigned int *)(addr+8));
5461 + err |= get_user(or2, (unsigned int *)(addr+12));
5462 + err |= get_user(sllx, (unsigned int *)(addr+16));
5463 + err |= get_user(add, (unsigned int *)(addr+20));
5464 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5465 + err |= get_user(nop, (unsigned int *)(addr+28));
5466 + if (err)
5467 + break;
5468 +
5469 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5470 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5471 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5472 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5473 + sllx == 0x89293020U &&
5474 + add == 0x8A010005U &&
5475 + jmpl == 0x89C14000U &&
5476 + nop == 0x01000000U)
5477 + {
5478 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5479 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5480 + regs->u_regs[UREG_G4] <<= 32;
5481 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5482 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5483 + regs->u_regs[UREG_G4] = addr + 24;
5484 + addr = regs->u_regs[UREG_G5];
5485 + regs->tpc = addr;
5486 + regs->tnpc = addr+4;
5487 + return 3;
5488 + }
5489 + }
5490 + } while (0);
5491 +
5492 +#ifdef CONFIG_PAX_DLRESOLVE
5493 + do { /* PaX: unpatched PLT emulation step 2 */
5494 + unsigned int save, call, nop;
5495 +
5496 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5497 + err |= get_user(call, (unsigned int *)regs->tpc);
5498 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5499 + if (err)
5500 + break;
5501 +
5502 + if (save == 0x9DE3BFA8U &&
5503 + (call & 0xC0000000U) == 0x40000000U &&
5504 + nop == 0x01000000U)
5505 + {
5506 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5507 +
5508 + if (test_thread_flag(TIF_32BIT))
5509 + dl_resolve &= 0xFFFFFFFFUL;
5510 +
5511 + regs->u_regs[UREG_RETPC] = regs->tpc;
5512 + regs->tpc = dl_resolve;
5513 + regs->tnpc = dl_resolve+4;
5514 + return 3;
5515 + }
5516 + } while (0);
5517 +#endif
5518 +
5519 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5520 + unsigned int sethi, ba, nop;
5521 +
5522 + err = get_user(sethi, (unsigned int *)regs->tpc);
5523 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5524 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5525 +
5526 + if (err)
5527 + break;
5528 +
5529 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5530 + (ba & 0xFFF00000U) == 0x30600000U &&
5531 + nop == 0x01000000U)
5532 + {
5533 + unsigned long addr;
5534 +
5535 + addr = (sethi & 0x003FFFFFU) << 10;
5536 + regs->u_regs[UREG_G1] = addr;
5537 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5538 +
5539 + if (test_thread_flag(TIF_32BIT))
5540 + addr &= 0xFFFFFFFFUL;
5541 +
5542 + regs->tpc = addr;
5543 + regs->tnpc = addr+4;
5544 + return 2;
5545 + }
5546 + } while (0);
5547 +
5548 +#endif
5549 +
5550 + return 1;
5551 +}
5552 +
5553 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5554 +{
5555 + unsigned long i;
5556 +
5557 + printk(KERN_ERR "PAX: bytes at PC: ");
5558 + for (i = 0; i < 8; i++) {
5559 + unsigned int c;
5560 + if (get_user(c, (unsigned int *)pc+i))
5561 + printk(KERN_CONT "???????? ");
5562 + else
5563 + printk(KERN_CONT "%08x ", c);
5564 + }
5565 + printk("\n");
5566 +}
5567 +#endif
5568 +
5569 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5570 {
5571 struct mm_struct *mm = current->mm;
5572 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5573 if (!vma)
5574 goto bad_area;
5575
5576 +#ifdef CONFIG_PAX_PAGEEXEC
5577 + /* PaX: detect ITLB misses on non-exec pages */
5578 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5579 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5580 + {
5581 + if (address != regs->tpc)
5582 + goto good_area;
5583 +
5584 + up_read(&mm->mmap_sem);
5585 + switch (pax_handle_fetch_fault(regs)) {
5586 +
5587 +#ifdef CONFIG_PAX_EMUPLT
5588 + case 2:
5589 + case 3:
5590 + return;
5591 +#endif
5592 +
5593 + }
5594 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5595 + do_group_exit(SIGKILL);
5596 + }
5597 +#endif
5598 +
5599 /* Pure DTLB misses do not tell us whether the fault causing
5600 * load/store/atomic was a write or not, it only says that there
5601 * was no match. So in such a case we (carefully) read the
5602 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5603 index 07e1453..0a7d9e9 100644
5604 --- a/arch/sparc/mm/hugetlbpage.c
5605 +++ b/arch/sparc/mm/hugetlbpage.c
5606 @@ -67,7 +67,7 @@ full_search:
5607 }
5608 return -ENOMEM;
5609 }
5610 - if (likely(!vma || addr + len <= vma->vm_start)) {
5611 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5612 /*
5613 * Remember the place where we stopped the search:
5614 */
5615 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5616 /* make sure it can fit in the remaining address space */
5617 if (likely(addr > len)) {
5618 vma = find_vma(mm, addr-len);
5619 - if (!vma || addr <= vma->vm_start) {
5620 + if (check_heap_stack_gap(vma, addr - len, len)) {
5621 /* remember the address as a hint for next time */
5622 return (mm->free_area_cache = addr-len);
5623 }
5624 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5625 if (unlikely(mm->mmap_base < len))
5626 goto bottomup;
5627
5628 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5629 + addr = mm->mmap_base - len;
5630
5631 do {
5632 + addr &= HPAGE_MASK;
5633 /*
5634 * Lookup failure means no vma is above this address,
5635 * else if new region fits below vma->vm_start,
5636 * return with success:
5637 */
5638 vma = find_vma(mm, addr);
5639 - if (likely(!vma || addr+len <= vma->vm_start)) {
5640 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5641 /* remember the address as a hint for next time */
5642 return (mm->free_area_cache = addr);
5643 }
5644 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5645 mm->cached_hole_size = vma->vm_start - addr;
5646
5647 /* try just below the current vma->vm_start */
5648 - addr = (vma->vm_start-len) & HPAGE_MASK;
5649 - } while (likely(len < vma->vm_start));
5650 + addr = skip_heap_stack_gap(vma, len);
5651 + } while (!IS_ERR_VALUE(addr));
5652
5653 bottomup:
5654 /*
5655 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5656 if (addr) {
5657 addr = ALIGN(addr, HPAGE_SIZE);
5658 vma = find_vma(mm, addr);
5659 - if (task_size - len >= addr &&
5660 - (!vma || addr + len <= vma->vm_start))
5661 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5662 return addr;
5663 }
5664 if (mm->get_unmapped_area == arch_get_unmapped_area)
5665 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5666 index 7b00de6..78239f4 100644
5667 --- a/arch/sparc/mm/init_32.c
5668 +++ b/arch/sparc/mm/init_32.c
5669 @@ -316,6 +316,9 @@ extern void device_scan(void);
5670 pgprot_t PAGE_SHARED __read_mostly;
5671 EXPORT_SYMBOL(PAGE_SHARED);
5672
5673 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5674 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5675 +
5676 void __init paging_init(void)
5677 {
5678 switch(sparc_cpu_model) {
5679 @@ -344,17 +347,17 @@ void __init paging_init(void)
5680
5681 /* Initialize the protection map with non-constant, MMU dependent values. */
5682 protection_map[0] = PAGE_NONE;
5683 - protection_map[1] = PAGE_READONLY;
5684 - protection_map[2] = PAGE_COPY;
5685 - protection_map[3] = PAGE_COPY;
5686 + protection_map[1] = PAGE_READONLY_NOEXEC;
5687 + protection_map[2] = PAGE_COPY_NOEXEC;
5688 + protection_map[3] = PAGE_COPY_NOEXEC;
5689 protection_map[4] = PAGE_READONLY;
5690 protection_map[5] = PAGE_READONLY;
5691 protection_map[6] = PAGE_COPY;
5692 protection_map[7] = PAGE_COPY;
5693 protection_map[8] = PAGE_NONE;
5694 - protection_map[9] = PAGE_READONLY;
5695 - protection_map[10] = PAGE_SHARED;
5696 - protection_map[11] = PAGE_SHARED;
5697 + protection_map[9] = PAGE_READONLY_NOEXEC;
5698 + protection_map[10] = PAGE_SHARED_NOEXEC;
5699 + protection_map[11] = PAGE_SHARED_NOEXEC;
5700 protection_map[12] = PAGE_READONLY;
5701 protection_map[13] = PAGE_READONLY;
5702 protection_map[14] = PAGE_SHARED;
5703 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5704 index cbef74e..c38fead 100644
5705 --- a/arch/sparc/mm/srmmu.c
5706 +++ b/arch/sparc/mm/srmmu.c
5707 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5708 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5709 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5710 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5711 +
5712 +#ifdef CONFIG_PAX_PAGEEXEC
5713 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5714 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5715 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5716 +#endif
5717 +
5718 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5719 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5720
5721 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
5722 index 27fe667..36d474c 100644
5723 --- a/arch/tile/include/asm/atomic_64.h
5724 +++ b/arch/tile/include/asm/atomic_64.h
5725 @@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5726
5727 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5728
5729 +#define atomic64_read_unchecked(v) atomic64_read(v)
5730 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5731 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5732 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5733 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5734 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
5735 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5736 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
5737 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5738 +
5739 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
5740 #define smp_mb__before_atomic_dec() smp_mb()
5741 #define smp_mb__after_atomic_dec() smp_mb()
5742 diff --git a/arch/um/Makefile b/arch/um/Makefile
5743 index 7730af6..cce5b19 100644
5744 --- a/arch/um/Makefile
5745 +++ b/arch/um/Makefile
5746 @@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5747 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5748 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
5749
5750 +ifdef CONSTIFY_PLUGIN
5751 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5752 +endif
5753 +
5754 #This will adjust *FLAGS accordingly to the platform.
5755 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
5756
5757 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5758 index 6c03acd..a5e0215 100644
5759 --- a/arch/um/include/asm/kmap_types.h
5760 +++ b/arch/um/include/asm/kmap_types.h
5761 @@ -23,6 +23,7 @@ enum km_type {
5762 KM_IRQ1,
5763 KM_SOFTIRQ0,
5764 KM_SOFTIRQ1,
5765 + KM_CLEARPAGE,
5766 KM_TYPE_NR
5767 };
5768
5769 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5770 index 7cfc3ce..cbd1a58 100644
5771 --- a/arch/um/include/asm/page.h
5772 +++ b/arch/um/include/asm/page.h
5773 @@ -14,6 +14,9 @@
5774 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5775 #define PAGE_MASK (~(PAGE_SIZE-1))
5776
5777 +#define ktla_ktva(addr) (addr)
5778 +#define ktva_ktla(addr) (addr)
5779 +
5780 #ifndef __ASSEMBLY__
5781
5782 struct page;
5783 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5784 index c533835..84db18e 100644
5785 --- a/arch/um/kernel/process.c
5786 +++ b/arch/um/kernel/process.c
5787 @@ -406,22 +406,6 @@ int singlestepping(void * t)
5788 return 2;
5789 }
5790
5791 -/*
5792 - * Only x86 and x86_64 have an arch_align_stack().
5793 - * All other arches have "#define arch_align_stack(x) (x)"
5794 - * in their asm/system.h
5795 - * As this is included in UML from asm-um/system-generic.h,
5796 - * we can use it to behave as the subarch does.
5797 - */
5798 -#ifndef arch_align_stack
5799 -unsigned long arch_align_stack(unsigned long sp)
5800 -{
5801 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5802 - sp -= get_random_int() % 8192;
5803 - return sp & ~0xf;
5804 -}
5805 -#endif
5806 -
5807 unsigned long get_wchan(struct task_struct *p)
5808 {
5809 unsigned long stack_page, sp, ip;
5810 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5811 index efb4294..61bc18c 100644
5812 --- a/arch/x86/Kconfig
5813 +++ b/arch/x86/Kconfig
5814 @@ -235,7 +235,7 @@ config X86_HT
5815
5816 config X86_32_LAZY_GS
5817 def_bool y
5818 - depends on X86_32 && !CC_STACKPROTECTOR
5819 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5820
5821 config ARCH_HWEIGHT_CFLAGS
5822 string
5823 @@ -1022,7 +1022,7 @@ choice
5824
5825 config NOHIGHMEM
5826 bool "off"
5827 - depends on !X86_NUMAQ
5828 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5829 ---help---
5830 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5831 However, the address space of 32-bit x86 processors is only 4
5832 @@ -1059,7 +1059,7 @@ config NOHIGHMEM
5833
5834 config HIGHMEM4G
5835 bool "4GB"
5836 - depends on !X86_NUMAQ
5837 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5838 ---help---
5839 Select this if you have a 32-bit processor and between 1 and 4
5840 gigabytes of physical RAM.
5841 @@ -1113,7 +1113,7 @@ config PAGE_OFFSET
5842 hex
5843 default 0xB0000000 if VMSPLIT_3G_OPT
5844 default 0x80000000 if VMSPLIT_2G
5845 - default 0x78000000 if VMSPLIT_2G_OPT
5846 + default 0x70000000 if VMSPLIT_2G_OPT
5847 default 0x40000000 if VMSPLIT_1G
5848 default 0xC0000000
5849 depends on X86_32
5850 @@ -1496,6 +1496,7 @@ config SECCOMP
5851
5852 config CC_STACKPROTECTOR
5853 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5854 + depends on X86_64 || !PAX_MEMORY_UDEREF
5855 ---help---
5856 This option turns on the -fstack-protector GCC feature. This
5857 feature puts, at the beginning of functions, a canary value on
5858 @@ -1553,6 +1554,7 @@ config KEXEC_JUMP
5859 config PHYSICAL_START
5860 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5861 default "0x1000000"
5862 + range 0x400000 0x40000000
5863 ---help---
5864 This gives the physical address where the kernel is loaded.
5865
5866 @@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
5867 config PHYSICAL_ALIGN
5868 hex "Alignment value to which kernel should be aligned" if X86_32
5869 default "0x1000000"
5870 + range 0x400000 0x1000000 if PAX_KERNEXEC
5871 range 0x2000 0x1000000
5872 ---help---
5873 This value puts the alignment restrictions on physical address
5874 @@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
5875 Say N if you want to disable CPU hotplug.
5876
5877 config COMPAT_VDSO
5878 - def_bool y
5879 + def_bool n
5880 prompt "Compat VDSO support"
5881 depends on X86_32 || IA32_EMULATION
5882 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5883 ---help---
5884 Map the 32-bit VDSO to the predictable old-style address too.
5885
5886 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5887 index e3ca7e0..b30b28a 100644
5888 --- a/arch/x86/Kconfig.cpu
5889 +++ b/arch/x86/Kconfig.cpu
5890 @@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5891
5892 config X86_F00F_BUG
5893 def_bool y
5894 - depends on M586MMX || M586TSC || M586 || M486 || M386
5895 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5896
5897 config X86_INVD_BUG
5898 def_bool y
5899 @@ -365,7 +365,7 @@ config X86_POPAD_OK
5900
5901 config X86_ALIGNMENT_16
5902 def_bool y
5903 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5904 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5905
5906 config X86_INTEL_USERCOPY
5907 def_bool y
5908 @@ -411,7 +411,7 @@ config X86_CMPXCHG64
5909 # generates cmov.
5910 config X86_CMOV
5911 def_bool y
5912 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5913 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5914
5915 config X86_MINIMUM_CPU_FAMILY
5916 int
5917 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5918 index bf56e17..05f9891 100644
5919 --- a/arch/x86/Kconfig.debug
5920 +++ b/arch/x86/Kconfig.debug
5921 @@ -81,7 +81,7 @@ config X86_PTDUMP
5922 config DEBUG_RODATA
5923 bool "Write protect kernel read-only data structures"
5924 default y
5925 - depends on DEBUG_KERNEL
5926 + depends on DEBUG_KERNEL && BROKEN
5927 ---help---
5928 Mark the kernel read-only data as write-protected in the pagetables,
5929 in order to catch accidental (and incorrect) writes to such const
5930 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5931
5932 config DEBUG_SET_MODULE_RONX
5933 bool "Set loadable kernel module data as NX and text as RO"
5934 - depends on MODULES
5935 + depends on MODULES && BROKEN
5936 ---help---
5937 This option helps catch unintended modifications to loadable
5938 kernel module's text and read-only data. It also prevents execution
5939 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5940 index b02e509..2631e48 100644
5941 --- a/arch/x86/Makefile
5942 +++ b/arch/x86/Makefile
5943 @@ -46,6 +46,7 @@ else
5944 UTS_MACHINE := x86_64
5945 CHECKFLAGS += -D__x86_64__ -m64
5946
5947 + biarch := $(call cc-option,-m64)
5948 KBUILD_AFLAGS += -m64
5949 KBUILD_CFLAGS += -m64
5950
5951 @@ -195,3 +196,12 @@ define archhelp
5952 echo ' FDARGS="..." arguments for the booted kernel'
5953 echo ' FDINITRD=file initrd for the booted kernel'
5954 endef
5955 +
5956 +define OLD_LD
5957 +
5958 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5959 +*** Please upgrade your binutils to 2.18 or newer
5960 +endef
5961 +
5962 +archprepare:
5963 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5964 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5965 index 95365a8..52f857b 100644
5966 --- a/arch/x86/boot/Makefile
5967 +++ b/arch/x86/boot/Makefile
5968 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5969 $(call cc-option, -fno-stack-protector) \
5970 $(call cc-option, -mpreferred-stack-boundary=2)
5971 KBUILD_CFLAGS += $(call cc-option, -m32)
5972 +ifdef CONSTIFY_PLUGIN
5973 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5974 +endif
5975 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5976 GCOV_PROFILE := n
5977
5978 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5979 index 878e4b9..20537ab 100644
5980 --- a/arch/x86/boot/bitops.h
5981 +++ b/arch/x86/boot/bitops.h
5982 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5983 u8 v;
5984 const u32 *p = (const u32 *)addr;
5985
5986 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5987 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5988 return v;
5989 }
5990
5991 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5992
5993 static inline void set_bit(int nr, void *addr)
5994 {
5995 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5996 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5997 }
5998
5999 #endif /* BOOT_BITOPS_H */
6000 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
6001 index c7093bd..d4247ffe0 100644
6002 --- a/arch/x86/boot/boot.h
6003 +++ b/arch/x86/boot/boot.h
6004 @@ -85,7 +85,7 @@ static inline void io_delay(void)
6005 static inline u16 ds(void)
6006 {
6007 u16 seg;
6008 - asm("movw %%ds,%0" : "=rm" (seg));
6009 + asm volatile("movw %%ds,%0" : "=rm" (seg));
6010 return seg;
6011 }
6012
6013 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
6014 static inline int memcmp(const void *s1, const void *s2, size_t len)
6015 {
6016 u8 diff;
6017 - asm("repe; cmpsb; setnz %0"
6018 + asm volatile("repe; cmpsb; setnz %0"
6019 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6020 return diff;
6021 }
6022 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
6023 index 09664ef..edc5d03 100644
6024 --- a/arch/x86/boot/compressed/Makefile
6025 +++ b/arch/x86/boot/compressed/Makefile
6026 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
6027 KBUILD_CFLAGS += $(cflags-y)
6028 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6029 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6030 +ifdef CONSTIFY_PLUGIN
6031 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6032 +endif
6033
6034 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6035 GCOV_PROFILE := n
6036 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
6037 index 67a655a..b924059 100644
6038 --- a/arch/x86/boot/compressed/head_32.S
6039 +++ b/arch/x86/boot/compressed/head_32.S
6040 @@ -76,7 +76,7 @@ ENTRY(startup_32)
6041 notl %eax
6042 andl %eax, %ebx
6043 #else
6044 - movl $LOAD_PHYSICAL_ADDR, %ebx
6045 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6046 #endif
6047
6048 /* Target address to relocate to for decompression */
6049 @@ -162,7 +162,7 @@ relocated:
6050 * and where it was actually loaded.
6051 */
6052 movl %ebp, %ebx
6053 - subl $LOAD_PHYSICAL_ADDR, %ebx
6054 + subl $____LOAD_PHYSICAL_ADDR, %ebx
6055 jz 2f /* Nothing to be done if loaded at compiled addr. */
6056 /*
6057 * Process relocations.
6058 @@ -170,8 +170,7 @@ relocated:
6059
6060 1: subl $4, %edi
6061 movl (%edi), %ecx
6062 - testl %ecx, %ecx
6063 - jz 2f
6064 + jecxz 2f
6065 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6066 jmp 1b
6067 2:
6068 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
6069 index 35af09d..99c9676 100644
6070 --- a/arch/x86/boot/compressed/head_64.S
6071 +++ b/arch/x86/boot/compressed/head_64.S
6072 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6073 notl %eax
6074 andl %eax, %ebx
6075 #else
6076 - movl $LOAD_PHYSICAL_ADDR, %ebx
6077 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6078 #endif
6079
6080 /* Target address to relocate to for decompression */
6081 @@ -233,7 +233,7 @@ ENTRY(startup_64)
6082 notq %rax
6083 andq %rax, %rbp
6084 #else
6085 - movq $LOAD_PHYSICAL_ADDR, %rbp
6086 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6087 #endif
6088
6089 /* Target address to relocate to for decompression */
6090 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
6091 index 3a19d04..7c1d55a 100644
6092 --- a/arch/x86/boot/compressed/misc.c
6093 +++ b/arch/x86/boot/compressed/misc.c
6094 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
6095 case PT_LOAD:
6096 #ifdef CONFIG_RELOCATABLE
6097 dest = output;
6098 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6099 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6100 #else
6101 dest = (void *)(phdr->p_paddr);
6102 #endif
6103 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
6104 error("Destination address too large");
6105 #endif
6106 #ifndef CONFIG_RELOCATABLE
6107 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6108 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6109 error("Wrong destination address");
6110 #endif
6111
6112 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
6113 index 89bbf4e..869908e 100644
6114 --- a/arch/x86/boot/compressed/relocs.c
6115 +++ b/arch/x86/boot/compressed/relocs.c
6116 @@ -13,8 +13,11 @@
6117
6118 static void die(char *fmt, ...);
6119
6120 +#include "../../../../include/generated/autoconf.h"
6121 +
6122 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6123 static Elf32_Ehdr ehdr;
6124 +static Elf32_Phdr *phdr;
6125 static unsigned long reloc_count, reloc_idx;
6126 static unsigned long *relocs;
6127
6128 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
6129 }
6130 }
6131
6132 +static void read_phdrs(FILE *fp)
6133 +{
6134 + unsigned int i;
6135 +
6136 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6137 + if (!phdr) {
6138 + die("Unable to allocate %d program headers\n",
6139 + ehdr.e_phnum);
6140 + }
6141 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6142 + die("Seek to %d failed: %s\n",
6143 + ehdr.e_phoff, strerror(errno));
6144 + }
6145 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6146 + die("Cannot read ELF program headers: %s\n",
6147 + strerror(errno));
6148 + }
6149 + for(i = 0; i < ehdr.e_phnum; i++) {
6150 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6151 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6152 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6153 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6154 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6155 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6156 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6157 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6158 + }
6159 +
6160 +}
6161 +
6162 static void read_shdrs(FILE *fp)
6163 {
6164 - int i;
6165 + unsigned int i;
6166 Elf32_Shdr shdr;
6167
6168 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6169 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
6170
6171 static void read_strtabs(FILE *fp)
6172 {
6173 - int i;
6174 + unsigned int i;
6175 for (i = 0; i < ehdr.e_shnum; i++) {
6176 struct section *sec = &secs[i];
6177 if (sec->shdr.sh_type != SHT_STRTAB) {
6178 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
6179
6180 static void read_symtabs(FILE *fp)
6181 {
6182 - int i,j;
6183 + unsigned int i,j;
6184 for (i = 0; i < ehdr.e_shnum; i++) {
6185 struct section *sec = &secs[i];
6186 if (sec->shdr.sh_type != SHT_SYMTAB) {
6187 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
6188
6189 static void read_relocs(FILE *fp)
6190 {
6191 - int i,j;
6192 + unsigned int i,j;
6193 + uint32_t base;
6194 +
6195 for (i = 0; i < ehdr.e_shnum; i++) {
6196 struct section *sec = &secs[i];
6197 if (sec->shdr.sh_type != SHT_REL) {
6198 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
6199 die("Cannot read symbol table: %s\n",
6200 strerror(errno));
6201 }
6202 + base = 0;
6203 + for (j = 0; j < ehdr.e_phnum; j++) {
6204 + if (phdr[j].p_type != PT_LOAD )
6205 + continue;
6206 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6207 + continue;
6208 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6209 + break;
6210 + }
6211 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6212 Elf32_Rel *rel = &sec->reltab[j];
6213 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6214 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6215 rel->r_info = elf32_to_cpu(rel->r_info);
6216 }
6217 }
6218 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
6219
6220 static void print_absolute_symbols(void)
6221 {
6222 - int i;
6223 + unsigned int i;
6224 printf("Absolute symbols\n");
6225 printf(" Num: Value Size Type Bind Visibility Name\n");
6226 for (i = 0; i < ehdr.e_shnum; i++) {
6227 struct section *sec = &secs[i];
6228 char *sym_strtab;
6229 Elf32_Sym *sh_symtab;
6230 - int j;
6231 + unsigned int j;
6232
6233 if (sec->shdr.sh_type != SHT_SYMTAB) {
6234 continue;
6235 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
6236
6237 static void print_absolute_relocs(void)
6238 {
6239 - int i, printed = 0;
6240 + unsigned int i, printed = 0;
6241
6242 for (i = 0; i < ehdr.e_shnum; i++) {
6243 struct section *sec = &secs[i];
6244 struct section *sec_applies, *sec_symtab;
6245 char *sym_strtab;
6246 Elf32_Sym *sh_symtab;
6247 - int j;
6248 + unsigned int j;
6249 if (sec->shdr.sh_type != SHT_REL) {
6250 continue;
6251 }
6252 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6253
6254 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6255 {
6256 - int i;
6257 + unsigned int i;
6258 /* Walk through the relocations */
6259 for (i = 0; i < ehdr.e_shnum; i++) {
6260 char *sym_strtab;
6261 Elf32_Sym *sh_symtab;
6262 struct section *sec_applies, *sec_symtab;
6263 - int j;
6264 + unsigned int j;
6265 struct section *sec = &secs[i];
6266
6267 if (sec->shdr.sh_type != SHT_REL) {
6268 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6269 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6270 continue;
6271 }
6272 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6273 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6274 + continue;
6275 +
6276 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6277 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6278 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6279 + continue;
6280 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6281 + continue;
6282 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6283 + continue;
6284 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6285 + continue;
6286 +#endif
6287 +
6288 switch (r_type) {
6289 case R_386_NONE:
6290 case R_386_PC32:
6291 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6292
6293 static void emit_relocs(int as_text)
6294 {
6295 - int i;
6296 + unsigned int i;
6297 /* Count how many relocations I have and allocate space for them. */
6298 reloc_count = 0;
6299 walk_relocs(count_reloc);
6300 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
6301 fname, strerror(errno));
6302 }
6303 read_ehdr(fp);
6304 + read_phdrs(fp);
6305 read_shdrs(fp);
6306 read_strtabs(fp);
6307 read_symtabs(fp);
6308 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6309 index 4d3ff03..e4972ff 100644
6310 --- a/arch/x86/boot/cpucheck.c
6311 +++ b/arch/x86/boot/cpucheck.c
6312 @@ -74,7 +74,7 @@ static int has_fpu(void)
6313 u16 fcw = -1, fsw = -1;
6314 u32 cr0;
6315
6316 - asm("movl %%cr0,%0" : "=r" (cr0));
6317 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6318 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6319 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6320 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6321 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6322 {
6323 u32 f0, f1;
6324
6325 - asm("pushfl ; "
6326 + asm volatile("pushfl ; "
6327 "pushfl ; "
6328 "popl %0 ; "
6329 "movl %0,%1 ; "
6330 @@ -115,7 +115,7 @@ static void get_flags(void)
6331 set_bit(X86_FEATURE_FPU, cpu.flags);
6332
6333 if (has_eflag(X86_EFLAGS_ID)) {
6334 - asm("cpuid"
6335 + asm volatile("cpuid"
6336 : "=a" (max_intel_level),
6337 "=b" (cpu_vendor[0]),
6338 "=d" (cpu_vendor[1]),
6339 @@ -124,7 +124,7 @@ static void get_flags(void)
6340
6341 if (max_intel_level >= 0x00000001 &&
6342 max_intel_level <= 0x0000ffff) {
6343 - asm("cpuid"
6344 + asm volatile("cpuid"
6345 : "=a" (tfms),
6346 "=c" (cpu.flags[4]),
6347 "=d" (cpu.flags[0])
6348 @@ -136,7 +136,7 @@ static void get_flags(void)
6349 cpu.model += ((tfms >> 16) & 0xf) << 4;
6350 }
6351
6352 - asm("cpuid"
6353 + asm volatile("cpuid"
6354 : "=a" (max_amd_level)
6355 : "a" (0x80000000)
6356 : "ebx", "ecx", "edx");
6357 @@ -144,7 +144,7 @@ static void get_flags(void)
6358 if (max_amd_level >= 0x80000001 &&
6359 max_amd_level <= 0x8000ffff) {
6360 u32 eax = 0x80000001;
6361 - asm("cpuid"
6362 + asm volatile("cpuid"
6363 : "+a" (eax),
6364 "=c" (cpu.flags[6]),
6365 "=d" (cpu.flags[1])
6366 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6367 u32 ecx = MSR_K7_HWCR;
6368 u32 eax, edx;
6369
6370 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6371 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6372 eax &= ~(1 << 15);
6373 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6374 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6375
6376 get_flags(); /* Make sure it really did something */
6377 err = check_flags();
6378 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6379 u32 ecx = MSR_VIA_FCR;
6380 u32 eax, edx;
6381
6382 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6383 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6384 eax |= (1<<1)|(1<<7);
6385 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6386 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6387
6388 set_bit(X86_FEATURE_CX8, cpu.flags);
6389 err = check_flags();
6390 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6391 u32 eax, edx;
6392 u32 level = 1;
6393
6394 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6395 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6396 - asm("cpuid"
6397 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6398 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6399 + asm volatile("cpuid"
6400 : "+a" (level), "=d" (cpu.flags[0])
6401 : : "ecx", "ebx");
6402 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6403 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6404
6405 err = check_flags();
6406 }
6407 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6408 index bdb4d45..0476680 100644
6409 --- a/arch/x86/boot/header.S
6410 +++ b/arch/x86/boot/header.S
6411 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6412 # single linked list of
6413 # struct setup_data
6414
6415 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6416 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6417
6418 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6419 #define VO_INIT_SIZE (VO__end - VO__text)
6420 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6421 index db75d07..8e6d0af 100644
6422 --- a/arch/x86/boot/memory.c
6423 +++ b/arch/x86/boot/memory.c
6424 @@ -19,7 +19,7 @@
6425
6426 static int detect_memory_e820(void)
6427 {
6428 - int count = 0;
6429 + unsigned int count = 0;
6430 struct biosregs ireg, oreg;
6431 struct e820entry *desc = boot_params.e820_map;
6432 static struct e820entry buf; /* static so it is zeroed */
6433 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6434 index 11e8c6e..fdbb1ed 100644
6435 --- a/arch/x86/boot/video-vesa.c
6436 +++ b/arch/x86/boot/video-vesa.c
6437 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6438
6439 boot_params.screen_info.vesapm_seg = oreg.es;
6440 boot_params.screen_info.vesapm_off = oreg.di;
6441 + boot_params.screen_info.vesapm_size = oreg.cx;
6442 }
6443
6444 /*
6445 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6446 index 43eda28..5ab5fdb 100644
6447 --- a/arch/x86/boot/video.c
6448 +++ b/arch/x86/boot/video.c
6449 @@ -96,7 +96,7 @@ static void store_mode_params(void)
6450 static unsigned int get_entry(void)
6451 {
6452 char entry_buf[4];
6453 - int i, len = 0;
6454 + unsigned int i, len = 0;
6455 int key;
6456 unsigned int v;
6457
6458 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6459 index 5b577d5..3c1fed4 100644
6460 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
6461 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6462 @@ -8,6 +8,8 @@
6463 * including this sentence is retained in full.
6464 */
6465
6466 +#include <asm/alternative-asm.h>
6467 +
6468 .extern crypto_ft_tab
6469 .extern crypto_it_tab
6470 .extern crypto_fl_tab
6471 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6472 je B192; \
6473 leaq 32(r9),r9;
6474
6475 +#define ret pax_force_retaddr 0, 1; ret
6476 +
6477 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6478 movq r1,r2; \
6479 movq r3,r4; \
6480 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6481 index be6d9e3..21fbbca 100644
6482 --- a/arch/x86/crypto/aesni-intel_asm.S
6483 +++ b/arch/x86/crypto/aesni-intel_asm.S
6484 @@ -31,6 +31,7 @@
6485
6486 #include <linux/linkage.h>
6487 #include <asm/inst.h>
6488 +#include <asm/alternative-asm.h>
6489
6490 #ifdef __x86_64__
6491 .data
6492 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6493 pop %r14
6494 pop %r13
6495 pop %r12
6496 + pax_force_retaddr 0, 1
6497 ret
6498 +ENDPROC(aesni_gcm_dec)
6499
6500
6501 /*****************************************************************************
6502 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6503 pop %r14
6504 pop %r13
6505 pop %r12
6506 + pax_force_retaddr 0, 1
6507 ret
6508 +ENDPROC(aesni_gcm_enc)
6509
6510 #endif
6511
6512 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
6513 pxor %xmm1, %xmm0
6514 movaps %xmm0, (TKEYP)
6515 add $0x10, TKEYP
6516 + pax_force_retaddr_bts
6517 ret
6518
6519 .align 4
6520 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
6521 shufps $0b01001110, %xmm2, %xmm1
6522 movaps %xmm1, 0x10(TKEYP)
6523 add $0x20, TKEYP
6524 + pax_force_retaddr_bts
6525 ret
6526
6527 .align 4
6528 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
6529
6530 movaps %xmm0, (TKEYP)
6531 add $0x10, TKEYP
6532 + pax_force_retaddr_bts
6533 ret
6534
6535 .align 4
6536 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
6537 pxor %xmm1, %xmm2
6538 movaps %xmm2, (TKEYP)
6539 add $0x10, TKEYP
6540 + pax_force_retaddr_bts
6541 ret
6542
6543 /*
6544 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6545 #ifndef __x86_64__
6546 popl KEYP
6547 #endif
6548 + pax_force_retaddr 0, 1
6549 ret
6550 +ENDPROC(aesni_set_key)
6551
6552 /*
6553 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6554 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6555 popl KLEN
6556 popl KEYP
6557 #endif
6558 + pax_force_retaddr 0, 1
6559 ret
6560 +ENDPROC(aesni_enc)
6561
6562 /*
6563 * _aesni_enc1: internal ABI
6564 @@ -1959,6 +1972,7 @@ _aesni_enc1:
6565 AESENC KEY STATE
6566 movaps 0x70(TKEYP), KEY
6567 AESENCLAST KEY STATE
6568 + pax_force_retaddr_bts
6569 ret
6570
6571 /*
6572 @@ -2067,6 +2081,7 @@ _aesni_enc4:
6573 AESENCLAST KEY STATE2
6574 AESENCLAST KEY STATE3
6575 AESENCLAST KEY STATE4
6576 + pax_force_retaddr_bts
6577 ret
6578
6579 /*
6580 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6581 popl KLEN
6582 popl KEYP
6583 #endif
6584 + pax_force_retaddr 0, 1
6585 ret
6586 +ENDPROC(aesni_dec)
6587
6588 /*
6589 * _aesni_dec1: internal ABI
6590 @@ -2146,6 +2163,7 @@ _aesni_dec1:
6591 AESDEC KEY STATE
6592 movaps 0x70(TKEYP), KEY
6593 AESDECLAST KEY STATE
6594 + pax_force_retaddr_bts
6595 ret
6596
6597 /*
6598 @@ -2254,6 +2272,7 @@ _aesni_dec4:
6599 AESDECLAST KEY STATE2
6600 AESDECLAST KEY STATE3
6601 AESDECLAST KEY STATE4
6602 + pax_force_retaddr_bts
6603 ret
6604
6605 /*
6606 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6607 popl KEYP
6608 popl LEN
6609 #endif
6610 + pax_force_retaddr 0, 1
6611 ret
6612 +ENDPROC(aesni_ecb_enc)
6613
6614 /*
6615 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6616 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6617 popl KEYP
6618 popl LEN
6619 #endif
6620 + pax_force_retaddr 0, 1
6621 ret
6622 +ENDPROC(aesni_ecb_dec)
6623
6624 /*
6625 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6626 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6627 popl LEN
6628 popl IVP
6629 #endif
6630 + pax_force_retaddr 0, 1
6631 ret
6632 +ENDPROC(aesni_cbc_enc)
6633
6634 /*
6635 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6636 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6637 popl LEN
6638 popl IVP
6639 #endif
6640 + pax_force_retaddr 0, 1
6641 ret
6642 +ENDPROC(aesni_cbc_dec)
6643
6644 #ifdef __x86_64__
6645 .align 16
6646 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
6647 mov $1, TCTR_LOW
6648 MOVQ_R64_XMM TCTR_LOW INC
6649 MOVQ_R64_XMM CTR TCTR_LOW
6650 + pax_force_retaddr_bts
6651 ret
6652
6653 /*
6654 @@ -2552,6 +2580,7 @@ _aesni_inc:
6655 .Linc_low:
6656 movaps CTR, IV
6657 PSHUFB_XMM BSWAP_MASK IV
6658 + pax_force_retaddr_bts
6659 ret
6660
6661 /*
6662 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6663 .Lctr_enc_ret:
6664 movups IV, (IVP)
6665 .Lctr_enc_just_ret:
6666 + pax_force_retaddr 0, 1
6667 ret
6668 +ENDPROC(aesni_ctr_enc)
6669 #endif
6670 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6671 index 391d245..67f35c2 100644
6672 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
6673 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6674 @@ -20,6 +20,8 @@
6675 *
6676 */
6677
6678 +#include <asm/alternative-asm.h>
6679 +
6680 .file "blowfish-x86_64-asm.S"
6681 .text
6682
6683 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
6684 jnz __enc_xor;
6685
6686 write_block();
6687 + pax_force_retaddr 0, 1
6688 ret;
6689 __enc_xor:
6690 xor_block();
6691 + pax_force_retaddr 0, 1
6692 ret;
6693
6694 .align 8
6695 @@ -188,6 +192,7 @@ blowfish_dec_blk:
6696
6697 movq %r11, %rbp;
6698
6699 + pax_force_retaddr 0, 1
6700 ret;
6701
6702 /**********************************************************************
6703 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
6704
6705 popq %rbx;
6706 popq %rbp;
6707 + pax_force_retaddr 0, 1
6708 ret;
6709
6710 __enc_xor4:
6711 @@ -349,6 +355,7 @@ __enc_xor4:
6712
6713 popq %rbx;
6714 popq %rbp;
6715 + pax_force_retaddr 0, 1
6716 ret;
6717
6718 .align 8
6719 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
6720 popq %rbx;
6721 popq %rbp;
6722
6723 + pax_force_retaddr 0, 1
6724 ret;
6725
6726 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6727 index 6214a9b..1f4fc9a 100644
6728 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6729 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6730 @@ -1,3 +1,5 @@
6731 +#include <asm/alternative-asm.h>
6732 +
6733 # enter ECRYPT_encrypt_bytes
6734 .text
6735 .p2align 5
6736 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6737 add %r11,%rsp
6738 mov %rdi,%rax
6739 mov %rsi,%rdx
6740 + pax_force_retaddr 0, 1
6741 ret
6742 # bytesatleast65:
6743 ._bytesatleast65:
6744 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
6745 add %r11,%rsp
6746 mov %rdi,%rax
6747 mov %rsi,%rdx
6748 + pax_force_retaddr
6749 ret
6750 # enter ECRYPT_ivsetup
6751 .text
6752 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6753 add %r11,%rsp
6754 mov %rdi,%rax
6755 mov %rsi,%rdx
6756 + pax_force_retaddr
6757 ret
6758 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
6759 index b2c2f57..8470cab 100644
6760 --- a/arch/x86/crypto/sha1_ssse3_asm.S
6761 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
6762 @@ -28,6 +28,8 @@
6763 * (at your option) any later version.
6764 */
6765
6766 +#include <asm/alternative-asm.h>
6767 +
6768 #define CTX %rdi // arg1
6769 #define BUF %rsi // arg2
6770 #define CNT %rdx // arg3
6771 @@ -104,6 +106,7 @@
6772 pop %r12
6773 pop %rbp
6774 pop %rbx
6775 + pax_force_retaddr 0, 1
6776 ret
6777
6778 .size \name, .-\name
6779 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6780 index 5b012a2..36d5364 100644
6781 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6782 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6783 @@ -20,6 +20,8 @@
6784 *
6785 */
6786
6787 +#include <asm/alternative-asm.h>
6788 +
6789 .file "twofish-x86_64-asm-3way.S"
6790 .text
6791
6792 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
6793 popq %r13;
6794 popq %r14;
6795 popq %r15;
6796 + pax_force_retaddr 0, 1
6797 ret;
6798
6799 __enc_xor3:
6800 @@ -271,6 +274,7 @@ __enc_xor3:
6801 popq %r13;
6802 popq %r14;
6803 popq %r15;
6804 + pax_force_retaddr 0, 1
6805 ret;
6806
6807 .global twofish_dec_blk_3way
6808 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
6809 popq %r13;
6810 popq %r14;
6811 popq %r15;
6812 + pax_force_retaddr 0, 1
6813 ret;
6814
6815 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6816 index 7bcf3fc..f53832f 100644
6817 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6818 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6819 @@ -21,6 +21,7 @@
6820 .text
6821
6822 #include <asm/asm-offsets.h>
6823 +#include <asm/alternative-asm.h>
6824
6825 #define a_offset 0
6826 #define b_offset 4
6827 @@ -268,6 +269,7 @@ twofish_enc_blk:
6828
6829 popq R1
6830 movq $1,%rax
6831 + pax_force_retaddr 0, 1
6832 ret
6833
6834 twofish_dec_blk:
6835 @@ -319,4 +321,5 @@ twofish_dec_blk:
6836
6837 popq R1
6838 movq $1,%rax
6839 + pax_force_retaddr 0, 1
6840 ret
6841 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6842 index fd84387..0b4af7d 100644
6843 --- a/arch/x86/ia32/ia32_aout.c
6844 +++ b/arch/x86/ia32/ia32_aout.c
6845 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6846 unsigned long dump_start, dump_size;
6847 struct user32 dump;
6848
6849 + memset(&dump, 0, sizeof(dump));
6850 +
6851 fs = get_fs();
6852 set_fs(KERNEL_DS);
6853 has_dumped = 1;
6854 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6855 index 6557769..ef6ae89 100644
6856 --- a/arch/x86/ia32/ia32_signal.c
6857 +++ b/arch/x86/ia32/ia32_signal.c
6858 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6859 }
6860 seg = get_fs();
6861 set_fs(KERNEL_DS);
6862 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6863 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6864 set_fs(seg);
6865 if (ret >= 0 && uoss_ptr) {
6866 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6867 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6868 */
6869 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6870 size_t frame_size,
6871 - void **fpstate)
6872 + void __user **fpstate)
6873 {
6874 unsigned long sp;
6875
6876 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6877
6878 if (used_math()) {
6879 sp = sp - sig_xstate_ia32_size;
6880 - *fpstate = (struct _fpstate_ia32 *) sp;
6881 + *fpstate = (struct _fpstate_ia32 __user *) sp;
6882 if (save_i387_xstate_ia32(*fpstate) < 0)
6883 return (void __user *) -1L;
6884 }
6885 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6886 sp -= frame_size;
6887 /* Align the stack pointer according to the i386 ABI,
6888 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6889 - sp = ((sp + 4) & -16ul) - 4;
6890 + sp = ((sp - 12) & -16ul) - 4;
6891 return (void __user *) sp;
6892 }
6893
6894 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6895 * These are actually not used anymore, but left because some
6896 * gdb versions depend on them as a marker.
6897 */
6898 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6899 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6900 } put_user_catch(err);
6901
6902 if (err)
6903 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6904 0xb8,
6905 __NR_ia32_rt_sigreturn,
6906 0x80cd,
6907 - 0,
6908 + 0
6909 };
6910
6911 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6912 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6913
6914 if (ka->sa.sa_flags & SA_RESTORER)
6915 restorer = ka->sa.sa_restorer;
6916 + else if (current->mm->context.vdso)
6917 + /* Return stub is in 32bit vsyscall page */
6918 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6919 else
6920 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6921 - rt_sigreturn);
6922 + restorer = &frame->retcode;
6923 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6924
6925 /*
6926 * Not actually used anymore, but left because some gdb
6927 * versions need it.
6928 */
6929 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6930 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6931 } put_user_catch(err);
6932
6933 if (err)
6934 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6935 index a6253ec..4ad2120 100644
6936 --- a/arch/x86/ia32/ia32entry.S
6937 +++ b/arch/x86/ia32/ia32entry.S
6938 @@ -13,7 +13,9 @@
6939 #include <asm/thread_info.h>
6940 #include <asm/segment.h>
6941 #include <asm/irqflags.h>
6942 +#include <asm/pgtable.h>
6943 #include <linux/linkage.h>
6944 +#include <asm/alternative-asm.h>
6945
6946 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6947 #include <linux/elf-em.h>
6948 @@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6949 ENDPROC(native_irq_enable_sysexit)
6950 #endif
6951
6952 + .macro pax_enter_kernel_user
6953 + pax_set_fptr_mask
6954 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6955 + call pax_enter_kernel_user
6956 +#endif
6957 + .endm
6958 +
6959 + .macro pax_exit_kernel_user
6960 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6961 + call pax_exit_kernel_user
6962 +#endif
6963 +#ifdef CONFIG_PAX_RANDKSTACK
6964 + pushq %rax
6965 + pushq %r11
6966 + call pax_randomize_kstack
6967 + popq %r11
6968 + popq %rax
6969 +#endif
6970 + .endm
6971 +
6972 +.macro pax_erase_kstack
6973 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6974 + call pax_erase_kstack
6975 +#endif
6976 +.endm
6977 +
6978 /*
6979 * 32bit SYSENTER instruction entry.
6980 *
6981 @@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6982 CFI_REGISTER rsp,rbp
6983 SWAPGS_UNSAFE_STACK
6984 movq PER_CPU_VAR(kernel_stack), %rsp
6985 - addq $(KERNEL_STACK_OFFSET),%rsp
6986 - /*
6987 - * No need to follow this irqs on/off section: the syscall
6988 - * disabled irqs, here we enable it straight after entry:
6989 - */
6990 - ENABLE_INTERRUPTS(CLBR_NONE)
6991 movl %ebp,%ebp /* zero extension */
6992 pushq_cfi $__USER32_DS
6993 /*CFI_REL_OFFSET ss,0*/
6994 @@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
6995 CFI_REL_OFFSET rsp,0
6996 pushfq_cfi
6997 /*CFI_REL_OFFSET rflags,0*/
6998 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6999 - CFI_REGISTER rip,r10
7000 + orl $X86_EFLAGS_IF,(%rsp)
7001 + GET_THREAD_INFO(%r11)
7002 + movl TI_sysenter_return(%r11), %r11d
7003 + CFI_REGISTER rip,r11
7004 pushq_cfi $__USER32_CS
7005 /*CFI_REL_OFFSET cs,0*/
7006 movl %eax, %eax
7007 - pushq_cfi %r10
7008 + pushq_cfi %r11
7009 CFI_REL_OFFSET rip,0
7010 pushq_cfi %rax
7011 cld
7012 SAVE_ARGS 0,1,0
7013 + pax_enter_kernel_user
7014 + /*
7015 + * No need to follow this irqs on/off section: the syscall
7016 + * disabled irqs, here we enable it straight after entry:
7017 + */
7018 + ENABLE_INTERRUPTS(CLBR_NONE)
7019 /* no need to do an access_ok check here because rbp has been
7020 32bit zero extended */
7021 +
7022 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7023 + mov $PAX_USER_SHADOW_BASE,%r11
7024 + add %r11,%rbp
7025 +#endif
7026 +
7027 1: movl (%rbp),%ebp
7028 .section __ex_table,"a"
7029 .quad 1b,ia32_badarg
7030 .previous
7031 - GET_THREAD_INFO(%r10)
7032 - orl $TS_COMPAT,TI_status(%r10)
7033 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7034 + GET_THREAD_INFO(%r11)
7035 + orl $TS_COMPAT,TI_status(%r11)
7036 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7037 CFI_REMEMBER_STATE
7038 jnz sysenter_tracesys
7039 cmpq $(IA32_NR_syscalls-1),%rax
7040 @@ -162,13 +198,15 @@ sysenter_do_call:
7041 sysenter_dispatch:
7042 call *ia32_sys_call_table(,%rax,8)
7043 movq %rax,RAX-ARGOFFSET(%rsp)
7044 - GET_THREAD_INFO(%r10)
7045 + GET_THREAD_INFO(%r11)
7046 DISABLE_INTERRUPTS(CLBR_NONE)
7047 TRACE_IRQS_OFF
7048 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7049 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7050 jnz sysexit_audit
7051 sysexit_from_sys_call:
7052 - andl $~TS_COMPAT,TI_status(%r10)
7053 + pax_exit_kernel_user
7054 + pax_erase_kstack
7055 + andl $~TS_COMPAT,TI_status(%r11)
7056 /* clear IF, that popfq doesn't enable interrupts early */
7057 andl $~0x200,EFLAGS-R11(%rsp)
7058 movl RIP-R11(%rsp),%edx /* User %eip */
7059 @@ -194,6 +232,9 @@ sysexit_from_sys_call:
7060 movl %eax,%esi /* 2nd arg: syscall number */
7061 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
7062 call audit_syscall_entry
7063 +
7064 + pax_erase_kstack
7065 +
7066 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
7067 cmpq $(IA32_NR_syscalls-1),%rax
7068 ja ia32_badsys
7069 @@ -205,7 +246,7 @@ sysexit_from_sys_call:
7070 .endm
7071
7072 .macro auditsys_exit exit
7073 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7074 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7075 jnz ia32_ret_from_sys_call
7076 TRACE_IRQS_ON
7077 sti
7078 @@ -215,12 +256,12 @@ sysexit_from_sys_call:
7079 movzbl %al,%edi /* zero-extend that into %edi */
7080 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
7081 call audit_syscall_exit
7082 - GET_THREAD_INFO(%r10)
7083 + GET_THREAD_INFO(%r11)
7084 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
7085 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
7086 cli
7087 TRACE_IRQS_OFF
7088 - testl %edi,TI_flags(%r10)
7089 + testl %edi,TI_flags(%r11)
7090 jz \exit
7091 CLEAR_RREGS -ARGOFFSET
7092 jmp int_with_check
7093 @@ -238,7 +279,7 @@ sysexit_audit:
7094
7095 sysenter_tracesys:
7096 #ifdef CONFIG_AUDITSYSCALL
7097 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7098 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7099 jz sysenter_auditsys
7100 #endif
7101 SAVE_REST
7102 @@ -246,6 +287,9 @@ sysenter_tracesys:
7103 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
7104 movq %rsp,%rdi /* &pt_regs -> arg1 */
7105 call syscall_trace_enter
7106 +
7107 + pax_erase_kstack
7108 +
7109 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
7110 RESTORE_REST
7111 cmpq $(IA32_NR_syscalls-1),%rax
7112 @@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
7113 ENTRY(ia32_cstar_target)
7114 CFI_STARTPROC32 simple
7115 CFI_SIGNAL_FRAME
7116 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
7117 + CFI_DEF_CFA rsp,0
7118 CFI_REGISTER rip,rcx
7119 /*CFI_REGISTER rflags,r11*/
7120 SWAPGS_UNSAFE_STACK
7121 movl %esp,%r8d
7122 CFI_REGISTER rsp,r8
7123 movq PER_CPU_VAR(kernel_stack),%rsp
7124 + SAVE_ARGS 8*6,0,0
7125 + pax_enter_kernel_user
7126 /*
7127 * No need to follow this irqs on/off section: the syscall
7128 * disabled irqs and here we enable it straight after entry:
7129 */
7130 ENABLE_INTERRUPTS(CLBR_NONE)
7131 - SAVE_ARGS 8,0,0
7132 movl %eax,%eax /* zero extension */
7133 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
7134 movq %rcx,RIP-ARGOFFSET(%rsp)
7135 @@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
7136 /* no need to do an access_ok check here because r8 has been
7137 32bit zero extended */
7138 /* hardware stack frame is complete now */
7139 +
7140 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7141 + mov $PAX_USER_SHADOW_BASE,%r11
7142 + add %r11,%r8
7143 +#endif
7144 +
7145 1: movl (%r8),%r9d
7146 .section __ex_table,"a"
7147 .quad 1b,ia32_badarg
7148 .previous
7149 - GET_THREAD_INFO(%r10)
7150 - orl $TS_COMPAT,TI_status(%r10)
7151 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7152 + GET_THREAD_INFO(%r11)
7153 + orl $TS_COMPAT,TI_status(%r11)
7154 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7155 CFI_REMEMBER_STATE
7156 jnz cstar_tracesys
7157 cmpq $IA32_NR_syscalls-1,%rax
7158 @@ -321,13 +372,15 @@ cstar_do_call:
7159 cstar_dispatch:
7160 call *ia32_sys_call_table(,%rax,8)
7161 movq %rax,RAX-ARGOFFSET(%rsp)
7162 - GET_THREAD_INFO(%r10)
7163 + GET_THREAD_INFO(%r11)
7164 DISABLE_INTERRUPTS(CLBR_NONE)
7165 TRACE_IRQS_OFF
7166 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7167 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7168 jnz sysretl_audit
7169 sysretl_from_sys_call:
7170 - andl $~TS_COMPAT,TI_status(%r10)
7171 + pax_exit_kernel_user
7172 + pax_erase_kstack
7173 + andl $~TS_COMPAT,TI_status(%r11)
7174 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
7175 movl RIP-ARGOFFSET(%rsp),%ecx
7176 CFI_REGISTER rip,rcx
7177 @@ -355,7 +408,7 @@ sysretl_audit:
7178
7179 cstar_tracesys:
7180 #ifdef CONFIG_AUDITSYSCALL
7181 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7182 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7183 jz cstar_auditsys
7184 #endif
7185 xchgl %r9d,%ebp
7186 @@ -364,6 +417,9 @@ cstar_tracesys:
7187 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
7188 movq %rsp,%rdi /* &pt_regs -> arg1 */
7189 call syscall_trace_enter
7190 +
7191 + pax_erase_kstack
7192 +
7193 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
7194 RESTORE_REST
7195 xchgl %ebp,%r9d
7196 @@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
7197 CFI_REL_OFFSET rip,RIP-RIP
7198 PARAVIRT_ADJUST_EXCEPTION_FRAME
7199 SWAPGS
7200 - /*
7201 - * No need to follow this irqs on/off section: the syscall
7202 - * disabled irqs and here we enable it straight after entry:
7203 - */
7204 - ENABLE_INTERRUPTS(CLBR_NONE)
7205 movl %eax,%eax
7206 pushq_cfi %rax
7207 cld
7208 /* note the registers are not zero extended to the sf.
7209 this could be a problem. */
7210 SAVE_ARGS 0,1,0
7211 - GET_THREAD_INFO(%r10)
7212 - orl $TS_COMPAT,TI_status(%r10)
7213 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7214 + pax_enter_kernel_user
7215 + /*
7216 + * No need to follow this irqs on/off section: the syscall
7217 + * disabled irqs and here we enable it straight after entry:
7218 + */
7219 + ENABLE_INTERRUPTS(CLBR_NONE)
7220 + GET_THREAD_INFO(%r11)
7221 + orl $TS_COMPAT,TI_status(%r11)
7222 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7223 jnz ia32_tracesys
7224 cmpq $(IA32_NR_syscalls-1),%rax
7225 ja ia32_badsys
7226 @@ -441,6 +498,9 @@ ia32_tracesys:
7227 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
7228 movq %rsp,%rdi /* &pt_regs -> arg1 */
7229 call syscall_trace_enter
7230 +
7231 + pax_erase_kstack
7232 +
7233 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
7234 RESTORE_REST
7235 cmpq $(IA32_NR_syscalls-1),%rax
7236 @@ -455,6 +515,7 @@ ia32_badsys:
7237
7238 quiet_ni_syscall:
7239 movq $-ENOSYS,%rax
7240 + pax_force_retaddr
7241 ret
7242 CFI_ENDPROC
7243
7244 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
7245 index f6f5c53..b358b28 100644
7246 --- a/arch/x86/ia32/sys_ia32.c
7247 +++ b/arch/x86/ia32/sys_ia32.c
7248 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
7249 */
7250 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
7251 {
7252 - typeof(ubuf->st_uid) uid = 0;
7253 - typeof(ubuf->st_gid) gid = 0;
7254 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
7255 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
7256 SET_UID(uid, stat->uid);
7257 SET_GID(gid, stat->gid);
7258 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
7259 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
7260 }
7261 set_fs(KERNEL_DS);
7262 ret = sys_rt_sigprocmask(how,
7263 - set ? (sigset_t __user *)&s : NULL,
7264 - oset ? (sigset_t __user *)&s : NULL,
7265 + set ? (sigset_t __force_user *)&s : NULL,
7266 + oset ? (sigset_t __force_user *)&s : NULL,
7267 sigsetsize);
7268 set_fs(old_fs);
7269 if (ret)
7270 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
7271 return alarm_setitimer(seconds);
7272 }
7273
7274 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
7275 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
7276 int options)
7277 {
7278 return compat_sys_wait4(pid, stat_addr, options, NULL);
7279 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
7280 mm_segment_t old_fs = get_fs();
7281
7282 set_fs(KERNEL_DS);
7283 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
7284 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
7285 set_fs(old_fs);
7286 if (put_compat_timespec(&t, interval))
7287 return -EFAULT;
7288 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
7289 mm_segment_t old_fs = get_fs();
7290
7291 set_fs(KERNEL_DS);
7292 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
7293 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
7294 set_fs(old_fs);
7295 if (!ret) {
7296 switch (_NSIG_WORDS) {
7297 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
7298 if (copy_siginfo_from_user32(&info, uinfo))
7299 return -EFAULT;
7300 set_fs(KERNEL_DS);
7301 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7302 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7303 set_fs(old_fs);
7304 return ret;
7305 }
7306 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7307 return -EFAULT;
7308
7309 set_fs(KERNEL_DS);
7310 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7311 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7312 count);
7313 set_fs(old_fs);
7314
7315 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7316 index 091508b..7692c6f 100644
7317 --- a/arch/x86/include/asm/alternative-asm.h
7318 +++ b/arch/x86/include/asm/alternative-asm.h
7319 @@ -4,10 +4,10 @@
7320
7321 #ifdef CONFIG_SMP
7322 .macro LOCK_PREFIX
7323 -1: lock
7324 +672: lock
7325 .section .smp_locks,"a"
7326 .balign 4
7327 - .long 1b - .
7328 + .long 672b - .
7329 .previous
7330 .endm
7331 #else
7332 @@ -15,6 +15,45 @@
7333 .endm
7334 #endif
7335
7336 +#ifdef KERNEXEC_PLUGIN
7337 + .macro pax_force_retaddr_bts rip=0
7338 + btsq $63,\rip(%rsp)
7339 + .endm
7340 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7341 + .macro pax_force_retaddr rip=0, reload=0
7342 + btsq $63,\rip(%rsp)
7343 + .endm
7344 + .macro pax_force_fptr ptr
7345 + btsq $63,\ptr
7346 + .endm
7347 + .macro pax_set_fptr_mask
7348 + .endm
7349 +#endif
7350 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7351 + .macro pax_force_retaddr rip=0, reload=0
7352 + .if \reload
7353 + pax_set_fptr_mask
7354 + .endif
7355 + orq %r10,\rip(%rsp)
7356 + .endm
7357 + .macro pax_force_fptr ptr
7358 + orq %r10,\ptr
7359 + .endm
7360 + .macro pax_set_fptr_mask
7361 + movabs $0x8000000000000000,%r10
7362 + .endm
7363 +#endif
7364 +#else
7365 + .macro pax_force_retaddr rip=0, reload=0
7366 + .endm
7367 + .macro pax_force_fptr ptr
7368 + .endm
7369 + .macro pax_force_retaddr_bts rip=0
7370 + .endm
7371 + .macro pax_set_fptr_mask
7372 + .endm
7373 +#endif
7374 +
7375 .macro altinstruction_entry orig alt feature orig_len alt_len
7376 .long \orig - .
7377 .long \alt - .
7378 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7379 index 37ad100..7d47faa 100644
7380 --- a/arch/x86/include/asm/alternative.h
7381 +++ b/arch/x86/include/asm/alternative.h
7382 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7383 ".section .discard,\"aw\",@progbits\n" \
7384 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7385 ".previous\n" \
7386 - ".section .altinstr_replacement, \"ax\"\n" \
7387 + ".section .altinstr_replacement, \"a\"\n" \
7388 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7389 ".previous"
7390
7391 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7392 index 1a6c09a..fec2432 100644
7393 --- a/arch/x86/include/asm/apic.h
7394 +++ b/arch/x86/include/asm/apic.h
7395 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7396
7397 #ifdef CONFIG_X86_LOCAL_APIC
7398
7399 -extern unsigned int apic_verbosity;
7400 +extern int apic_verbosity;
7401 extern int local_apic_timer_c2_ok;
7402
7403 extern int disable_apic;
7404 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7405 index 20370c6..a2eb9b0 100644
7406 --- a/arch/x86/include/asm/apm.h
7407 +++ b/arch/x86/include/asm/apm.h
7408 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7409 __asm__ __volatile__(APM_DO_ZERO_SEGS
7410 "pushl %%edi\n\t"
7411 "pushl %%ebp\n\t"
7412 - "lcall *%%cs:apm_bios_entry\n\t"
7413 + "lcall *%%ss:apm_bios_entry\n\t"
7414 "setc %%al\n\t"
7415 "popl %%ebp\n\t"
7416 "popl %%edi\n\t"
7417 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7418 __asm__ __volatile__(APM_DO_ZERO_SEGS
7419 "pushl %%edi\n\t"
7420 "pushl %%ebp\n\t"
7421 - "lcall *%%cs:apm_bios_entry\n\t"
7422 + "lcall *%%ss:apm_bios_entry\n\t"
7423 "setc %%bl\n\t"
7424 "popl %%ebp\n\t"
7425 "popl %%edi\n\t"
7426 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7427 index 58cb6d4..ca9010d 100644
7428 --- a/arch/x86/include/asm/atomic.h
7429 +++ b/arch/x86/include/asm/atomic.h
7430 @@ -22,7 +22,18 @@
7431 */
7432 static inline int atomic_read(const atomic_t *v)
7433 {
7434 - return (*(volatile int *)&(v)->counter);
7435 + return (*(volatile const int *)&(v)->counter);
7436 +}
7437 +
7438 +/**
7439 + * atomic_read_unchecked - read atomic variable
7440 + * @v: pointer of type atomic_unchecked_t
7441 + *
7442 + * Atomically reads the value of @v.
7443 + */
7444 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7445 +{
7446 + return (*(volatile const int *)&(v)->counter);
7447 }
7448
7449 /**
7450 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7451 }
7452
7453 /**
7454 + * atomic_set_unchecked - set atomic variable
7455 + * @v: pointer of type atomic_unchecked_t
7456 + * @i: required value
7457 + *
7458 + * Atomically sets the value of @v to @i.
7459 + */
7460 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7461 +{
7462 + v->counter = i;
7463 +}
7464 +
7465 +/**
7466 * atomic_add - add integer to atomic variable
7467 * @i: integer value to add
7468 * @v: pointer of type atomic_t
7469 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7470 */
7471 static inline void atomic_add(int i, atomic_t *v)
7472 {
7473 - asm volatile(LOCK_PREFIX "addl %1,%0"
7474 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7475 +
7476 +#ifdef CONFIG_PAX_REFCOUNT
7477 + "jno 0f\n"
7478 + LOCK_PREFIX "subl %1,%0\n"
7479 + "int $4\n0:\n"
7480 + _ASM_EXTABLE(0b, 0b)
7481 +#endif
7482 +
7483 + : "+m" (v->counter)
7484 + : "ir" (i));
7485 +}
7486 +
7487 +/**
7488 + * atomic_add_unchecked - add integer to atomic variable
7489 + * @i: integer value to add
7490 + * @v: pointer of type atomic_unchecked_t
7491 + *
7492 + * Atomically adds @i to @v.
7493 + */
7494 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7495 +{
7496 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7497 : "+m" (v->counter)
7498 : "ir" (i));
7499 }
7500 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7501 */
7502 static inline void atomic_sub(int i, atomic_t *v)
7503 {
7504 - asm volatile(LOCK_PREFIX "subl %1,%0"
7505 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7506 +
7507 +#ifdef CONFIG_PAX_REFCOUNT
7508 + "jno 0f\n"
7509 + LOCK_PREFIX "addl %1,%0\n"
7510 + "int $4\n0:\n"
7511 + _ASM_EXTABLE(0b, 0b)
7512 +#endif
7513 +
7514 + : "+m" (v->counter)
7515 + : "ir" (i));
7516 +}
7517 +
7518 +/**
7519 + * atomic_sub_unchecked - subtract integer from atomic variable
7520 + * @i: integer value to subtract
7521 + * @v: pointer of type atomic_unchecked_t
7522 + *
7523 + * Atomically subtracts @i from @v.
7524 + */
7525 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7526 +{
7527 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7528 : "+m" (v->counter)
7529 : "ir" (i));
7530 }
7531 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7532 {
7533 unsigned char c;
7534
7535 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7536 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7537 +
7538 +#ifdef CONFIG_PAX_REFCOUNT
7539 + "jno 0f\n"
7540 + LOCK_PREFIX "addl %2,%0\n"
7541 + "int $4\n0:\n"
7542 + _ASM_EXTABLE(0b, 0b)
7543 +#endif
7544 +
7545 + "sete %1\n"
7546 : "+m" (v->counter), "=qm" (c)
7547 : "ir" (i) : "memory");
7548 return c;
7549 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7550 */
7551 static inline void atomic_inc(atomic_t *v)
7552 {
7553 - asm volatile(LOCK_PREFIX "incl %0"
7554 + asm volatile(LOCK_PREFIX "incl %0\n"
7555 +
7556 +#ifdef CONFIG_PAX_REFCOUNT
7557 + "jno 0f\n"
7558 + LOCK_PREFIX "decl %0\n"
7559 + "int $4\n0:\n"
7560 + _ASM_EXTABLE(0b, 0b)
7561 +#endif
7562 +
7563 + : "+m" (v->counter));
7564 +}
7565 +
7566 +/**
7567 + * atomic_inc_unchecked - increment atomic variable
7568 + * @v: pointer of type atomic_unchecked_t
7569 + *
7570 + * Atomically increments @v by 1.
7571 + */
7572 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7573 +{
7574 + asm volatile(LOCK_PREFIX "incl %0\n"
7575 : "+m" (v->counter));
7576 }
7577
7578 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7579 */
7580 static inline void atomic_dec(atomic_t *v)
7581 {
7582 - asm volatile(LOCK_PREFIX "decl %0"
7583 + asm volatile(LOCK_PREFIX "decl %0\n"
7584 +
7585 +#ifdef CONFIG_PAX_REFCOUNT
7586 + "jno 0f\n"
7587 + LOCK_PREFIX "incl %0\n"
7588 + "int $4\n0:\n"
7589 + _ASM_EXTABLE(0b, 0b)
7590 +#endif
7591 +
7592 + : "+m" (v->counter));
7593 +}
7594 +
7595 +/**
7596 + * atomic_dec_unchecked - decrement atomic variable
7597 + * @v: pointer of type atomic_unchecked_t
7598 + *
7599 + * Atomically decrements @v by 1.
7600 + */
7601 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7602 +{
7603 + asm volatile(LOCK_PREFIX "decl %0\n"
7604 : "+m" (v->counter));
7605 }
7606
7607 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7608 {
7609 unsigned char c;
7610
7611 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7612 + asm volatile(LOCK_PREFIX "decl %0\n"
7613 +
7614 +#ifdef CONFIG_PAX_REFCOUNT
7615 + "jno 0f\n"
7616 + LOCK_PREFIX "incl %0\n"
7617 + "int $4\n0:\n"
7618 + _ASM_EXTABLE(0b, 0b)
7619 +#endif
7620 +
7621 + "sete %1\n"
7622 : "+m" (v->counter), "=qm" (c)
7623 : : "memory");
7624 return c != 0;
7625 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7626 {
7627 unsigned char c;
7628
7629 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7630 + asm volatile(LOCK_PREFIX "incl %0\n"
7631 +
7632 +#ifdef CONFIG_PAX_REFCOUNT
7633 + "jno 0f\n"
7634 + LOCK_PREFIX "decl %0\n"
7635 + "int $4\n0:\n"
7636 + _ASM_EXTABLE(0b, 0b)
7637 +#endif
7638 +
7639 + "sete %1\n"
7640 + : "+m" (v->counter), "=qm" (c)
7641 + : : "memory");
7642 + return c != 0;
7643 +}
7644 +
7645 +/**
7646 + * atomic_inc_and_test_unchecked - increment and test
7647 + * @v: pointer of type atomic_unchecked_t
7648 + *
7649 + * Atomically increments @v by 1
7650 + * and returns true if the result is zero, or false for all
7651 + * other cases.
7652 + */
7653 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7654 +{
7655 + unsigned char c;
7656 +
7657 + asm volatile(LOCK_PREFIX "incl %0\n"
7658 + "sete %1\n"
7659 : "+m" (v->counter), "=qm" (c)
7660 : : "memory");
7661 return c != 0;
7662 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7663 {
7664 unsigned char c;
7665
7666 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7667 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7668 +
7669 +#ifdef CONFIG_PAX_REFCOUNT
7670 + "jno 0f\n"
7671 + LOCK_PREFIX "subl %2,%0\n"
7672 + "int $4\n0:\n"
7673 + _ASM_EXTABLE(0b, 0b)
7674 +#endif
7675 +
7676 + "sets %1\n"
7677 : "+m" (v->counter), "=qm" (c)
7678 : "ir" (i) : "memory");
7679 return c;
7680 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
7681 goto no_xadd;
7682 #endif
7683 /* Modern 486+ processor */
7684 - return i + xadd(&v->counter, i);
7685 + return i + xadd_check_overflow(&v->counter, i);
7686
7687 #ifdef CONFIG_M386
7688 no_xadd: /* Legacy 386 processor */
7689 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
7690 }
7691
7692 /**
7693 + * atomic_add_return_unchecked - add integer and return
7694 + * @i: integer value to add
7695 + * @v: pointer of type atomic_unchecked_t
7696 + *
7697 + * Atomically adds @i to @v and returns @i + @v
7698 + */
7699 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7700 +{
7701 +#ifdef CONFIG_M386
7702 + int __i;
7703 + unsigned long flags;
7704 + if (unlikely(boot_cpu_data.x86 <= 3))
7705 + goto no_xadd;
7706 +#endif
7707 + /* Modern 486+ processor */
7708 + return i + xadd(&v->counter, i);
7709 +
7710 +#ifdef CONFIG_M386
7711 +no_xadd: /* Legacy 386 processor */
7712 + raw_local_irq_save(flags);
7713 + __i = atomic_read_unchecked(v);
7714 + atomic_set_unchecked(v, i + __i);
7715 + raw_local_irq_restore(flags);
7716 + return i + __i;
7717 +#endif
7718 +}
7719 +
7720 +/**
7721 * atomic_sub_return - subtract integer and return
7722 * @v: pointer of type atomic_t
7723 * @i: integer value to subtract
7724 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7725 }
7726
7727 #define atomic_inc_return(v) (atomic_add_return(1, v))
7728 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7729 +{
7730 + return atomic_add_return_unchecked(1, v);
7731 +}
7732 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7733
7734 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7735 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7736 return cmpxchg(&v->counter, old, new);
7737 }
7738
7739 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7740 +{
7741 + return cmpxchg(&v->counter, old, new);
7742 +}
7743 +
7744 static inline int atomic_xchg(atomic_t *v, int new)
7745 {
7746 return xchg(&v->counter, new);
7747 }
7748
7749 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7750 +{
7751 + return xchg(&v->counter, new);
7752 +}
7753 +
7754 /**
7755 * __atomic_add_unless - add unless the number is already a given value
7756 * @v: pointer of type atomic_t
7757 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7758 */
7759 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7760 {
7761 - int c, old;
7762 + int c, old, new;
7763 c = atomic_read(v);
7764 for (;;) {
7765 - if (unlikely(c == (u)))
7766 + if (unlikely(c == u))
7767 break;
7768 - old = atomic_cmpxchg((v), c, c + (a));
7769 +
7770 + asm volatile("addl %2,%0\n"
7771 +
7772 +#ifdef CONFIG_PAX_REFCOUNT
7773 + "jno 0f\n"
7774 + "subl %2,%0\n"
7775 + "int $4\n0:\n"
7776 + _ASM_EXTABLE(0b, 0b)
7777 +#endif
7778 +
7779 + : "=r" (new)
7780 + : "0" (c), "ir" (a));
7781 +
7782 + old = atomic_cmpxchg(v, c, new);
7783 if (likely(old == c))
7784 break;
7785 c = old;
7786 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7787 return c;
7788 }
7789
7790 +/**
7791 + * atomic_inc_not_zero_hint - increment if not null
7792 + * @v: pointer of type atomic_t
7793 + * @hint: probable value of the atomic before the increment
7794 + *
7795 + * This version of atomic_inc_not_zero() gives a hint of probable
7796 + * value of the atomic. This helps processor to not read the memory
7797 + * before doing the atomic read/modify/write cycle, lowering
7798 + * number of bus transactions on some arches.
7799 + *
7800 + * Returns: 0 if increment was not done, 1 otherwise.
7801 + */
7802 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7803 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7804 +{
7805 + int val, c = hint, new;
7806 +
7807 + /* sanity test, should be removed by compiler if hint is a constant */
7808 + if (!hint)
7809 + return __atomic_add_unless(v, 1, 0);
7810 +
7811 + do {
7812 + asm volatile("incl %0\n"
7813 +
7814 +#ifdef CONFIG_PAX_REFCOUNT
7815 + "jno 0f\n"
7816 + "decl %0\n"
7817 + "int $4\n0:\n"
7818 + _ASM_EXTABLE(0b, 0b)
7819 +#endif
7820 +
7821 + : "=r" (new)
7822 + : "0" (c));
7823 +
7824 + val = atomic_cmpxchg(v, c, new);
7825 + if (val == c)
7826 + return 1;
7827 + c = val;
7828 + } while (c);
7829 +
7830 + return 0;
7831 +}
7832
7833 /*
7834 * atomic_dec_if_positive - decrement by 1 if old value positive
7835 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7836 index 24098aa..1e37723 100644
7837 --- a/arch/x86/include/asm/atomic64_32.h
7838 +++ b/arch/x86/include/asm/atomic64_32.h
7839 @@ -12,6 +12,14 @@ typedef struct {
7840 u64 __aligned(8) counter;
7841 } atomic64_t;
7842
7843 +#ifdef CONFIG_PAX_REFCOUNT
7844 +typedef struct {
7845 + u64 __aligned(8) counter;
7846 +} atomic64_unchecked_t;
7847 +#else
7848 +typedef atomic64_t atomic64_unchecked_t;
7849 +#endif
7850 +
7851 #define ATOMIC64_INIT(val) { (val) }
7852
7853 #ifdef CONFIG_X86_CMPXCHG64
7854 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7855 }
7856
7857 /**
7858 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7859 + * @p: pointer to type atomic64_unchecked_t
7860 + * @o: expected value
7861 + * @n: new value
7862 + *
7863 + * Atomically sets @v to @n if it was equal to @o and returns
7864 + * the old value.
7865 + */
7866 +
7867 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7868 +{
7869 + return cmpxchg64(&v->counter, o, n);
7870 +}
7871 +
7872 +/**
7873 * atomic64_xchg - xchg atomic64 variable
7874 * @v: pointer to type atomic64_t
7875 * @n: value to assign
7876 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7877 }
7878
7879 /**
7880 + * atomic64_set_unchecked - set atomic64 variable
7881 + * @v: pointer to type atomic64_unchecked_t
7882 + * @n: value to assign
7883 + *
7884 + * Atomically sets the value of @v to @n.
7885 + */
7886 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7887 +{
7888 + unsigned high = (unsigned)(i >> 32);
7889 + unsigned low = (unsigned)i;
7890 + asm volatile(ATOMIC64_ALTERNATIVE(set)
7891 + : "+b" (low), "+c" (high)
7892 + : "S" (v)
7893 + : "eax", "edx", "memory"
7894 + );
7895 +}
7896 +
7897 +/**
7898 * atomic64_read - read atomic64 variable
7899 * @v: pointer to type atomic64_t
7900 *
7901 @@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7902 }
7903
7904 /**
7905 + * atomic64_read_unchecked - read atomic64 variable
7906 + * @v: pointer to type atomic64_unchecked_t
7907 + *
7908 + * Atomically reads the value of @v and returns it.
7909 + */
7910 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7911 +{
7912 + long long r;
7913 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7914 + : "=A" (r), "+c" (v)
7915 + : : "memory"
7916 + );
7917 + return r;
7918 + }
7919 +
7920 +/**
7921 * atomic64_add_return - add and return
7922 * @i: integer value to add
7923 * @v: pointer to type atomic64_t
7924 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7925 return i;
7926 }
7927
7928 +/**
7929 + * atomic64_add_return_unchecked - add and return
7930 + * @i: integer value to add
7931 + * @v: pointer to type atomic64_unchecked_t
7932 + *
7933 + * Atomically adds @i to @v and returns @i + *@v
7934 + */
7935 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7936 +{
7937 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7938 + : "+A" (i), "+c" (v)
7939 + : : "memory"
7940 + );
7941 + return i;
7942 +}
7943 +
7944 /*
7945 * Other variants with different arithmetic operators:
7946 */
7947 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7948 return a;
7949 }
7950
7951 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7952 +{
7953 + long long a;
7954 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7955 + : "=A" (a)
7956 + : "S" (v)
7957 + : "memory", "ecx"
7958 + );
7959 + return a;
7960 +}
7961 +
7962 static inline long long atomic64_dec_return(atomic64_t *v)
7963 {
7964 long long a;
7965 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7966 }
7967
7968 /**
7969 + * atomic64_add_unchecked - add integer to atomic64 variable
7970 + * @i: integer value to add
7971 + * @v: pointer to type atomic64_unchecked_t
7972 + *
7973 + * Atomically adds @i to @v.
7974 + */
7975 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7976 +{
7977 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7978 + : "+A" (i), "+c" (v)
7979 + : : "memory"
7980 + );
7981 + return i;
7982 +}
7983 +
7984 +/**
7985 * atomic64_sub - subtract the atomic64 variable
7986 * @i: integer value to subtract
7987 * @v: pointer to type atomic64_t
7988 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7989 index 0e1cbfc..5623683 100644
7990 --- a/arch/x86/include/asm/atomic64_64.h
7991 +++ b/arch/x86/include/asm/atomic64_64.h
7992 @@ -18,7 +18,19 @@
7993 */
7994 static inline long atomic64_read(const atomic64_t *v)
7995 {
7996 - return (*(volatile long *)&(v)->counter);
7997 + return (*(volatile const long *)&(v)->counter);
7998 +}
7999 +
8000 +/**
8001 + * atomic64_read_unchecked - read atomic64 variable
8002 + * @v: pointer of type atomic64_unchecked_t
8003 + *
8004 + * Atomically reads the value of @v.
8005 + * Doesn't imply a read memory barrier.
8006 + */
8007 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8008 +{
8009 + return (*(volatile const long *)&(v)->counter);
8010 }
8011
8012 /**
8013 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
8014 }
8015
8016 /**
8017 + * atomic64_set_unchecked - set atomic64 variable
8018 + * @v: pointer to type atomic64_unchecked_t
8019 + * @i: required value
8020 + *
8021 + * Atomically sets the value of @v to @i.
8022 + */
8023 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8024 +{
8025 + v->counter = i;
8026 +}
8027 +
8028 +/**
8029 * atomic64_add - add integer to atomic64 variable
8030 * @i: integer value to add
8031 * @v: pointer to type atomic64_t
8032 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
8033 */
8034 static inline void atomic64_add(long i, atomic64_t *v)
8035 {
8036 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
8037 +
8038 +#ifdef CONFIG_PAX_REFCOUNT
8039 + "jno 0f\n"
8040 + LOCK_PREFIX "subq %1,%0\n"
8041 + "int $4\n0:\n"
8042 + _ASM_EXTABLE(0b, 0b)
8043 +#endif
8044 +
8045 + : "=m" (v->counter)
8046 + : "er" (i), "m" (v->counter));
8047 +}
8048 +
8049 +/**
8050 + * atomic64_add_unchecked - add integer to atomic64 variable
8051 + * @i: integer value to add
8052 + * @v: pointer to type atomic64_unchecked_t
8053 + *
8054 + * Atomically adds @i to @v.
8055 + */
8056 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
8057 +{
8058 asm volatile(LOCK_PREFIX "addq %1,%0"
8059 : "=m" (v->counter)
8060 : "er" (i), "m" (v->counter));
8061 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
8062 */
8063 static inline void atomic64_sub(long i, atomic64_t *v)
8064 {
8065 - asm volatile(LOCK_PREFIX "subq %1,%0"
8066 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
8067 +
8068 +#ifdef CONFIG_PAX_REFCOUNT
8069 + "jno 0f\n"
8070 + LOCK_PREFIX "addq %1,%0\n"
8071 + "int $4\n0:\n"
8072 + _ASM_EXTABLE(0b, 0b)
8073 +#endif
8074 +
8075 + : "=m" (v->counter)
8076 + : "er" (i), "m" (v->counter));
8077 +}
8078 +
8079 +/**
8080 + * atomic64_sub_unchecked - subtract the atomic64 variable
8081 + * @i: integer value to subtract
8082 + * @v: pointer to type atomic64_unchecked_t
8083 + *
8084 + * Atomically subtracts @i from @v.
8085 + */
8086 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
8087 +{
8088 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
8089 : "=m" (v->counter)
8090 : "er" (i), "m" (v->counter));
8091 }
8092 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
8093 {
8094 unsigned char c;
8095
8096 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
8097 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
8098 +
8099 +#ifdef CONFIG_PAX_REFCOUNT
8100 + "jno 0f\n"
8101 + LOCK_PREFIX "addq %2,%0\n"
8102 + "int $4\n0:\n"
8103 + _ASM_EXTABLE(0b, 0b)
8104 +#endif
8105 +
8106 + "sete %1\n"
8107 : "=m" (v->counter), "=qm" (c)
8108 : "er" (i), "m" (v->counter) : "memory");
8109 return c;
8110 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
8111 */
8112 static inline void atomic64_inc(atomic64_t *v)
8113 {
8114 + asm volatile(LOCK_PREFIX "incq %0\n"
8115 +
8116 +#ifdef CONFIG_PAX_REFCOUNT
8117 + "jno 0f\n"
8118 + LOCK_PREFIX "decq %0\n"
8119 + "int $4\n0:\n"
8120 + _ASM_EXTABLE(0b, 0b)
8121 +#endif
8122 +
8123 + : "=m" (v->counter)
8124 + : "m" (v->counter));
8125 +}
8126 +
8127 +/**
8128 + * atomic64_inc_unchecked - increment atomic64 variable
8129 + * @v: pointer to type atomic64_unchecked_t
8130 + *
8131 + * Atomically increments @v by 1.
8132 + */
8133 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8134 +{
8135 asm volatile(LOCK_PREFIX "incq %0"
8136 : "=m" (v->counter)
8137 : "m" (v->counter));
8138 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
8139 */
8140 static inline void atomic64_dec(atomic64_t *v)
8141 {
8142 - asm volatile(LOCK_PREFIX "decq %0"
8143 + asm volatile(LOCK_PREFIX "decq %0\n"
8144 +
8145 +#ifdef CONFIG_PAX_REFCOUNT
8146 + "jno 0f\n"
8147 + LOCK_PREFIX "incq %0\n"
8148 + "int $4\n0:\n"
8149 + _ASM_EXTABLE(0b, 0b)
8150 +#endif
8151 +
8152 + : "=m" (v->counter)
8153 + : "m" (v->counter));
8154 +}
8155 +
8156 +/**
8157 + * atomic64_dec_unchecked - decrement atomic64 variable
8158 + * @v: pointer to type atomic64_t
8159 + *
8160 + * Atomically decrements @v by 1.
8161 + */
8162 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8163 +{
8164 + asm volatile(LOCK_PREFIX "decq %0\n"
8165 : "=m" (v->counter)
8166 : "m" (v->counter));
8167 }
8168 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
8169 {
8170 unsigned char c;
8171
8172 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
8173 + asm volatile(LOCK_PREFIX "decq %0\n"
8174 +
8175 +#ifdef CONFIG_PAX_REFCOUNT
8176 + "jno 0f\n"
8177 + LOCK_PREFIX "incq %0\n"
8178 + "int $4\n0:\n"
8179 + _ASM_EXTABLE(0b, 0b)
8180 +#endif
8181 +
8182 + "sete %1\n"
8183 : "=m" (v->counter), "=qm" (c)
8184 : "m" (v->counter) : "memory");
8185 return c != 0;
8186 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
8187 {
8188 unsigned char c;
8189
8190 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
8191 + asm volatile(LOCK_PREFIX "incq %0\n"
8192 +
8193 +#ifdef CONFIG_PAX_REFCOUNT
8194 + "jno 0f\n"
8195 + LOCK_PREFIX "decq %0\n"
8196 + "int $4\n0:\n"
8197 + _ASM_EXTABLE(0b, 0b)
8198 +#endif
8199 +
8200 + "sete %1\n"
8201 : "=m" (v->counter), "=qm" (c)
8202 : "m" (v->counter) : "memory");
8203 return c != 0;
8204 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
8205 {
8206 unsigned char c;
8207
8208 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
8209 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
8210 +
8211 +#ifdef CONFIG_PAX_REFCOUNT
8212 + "jno 0f\n"
8213 + LOCK_PREFIX "subq %2,%0\n"
8214 + "int $4\n0:\n"
8215 + _ASM_EXTABLE(0b, 0b)
8216 +#endif
8217 +
8218 + "sets %1\n"
8219 : "=m" (v->counter), "=qm" (c)
8220 : "er" (i), "m" (v->counter) : "memory");
8221 return c;
8222 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
8223 */
8224 static inline long atomic64_add_return(long i, atomic64_t *v)
8225 {
8226 + return i + xadd_check_overflow(&v->counter, i);
8227 +}
8228 +
8229 +/**
8230 + * atomic64_add_return_unchecked - add and return
8231 + * @i: integer value to add
8232 + * @v: pointer to type atomic64_unchecked_t
8233 + *
8234 + * Atomically adds @i to @v and returns @i + @v
8235 + */
8236 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8237 +{
8238 return i + xadd(&v->counter, i);
8239 }
8240
8241 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
8242 }
8243
8244 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
8245 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8246 +{
8247 + return atomic64_add_return_unchecked(1, v);
8248 +}
8249 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
8250
8251 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8252 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8253 return cmpxchg(&v->counter, old, new);
8254 }
8255
8256 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8257 +{
8258 + return cmpxchg(&v->counter, old, new);
8259 +}
8260 +
8261 static inline long atomic64_xchg(atomic64_t *v, long new)
8262 {
8263 return xchg(&v->counter, new);
8264 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
8265 */
8266 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8267 {
8268 - long c, old;
8269 + long c, old, new;
8270 c = atomic64_read(v);
8271 for (;;) {
8272 - if (unlikely(c == (u)))
8273 + if (unlikely(c == u))
8274 break;
8275 - old = atomic64_cmpxchg((v), c, c + (a));
8276 +
8277 + asm volatile("add %2,%0\n"
8278 +
8279 +#ifdef CONFIG_PAX_REFCOUNT
8280 + "jno 0f\n"
8281 + "sub %2,%0\n"
8282 + "int $4\n0:\n"
8283 + _ASM_EXTABLE(0b, 0b)
8284 +#endif
8285 +
8286 + : "=r" (new)
8287 + : "0" (c), "ir" (a));
8288 +
8289 + old = atomic64_cmpxchg(v, c, new);
8290 if (likely(old == c))
8291 break;
8292 c = old;
8293 }
8294 - return c != (u);
8295 + return c != u;
8296 }
8297
8298 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8299 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8300 index 1775d6e..b65017f 100644
8301 --- a/arch/x86/include/asm/bitops.h
8302 +++ b/arch/x86/include/asm/bitops.h
8303 @@ -38,7 +38,7 @@
8304 * a mask operation on a byte.
8305 */
8306 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8307 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8308 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8309 #define CONST_MASK(nr) (1 << ((nr) & 7))
8310
8311 /**
8312 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8313 index 5e1a2ee..c9f9533 100644
8314 --- a/arch/x86/include/asm/boot.h
8315 +++ b/arch/x86/include/asm/boot.h
8316 @@ -11,10 +11,15 @@
8317 #include <asm/pgtable_types.h>
8318
8319 /* Physical address where kernel should be loaded. */
8320 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8321 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8322 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8323 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8324
8325 +#ifndef __ASSEMBLY__
8326 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8327 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8328 +#endif
8329 +
8330 /* Minimum kernel alignment, as a power of two */
8331 #ifdef CONFIG_X86_64
8332 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8333 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8334 index 48f99f1..d78ebf9 100644
8335 --- a/arch/x86/include/asm/cache.h
8336 +++ b/arch/x86/include/asm/cache.h
8337 @@ -5,12 +5,13 @@
8338
8339 /* L1 cache line size */
8340 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8341 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8342 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8343
8344 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8345 +#define __read_only __attribute__((__section__(".data..read_only")))
8346
8347 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8348 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8349 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8350
8351 #ifdef CONFIG_X86_VSMP
8352 #ifdef CONFIG_SMP
8353 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8354 index 4e12668..501d239 100644
8355 --- a/arch/x86/include/asm/cacheflush.h
8356 +++ b/arch/x86/include/asm/cacheflush.h
8357 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8358 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8359
8360 if (pg_flags == _PGMT_DEFAULT)
8361 - return -1;
8362 + return ~0UL;
8363 else if (pg_flags == _PGMT_WC)
8364 return _PAGE_CACHE_WC;
8365 else if (pg_flags == _PGMT_UC_MINUS)
8366 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8367 index 46fc474..b02b0f9 100644
8368 --- a/arch/x86/include/asm/checksum_32.h
8369 +++ b/arch/x86/include/asm/checksum_32.h
8370 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8371 int len, __wsum sum,
8372 int *src_err_ptr, int *dst_err_ptr);
8373
8374 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8375 + int len, __wsum sum,
8376 + int *src_err_ptr, int *dst_err_ptr);
8377 +
8378 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8379 + int len, __wsum sum,
8380 + int *src_err_ptr, int *dst_err_ptr);
8381 +
8382 /*
8383 * Note: when you get a NULL pointer exception here this means someone
8384 * passed in an incorrect kernel address to one of these functions.
8385 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8386 int *err_ptr)
8387 {
8388 might_sleep();
8389 - return csum_partial_copy_generic((__force void *)src, dst,
8390 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8391 len, sum, err_ptr, NULL);
8392 }
8393
8394 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8395 {
8396 might_sleep();
8397 if (access_ok(VERIFY_WRITE, dst, len))
8398 - return csum_partial_copy_generic(src, (__force void *)dst,
8399 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8400 len, sum, NULL, err_ptr);
8401
8402 if (len)
8403 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
8404 index 5d3acdf..6447a02 100644
8405 --- a/arch/x86/include/asm/cmpxchg.h
8406 +++ b/arch/x86/include/asm/cmpxchg.h
8407 @@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
8408 __compiletime_error("Bad argument size for cmpxchg");
8409 extern void __xadd_wrong_size(void)
8410 __compiletime_error("Bad argument size for xadd");
8411 +extern void __xadd_check_overflow_wrong_size(void)
8412 + __compiletime_error("Bad argument size for xadd_check_overflow");
8413
8414 /*
8415 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
8416 @@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
8417 __ret; \
8418 })
8419
8420 +#define __xadd_check_overflow(ptr, inc, lock) \
8421 + ({ \
8422 + __typeof__ (*(ptr)) __ret = (inc); \
8423 + switch (sizeof(*(ptr))) { \
8424 + case __X86_CASE_L: \
8425 + asm volatile (lock "xaddl %0, %1\n" \
8426 + "jno 0f\n" \
8427 + "mov %0,%1\n" \
8428 + "int $4\n0:\n" \
8429 + _ASM_EXTABLE(0b, 0b) \
8430 + : "+r" (__ret), "+m" (*(ptr)) \
8431 + : : "memory", "cc"); \
8432 + break; \
8433 + case __X86_CASE_Q: \
8434 + asm volatile (lock "xaddq %q0, %1\n" \
8435 + "jno 0f\n" \
8436 + "mov %0,%1\n" \
8437 + "int $4\n0:\n" \
8438 + _ASM_EXTABLE(0b, 0b) \
8439 + : "+r" (__ret), "+m" (*(ptr)) \
8440 + : : "memory", "cc"); \
8441 + break; \
8442 + default: \
8443 + __xadd_check_overflow_wrong_size(); \
8444 + } \
8445 + __ret; \
8446 + })
8447 +
8448 /*
8449 * xadd() adds "inc" to "*ptr" and atomically returns the previous
8450 * value of "*ptr".
8451 @@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
8452 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
8453 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
8454
8455 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
8456 +
8457 #endif /* ASM_X86_CMPXCHG_H */
8458 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8459 index f3444f7..051a196 100644
8460 --- a/arch/x86/include/asm/cpufeature.h
8461 +++ b/arch/x86/include/asm/cpufeature.h
8462 @@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8463 ".section .discard,\"aw\",@progbits\n"
8464 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8465 ".previous\n"
8466 - ".section .altinstr_replacement,\"ax\"\n"
8467 + ".section .altinstr_replacement,\"a\"\n"
8468 "3: movb $1,%0\n"
8469 "4:\n"
8470 ".previous\n"
8471 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8472 index 41935fa..3b40db8 100644
8473 --- a/arch/x86/include/asm/desc.h
8474 +++ b/arch/x86/include/asm/desc.h
8475 @@ -4,6 +4,7 @@
8476 #include <asm/desc_defs.h>
8477 #include <asm/ldt.h>
8478 #include <asm/mmu.h>
8479 +#include <asm/pgtable.h>
8480
8481 #include <linux/smp.h>
8482
8483 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8484
8485 desc->type = (info->read_exec_only ^ 1) << 1;
8486 desc->type |= info->contents << 2;
8487 + desc->type |= info->seg_not_present ^ 1;
8488
8489 desc->s = 1;
8490 desc->dpl = 0x3;
8491 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8492 }
8493
8494 extern struct desc_ptr idt_descr;
8495 -extern gate_desc idt_table[];
8496 -
8497 -struct gdt_page {
8498 - struct desc_struct gdt[GDT_ENTRIES];
8499 -} __attribute__((aligned(PAGE_SIZE)));
8500 -
8501 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8502 +extern gate_desc idt_table[256];
8503
8504 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8505 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8506 {
8507 - return per_cpu(gdt_page, cpu).gdt;
8508 + return cpu_gdt_table[cpu];
8509 }
8510
8511 #ifdef CONFIG_X86_64
8512 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8513 unsigned long base, unsigned dpl, unsigned flags,
8514 unsigned short seg)
8515 {
8516 - gate->a = (seg << 16) | (base & 0xffff);
8517 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8518 + gate->gate.offset_low = base;
8519 + gate->gate.seg = seg;
8520 + gate->gate.reserved = 0;
8521 + gate->gate.type = type;
8522 + gate->gate.s = 0;
8523 + gate->gate.dpl = dpl;
8524 + gate->gate.p = 1;
8525 + gate->gate.offset_high = base >> 16;
8526 }
8527
8528 #endif
8529 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8530
8531 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8532 {
8533 + pax_open_kernel();
8534 memcpy(&idt[entry], gate, sizeof(*gate));
8535 + pax_close_kernel();
8536 }
8537
8538 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8539 {
8540 + pax_open_kernel();
8541 memcpy(&ldt[entry], desc, 8);
8542 + pax_close_kernel();
8543 }
8544
8545 static inline void
8546 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8547 default: size = sizeof(*gdt); break;
8548 }
8549
8550 + pax_open_kernel();
8551 memcpy(&gdt[entry], desc, size);
8552 + pax_close_kernel();
8553 }
8554
8555 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8556 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8557
8558 static inline void native_load_tr_desc(void)
8559 {
8560 + pax_open_kernel();
8561 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8562 + pax_close_kernel();
8563 }
8564
8565 static inline void native_load_gdt(const struct desc_ptr *dtr)
8566 @@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8567 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8568 unsigned int i;
8569
8570 + pax_open_kernel();
8571 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8572 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8573 + pax_close_kernel();
8574 }
8575
8576 #define _LDT_empty(info) \
8577 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8578 desc->limit = (limit >> 16) & 0xf;
8579 }
8580
8581 -static inline void _set_gate(int gate, unsigned type, void *addr,
8582 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8583 unsigned dpl, unsigned ist, unsigned seg)
8584 {
8585 gate_desc s;
8586 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8587 * Pentium F0 0F bugfix can have resulted in the mapped
8588 * IDT being write-protected.
8589 */
8590 -static inline void set_intr_gate(unsigned int n, void *addr)
8591 +static inline void set_intr_gate(unsigned int n, const void *addr)
8592 {
8593 BUG_ON((unsigned)n > 0xFF);
8594 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8595 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8596 /*
8597 * This routine sets up an interrupt gate at directory privilege level 3.
8598 */
8599 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8600 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8601 {
8602 BUG_ON((unsigned)n > 0xFF);
8603 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8604 }
8605
8606 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8607 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8608 {
8609 BUG_ON((unsigned)n > 0xFF);
8610 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8611 }
8612
8613 -static inline void set_trap_gate(unsigned int n, void *addr)
8614 +static inline void set_trap_gate(unsigned int n, const void *addr)
8615 {
8616 BUG_ON((unsigned)n > 0xFF);
8617 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8618 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8619 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8620 {
8621 BUG_ON((unsigned)n > 0xFF);
8622 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8623 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8624 }
8625
8626 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8627 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8628 {
8629 BUG_ON((unsigned)n > 0xFF);
8630 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8631 }
8632
8633 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8634 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8635 {
8636 BUG_ON((unsigned)n > 0xFF);
8637 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8638 }
8639
8640 +#ifdef CONFIG_X86_32
8641 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8642 +{
8643 + struct desc_struct d;
8644 +
8645 + if (likely(limit))
8646 + limit = (limit - 1UL) >> PAGE_SHIFT;
8647 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8648 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8649 +}
8650 +#endif
8651 +
8652 #endif /* _ASM_X86_DESC_H */
8653 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8654 index 278441f..b95a174 100644
8655 --- a/arch/x86/include/asm/desc_defs.h
8656 +++ b/arch/x86/include/asm/desc_defs.h
8657 @@ -31,6 +31,12 @@ struct desc_struct {
8658 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8659 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8660 };
8661 + struct {
8662 + u16 offset_low;
8663 + u16 seg;
8664 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8665 + unsigned offset_high: 16;
8666 + } gate;
8667 };
8668 } __attribute__((packed));
8669
8670 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8671 index 908b969..a1f4eb4 100644
8672 --- a/arch/x86/include/asm/e820.h
8673 +++ b/arch/x86/include/asm/e820.h
8674 @@ -69,7 +69,7 @@ struct e820map {
8675 #define ISA_START_ADDRESS 0xa0000
8676 #define ISA_END_ADDRESS 0x100000
8677
8678 -#define BIOS_BEGIN 0x000a0000
8679 +#define BIOS_BEGIN 0x000c0000
8680 #define BIOS_END 0x00100000
8681
8682 #define BIOS_ROM_BASE 0xffe00000
8683 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8684 index 5f962df..7289f09 100644
8685 --- a/arch/x86/include/asm/elf.h
8686 +++ b/arch/x86/include/asm/elf.h
8687 @@ -238,7 +238,25 @@ extern int force_personality32;
8688 the loader. We need to make sure that it is out of the way of the program
8689 that it will "exec", and that there is sufficient room for the brk. */
8690
8691 +#ifdef CONFIG_PAX_SEGMEXEC
8692 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8693 +#else
8694 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8695 +#endif
8696 +
8697 +#ifdef CONFIG_PAX_ASLR
8698 +#ifdef CONFIG_X86_32
8699 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8700 +
8701 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8702 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8703 +#else
8704 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8705 +
8706 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8707 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8708 +#endif
8709 +#endif
8710
8711 /* This yields a mask that user programs can use to figure out what
8712 instruction set this CPU supports. This could be done in user space,
8713 @@ -291,9 +309,7 @@ do { \
8714
8715 #define ARCH_DLINFO \
8716 do { \
8717 - if (vdso_enabled) \
8718 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8719 - (unsigned long)current->mm->context.vdso); \
8720 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8721 } while (0)
8722
8723 #define AT_SYSINFO 32
8724 @@ -304,7 +320,7 @@ do { \
8725
8726 #endif /* !CONFIG_X86_32 */
8727
8728 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8729 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8730
8731 #define VDSO_ENTRY \
8732 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8733 @@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8734 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8735 #define compat_arch_setup_additional_pages syscall32_setup_pages
8736
8737 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8738 -#define arch_randomize_brk arch_randomize_brk
8739 -
8740 /*
8741 * True on X86_32 or when emulating IA32 on X86_64
8742 */
8743 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8744 index cc70c1c..d96d011 100644
8745 --- a/arch/x86/include/asm/emergency-restart.h
8746 +++ b/arch/x86/include/asm/emergency-restart.h
8747 @@ -15,6 +15,6 @@ enum reboot_type {
8748
8749 extern enum reboot_type reboot_type;
8750
8751 -extern void machine_emergency_restart(void);
8752 +extern void machine_emergency_restart(void) __noreturn;
8753
8754 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8755 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8756 index d09bb03..4ea4194 100644
8757 --- a/arch/x86/include/asm/futex.h
8758 +++ b/arch/x86/include/asm/futex.h
8759 @@ -12,16 +12,18 @@
8760 #include <asm/system.h>
8761
8762 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8763 + typecheck(u32 __user *, uaddr); \
8764 asm volatile("1:\t" insn "\n" \
8765 "2:\t.section .fixup,\"ax\"\n" \
8766 "3:\tmov\t%3, %1\n" \
8767 "\tjmp\t2b\n" \
8768 "\t.previous\n" \
8769 _ASM_EXTABLE(1b, 3b) \
8770 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8771 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8772 : "i" (-EFAULT), "0" (oparg), "1" (0))
8773
8774 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8775 + typecheck(u32 __user *, uaddr); \
8776 asm volatile("1:\tmovl %2, %0\n" \
8777 "\tmovl\t%0, %3\n" \
8778 "\t" insn "\n" \
8779 @@ -34,7 +36,7 @@
8780 _ASM_EXTABLE(1b, 4b) \
8781 _ASM_EXTABLE(2b, 4b) \
8782 : "=&a" (oldval), "=&r" (ret), \
8783 - "+m" (*uaddr), "=&r" (tem) \
8784 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8785 : "r" (oparg), "i" (-EFAULT), "1" (0))
8786
8787 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8788 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8789
8790 switch (op) {
8791 case FUTEX_OP_SET:
8792 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8793 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8794 break;
8795 case FUTEX_OP_ADD:
8796 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8797 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8798 uaddr, oparg);
8799 break;
8800 case FUTEX_OP_OR:
8801 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8802 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8803 return -EFAULT;
8804
8805 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8806 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8807 "2:\t.section .fixup, \"ax\"\n"
8808 "3:\tmov %3, %0\n"
8809 "\tjmp 2b\n"
8810 "\t.previous\n"
8811 _ASM_EXTABLE(1b, 3b)
8812 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8813 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8814 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8815 : "memory"
8816 );
8817 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8818 index eb92a6e..b98b2f4 100644
8819 --- a/arch/x86/include/asm/hw_irq.h
8820 +++ b/arch/x86/include/asm/hw_irq.h
8821 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8822 extern void enable_IO_APIC(void);
8823
8824 /* Statistics */
8825 -extern atomic_t irq_err_count;
8826 -extern atomic_t irq_mis_count;
8827 +extern atomic_unchecked_t irq_err_count;
8828 +extern atomic_unchecked_t irq_mis_count;
8829
8830 /* EISA */
8831 extern void eisa_set_level_irq(unsigned int irq);
8832 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8833 index a850b4d..bae26dc 100644
8834 --- a/arch/x86/include/asm/i387.h
8835 +++ b/arch/x86/include/asm/i387.h
8836 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8837 {
8838 int err;
8839
8840 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8841 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8842 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8843 +#endif
8844 +
8845 /* See comment in fxsave() below. */
8846 #ifdef CONFIG_AS_FXSAVEQ
8847 asm volatile("1: fxrstorq %[fx]\n\t"
8848 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8849 {
8850 int err;
8851
8852 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8853 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8854 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8855 +#endif
8856 +
8857 /*
8858 * Clear the bytes not touched by the fxsave and reserved
8859 * for the SW usage.
8860 @@ -424,7 +434,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
8861 static inline bool interrupted_user_mode(void)
8862 {
8863 struct pt_regs *regs = get_irq_regs();
8864 - return regs && user_mode_vm(regs);
8865 + return regs && user_mode(regs);
8866 }
8867
8868 /*
8869 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8870 index d8e8eef..99f81ae 100644
8871 --- a/arch/x86/include/asm/io.h
8872 +++ b/arch/x86/include/asm/io.h
8873 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8874
8875 #include <linux/vmalloc.h>
8876
8877 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8878 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8879 +{
8880 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8881 +}
8882 +
8883 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8884 +{
8885 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8886 +}
8887 +
8888 /*
8889 * Convert a virtual cached pointer to an uncached pointer
8890 */
8891 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8892 index bba3cf8..06bc8da 100644
8893 --- a/arch/x86/include/asm/irqflags.h
8894 +++ b/arch/x86/include/asm/irqflags.h
8895 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8896 sti; \
8897 sysexit
8898
8899 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8900 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8901 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8902 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8903 +
8904 #else
8905 #define INTERRUPT_RETURN iret
8906 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8907 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8908 index 5478825..839e88c 100644
8909 --- a/arch/x86/include/asm/kprobes.h
8910 +++ b/arch/x86/include/asm/kprobes.h
8911 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8912 #define RELATIVEJUMP_SIZE 5
8913 #define RELATIVECALL_OPCODE 0xe8
8914 #define RELATIVE_ADDR_SIZE 4
8915 -#define MAX_STACK_SIZE 64
8916 -#define MIN_STACK_SIZE(ADDR) \
8917 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8918 - THREAD_SIZE - (unsigned long)(ADDR))) \
8919 - ? (MAX_STACK_SIZE) \
8920 - : (((unsigned long)current_thread_info()) + \
8921 - THREAD_SIZE - (unsigned long)(ADDR)))
8922 +#define MAX_STACK_SIZE 64UL
8923 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8924
8925 #define flush_insn_slot(p) do { } while (0)
8926
8927 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8928 index b4973f4..7c4d3fc 100644
8929 --- a/arch/x86/include/asm/kvm_host.h
8930 +++ b/arch/x86/include/asm/kvm_host.h
8931 @@ -459,7 +459,7 @@ struct kvm_arch {
8932 unsigned int n_requested_mmu_pages;
8933 unsigned int n_max_mmu_pages;
8934 unsigned int indirect_shadow_pages;
8935 - atomic_t invlpg_counter;
8936 + atomic_unchecked_t invlpg_counter;
8937 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8938 /*
8939 * Hash table of struct kvm_mmu_page.
8940 @@ -638,7 +638,7 @@ struct kvm_x86_ops {
8941 int (*check_intercept)(struct kvm_vcpu *vcpu,
8942 struct x86_instruction_info *info,
8943 enum x86_intercept_stage stage);
8944 -};
8945 +} __do_const;
8946
8947 struct kvm_arch_async_pf {
8948 u32 token;
8949 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8950 index 9cdae5d..300d20f 100644
8951 --- a/arch/x86/include/asm/local.h
8952 +++ b/arch/x86/include/asm/local.h
8953 @@ -18,26 +18,58 @@ typedef struct {
8954
8955 static inline void local_inc(local_t *l)
8956 {
8957 - asm volatile(_ASM_INC "%0"
8958 + asm volatile(_ASM_INC "%0\n"
8959 +
8960 +#ifdef CONFIG_PAX_REFCOUNT
8961 + "jno 0f\n"
8962 + _ASM_DEC "%0\n"
8963 + "int $4\n0:\n"
8964 + _ASM_EXTABLE(0b, 0b)
8965 +#endif
8966 +
8967 : "+m" (l->a.counter));
8968 }
8969
8970 static inline void local_dec(local_t *l)
8971 {
8972 - asm volatile(_ASM_DEC "%0"
8973 + asm volatile(_ASM_DEC "%0\n"
8974 +
8975 +#ifdef CONFIG_PAX_REFCOUNT
8976 + "jno 0f\n"
8977 + _ASM_INC "%0\n"
8978 + "int $4\n0:\n"
8979 + _ASM_EXTABLE(0b, 0b)
8980 +#endif
8981 +
8982 : "+m" (l->a.counter));
8983 }
8984
8985 static inline void local_add(long i, local_t *l)
8986 {
8987 - asm volatile(_ASM_ADD "%1,%0"
8988 + asm volatile(_ASM_ADD "%1,%0\n"
8989 +
8990 +#ifdef CONFIG_PAX_REFCOUNT
8991 + "jno 0f\n"
8992 + _ASM_SUB "%1,%0\n"
8993 + "int $4\n0:\n"
8994 + _ASM_EXTABLE(0b, 0b)
8995 +#endif
8996 +
8997 : "+m" (l->a.counter)
8998 : "ir" (i));
8999 }
9000
9001 static inline void local_sub(long i, local_t *l)
9002 {
9003 - asm volatile(_ASM_SUB "%1,%0"
9004 + asm volatile(_ASM_SUB "%1,%0\n"
9005 +
9006 +#ifdef CONFIG_PAX_REFCOUNT
9007 + "jno 0f\n"
9008 + _ASM_ADD "%1,%0\n"
9009 + "int $4\n0:\n"
9010 + _ASM_EXTABLE(0b, 0b)
9011 +#endif
9012 +
9013 : "+m" (l->a.counter)
9014 : "ir" (i));
9015 }
9016 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
9017 {
9018 unsigned char c;
9019
9020 - asm volatile(_ASM_SUB "%2,%0; sete %1"
9021 + asm volatile(_ASM_SUB "%2,%0\n"
9022 +
9023 +#ifdef CONFIG_PAX_REFCOUNT
9024 + "jno 0f\n"
9025 + _ASM_ADD "%2,%0\n"
9026 + "int $4\n0:\n"
9027 + _ASM_EXTABLE(0b, 0b)
9028 +#endif
9029 +
9030 + "sete %1\n"
9031 : "+m" (l->a.counter), "=qm" (c)
9032 : "ir" (i) : "memory");
9033 return c;
9034 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
9035 {
9036 unsigned char c;
9037
9038 - asm volatile(_ASM_DEC "%0; sete %1"
9039 + asm volatile(_ASM_DEC "%0\n"
9040 +
9041 +#ifdef CONFIG_PAX_REFCOUNT
9042 + "jno 0f\n"
9043 + _ASM_INC "%0\n"
9044 + "int $4\n0:\n"
9045 + _ASM_EXTABLE(0b, 0b)
9046 +#endif
9047 +
9048 + "sete %1\n"
9049 : "+m" (l->a.counter), "=qm" (c)
9050 : : "memory");
9051 return c != 0;
9052 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
9053 {
9054 unsigned char c;
9055
9056 - asm volatile(_ASM_INC "%0; sete %1"
9057 + asm volatile(_ASM_INC "%0\n"
9058 +
9059 +#ifdef CONFIG_PAX_REFCOUNT
9060 + "jno 0f\n"
9061 + _ASM_DEC "%0\n"
9062 + "int $4\n0:\n"
9063 + _ASM_EXTABLE(0b, 0b)
9064 +#endif
9065 +
9066 + "sete %1\n"
9067 : "+m" (l->a.counter), "=qm" (c)
9068 : : "memory");
9069 return c != 0;
9070 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
9071 {
9072 unsigned char c;
9073
9074 - asm volatile(_ASM_ADD "%2,%0; sets %1"
9075 + asm volatile(_ASM_ADD "%2,%0\n"
9076 +
9077 +#ifdef CONFIG_PAX_REFCOUNT
9078 + "jno 0f\n"
9079 + _ASM_SUB "%2,%0\n"
9080 + "int $4\n0:\n"
9081 + _ASM_EXTABLE(0b, 0b)
9082 +#endif
9083 +
9084 + "sets %1\n"
9085 : "+m" (l->a.counter), "=qm" (c)
9086 : "ir" (i) : "memory");
9087 return c;
9088 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
9089 #endif
9090 /* Modern 486+ processor */
9091 __i = i;
9092 - asm volatile(_ASM_XADD "%0, %1;"
9093 + asm volatile(_ASM_XADD "%0, %1\n"
9094 +
9095 +#ifdef CONFIG_PAX_REFCOUNT
9096 + "jno 0f\n"
9097 + _ASM_MOV "%0,%1\n"
9098 + "int $4\n0:\n"
9099 + _ASM_EXTABLE(0b, 0b)
9100 +#endif
9101 +
9102 : "+r" (i), "+m" (l->a.counter)
9103 : : "memory");
9104 return i + __i;
9105 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
9106 index 593e51d..fa69c9a 100644
9107 --- a/arch/x86/include/asm/mman.h
9108 +++ b/arch/x86/include/asm/mman.h
9109 @@ -5,4 +5,14 @@
9110
9111 #include <asm-generic/mman.h>
9112
9113 +#ifdef __KERNEL__
9114 +#ifndef __ASSEMBLY__
9115 +#ifdef CONFIG_X86_32
9116 +#define arch_mmap_check i386_mmap_check
9117 +int i386_mmap_check(unsigned long addr, unsigned long len,
9118 + unsigned long flags);
9119 +#endif
9120 +#endif
9121 +#endif
9122 +
9123 #endif /* _ASM_X86_MMAN_H */
9124 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
9125 index 5f55e69..e20bfb1 100644
9126 --- a/arch/x86/include/asm/mmu.h
9127 +++ b/arch/x86/include/asm/mmu.h
9128 @@ -9,7 +9,7 @@
9129 * we put the segment information here.
9130 */
9131 typedef struct {
9132 - void *ldt;
9133 + struct desc_struct *ldt;
9134 int size;
9135
9136 #ifdef CONFIG_X86_64
9137 @@ -18,7 +18,19 @@ typedef struct {
9138 #endif
9139
9140 struct mutex lock;
9141 - void *vdso;
9142 + unsigned long vdso;
9143 +
9144 +#ifdef CONFIG_X86_32
9145 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9146 + unsigned long user_cs_base;
9147 + unsigned long user_cs_limit;
9148 +
9149 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9150 + cpumask_t cpu_user_cs_mask;
9151 +#endif
9152 +
9153 +#endif
9154 +#endif
9155 } mm_context_t;
9156
9157 #ifdef CONFIG_SMP
9158 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
9159 index 6902152..399f3a2 100644
9160 --- a/arch/x86/include/asm/mmu_context.h
9161 +++ b/arch/x86/include/asm/mmu_context.h
9162 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
9163
9164 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9165 {
9166 +
9167 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9168 + unsigned int i;
9169 + pgd_t *pgd;
9170 +
9171 + pax_open_kernel();
9172 + pgd = get_cpu_pgd(smp_processor_id());
9173 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9174 + set_pgd_batched(pgd+i, native_make_pgd(0));
9175 + pax_close_kernel();
9176 +#endif
9177 +
9178 #ifdef CONFIG_SMP
9179 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9180 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9181 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9182 struct task_struct *tsk)
9183 {
9184 unsigned cpu = smp_processor_id();
9185 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9186 + int tlbstate = TLBSTATE_OK;
9187 +#endif
9188
9189 if (likely(prev != next)) {
9190 #ifdef CONFIG_SMP
9191 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9192 + tlbstate = percpu_read(cpu_tlbstate.state);
9193 +#endif
9194 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9195 percpu_write(cpu_tlbstate.active_mm, next);
9196 #endif
9197 cpumask_set_cpu(cpu, mm_cpumask(next));
9198
9199 /* Re-load page tables */
9200 +#ifdef CONFIG_PAX_PER_CPU_PGD
9201 + pax_open_kernel();
9202 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9203 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9204 + pax_close_kernel();
9205 + load_cr3(get_cpu_pgd(cpu));
9206 +#else
9207 load_cr3(next->pgd);
9208 +#endif
9209
9210 /* stop flush ipis for the previous mm */
9211 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9212 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9213 */
9214 if (unlikely(prev->context.ldt != next->context.ldt))
9215 load_LDT_nolock(&next->context);
9216 - }
9217 +
9218 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9219 + if (!(__supported_pte_mask & _PAGE_NX)) {
9220 + smp_mb__before_clear_bit();
9221 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9222 + smp_mb__after_clear_bit();
9223 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9224 + }
9225 +#endif
9226 +
9227 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9228 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9229 + prev->context.user_cs_limit != next->context.user_cs_limit))
9230 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9231 #ifdef CONFIG_SMP
9232 + else if (unlikely(tlbstate != TLBSTATE_OK))
9233 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9234 +#endif
9235 +#endif
9236 +
9237 + }
9238 else {
9239 +
9240 +#ifdef CONFIG_PAX_PER_CPU_PGD
9241 + pax_open_kernel();
9242 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9243 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9244 + pax_close_kernel();
9245 + load_cr3(get_cpu_pgd(cpu));
9246 +#endif
9247 +
9248 +#ifdef CONFIG_SMP
9249 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9250 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9251
9252 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9253 * tlb flush IPI delivery. We must reload CR3
9254 * to make sure to use no freed page tables.
9255 */
9256 +
9257 +#ifndef CONFIG_PAX_PER_CPU_PGD
9258 load_cr3(next->pgd);
9259 +#endif
9260 +
9261 load_LDT_nolock(&next->context);
9262 +
9263 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9264 + if (!(__supported_pte_mask & _PAGE_NX))
9265 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9266 +#endif
9267 +
9268 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9269 +#ifdef CONFIG_PAX_PAGEEXEC
9270 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
9271 +#endif
9272 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9273 +#endif
9274 +
9275 }
9276 +#endif
9277 }
9278 -#endif
9279 }
9280
9281 #define activate_mm(prev, next) \
9282 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
9283 index 9eae775..c914fea 100644
9284 --- a/arch/x86/include/asm/module.h
9285 +++ b/arch/x86/include/asm/module.h
9286 @@ -5,6 +5,7 @@
9287
9288 #ifdef CONFIG_X86_64
9289 /* X86_64 does not define MODULE_PROC_FAMILY */
9290 +#define MODULE_PROC_FAMILY ""
9291 #elif defined CONFIG_M386
9292 #define MODULE_PROC_FAMILY "386 "
9293 #elif defined CONFIG_M486
9294 @@ -59,8 +60,20 @@
9295 #error unknown processor family
9296 #endif
9297
9298 -#ifdef CONFIG_X86_32
9299 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
9300 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9301 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
9302 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
9303 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
9304 +#else
9305 +#define MODULE_PAX_KERNEXEC ""
9306 #endif
9307
9308 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9309 +#define MODULE_PAX_UDEREF "UDEREF "
9310 +#else
9311 +#define MODULE_PAX_UDEREF ""
9312 +#endif
9313 +
9314 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
9315 +
9316 #endif /* _ASM_X86_MODULE_H */
9317 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
9318 index 7639dbf..e08a58c 100644
9319 --- a/arch/x86/include/asm/page_64_types.h
9320 +++ b/arch/x86/include/asm/page_64_types.h
9321 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9322
9323 /* duplicated to the one in bootmem.h */
9324 extern unsigned long max_pfn;
9325 -extern unsigned long phys_base;
9326 +extern const unsigned long phys_base;
9327
9328 extern unsigned long __phys_addr(unsigned long);
9329 #define __phys_reloc_hide(x) (x)
9330 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
9331 index a7d2db9..edb023e 100644
9332 --- a/arch/x86/include/asm/paravirt.h
9333 +++ b/arch/x86/include/asm/paravirt.h
9334 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
9335 val);
9336 }
9337
9338 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9339 +{
9340 + pgdval_t val = native_pgd_val(pgd);
9341 +
9342 + if (sizeof(pgdval_t) > sizeof(long))
9343 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9344 + val, (u64)val >> 32);
9345 + else
9346 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9347 + val);
9348 +}
9349 +
9350 static inline void pgd_clear(pgd_t *pgdp)
9351 {
9352 set_pgd(pgdp, __pgd(0));
9353 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9354 pv_mmu_ops.set_fixmap(idx, phys, flags);
9355 }
9356
9357 +#ifdef CONFIG_PAX_KERNEXEC
9358 +static inline unsigned long pax_open_kernel(void)
9359 +{
9360 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9361 +}
9362 +
9363 +static inline unsigned long pax_close_kernel(void)
9364 +{
9365 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9366 +}
9367 +#else
9368 +static inline unsigned long pax_open_kernel(void) { return 0; }
9369 +static inline unsigned long pax_close_kernel(void) { return 0; }
9370 +#endif
9371 +
9372 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9373
9374 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9375 @@ -964,7 +991,7 @@ extern void default_banner(void);
9376
9377 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9378 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9379 -#define PARA_INDIRECT(addr) *%cs:addr
9380 +#define PARA_INDIRECT(addr) *%ss:addr
9381 #endif
9382
9383 #define INTERRUPT_RETURN \
9384 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
9385 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9386 CLBR_NONE, \
9387 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9388 +
9389 +#define GET_CR0_INTO_RDI \
9390 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9391 + mov %rax,%rdi
9392 +
9393 +#define SET_RDI_INTO_CR0 \
9394 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9395 +
9396 +#define GET_CR3_INTO_RDI \
9397 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9398 + mov %rax,%rdi
9399 +
9400 +#define SET_RDI_INTO_CR3 \
9401 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9402 +
9403 #endif /* CONFIG_X86_32 */
9404
9405 #endif /* __ASSEMBLY__ */
9406 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9407 index 8e8b9a4..f07d725 100644
9408 --- a/arch/x86/include/asm/paravirt_types.h
9409 +++ b/arch/x86/include/asm/paravirt_types.h
9410 @@ -84,20 +84,20 @@ struct pv_init_ops {
9411 */
9412 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9413 unsigned long addr, unsigned len);
9414 -};
9415 +} __no_const;
9416
9417
9418 struct pv_lazy_ops {
9419 /* Set deferred update mode, used for batching operations. */
9420 void (*enter)(void);
9421 void (*leave)(void);
9422 -};
9423 +} __no_const;
9424
9425 struct pv_time_ops {
9426 unsigned long long (*sched_clock)(void);
9427 unsigned long long (*steal_clock)(int cpu);
9428 unsigned long (*get_tsc_khz)(void);
9429 -};
9430 +} __no_const;
9431
9432 struct pv_cpu_ops {
9433 /* hooks for various privileged instructions */
9434 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
9435
9436 void (*start_context_switch)(struct task_struct *prev);
9437 void (*end_context_switch)(struct task_struct *next);
9438 -};
9439 +} __no_const;
9440
9441 struct pv_irq_ops {
9442 /*
9443 @@ -224,7 +224,7 @@ struct pv_apic_ops {
9444 unsigned long start_eip,
9445 unsigned long start_esp);
9446 #endif
9447 -};
9448 +} __no_const;
9449
9450 struct pv_mmu_ops {
9451 unsigned long (*read_cr2)(void);
9452 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
9453 struct paravirt_callee_save make_pud;
9454
9455 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9456 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9457 #endif /* PAGETABLE_LEVELS == 4 */
9458 #endif /* PAGETABLE_LEVELS >= 3 */
9459
9460 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
9461 an mfn. We can tell which is which from the index. */
9462 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9463 phys_addr_t phys, pgprot_t flags);
9464 +
9465 +#ifdef CONFIG_PAX_KERNEXEC
9466 + unsigned long (*pax_open_kernel)(void);
9467 + unsigned long (*pax_close_kernel)(void);
9468 +#endif
9469 +
9470 };
9471
9472 struct arch_spinlock;
9473 @@ -334,7 +341,7 @@ struct pv_lock_ops {
9474 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9475 int (*spin_trylock)(struct arch_spinlock *lock);
9476 void (*spin_unlock)(struct arch_spinlock *lock);
9477 -};
9478 +} __no_const;
9479
9480 /* This contains all the paravirt structures: we get a convenient
9481 * number for each function using the offset which we use to indicate
9482 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9483 index b4389a4..b7ff22c 100644
9484 --- a/arch/x86/include/asm/pgalloc.h
9485 +++ b/arch/x86/include/asm/pgalloc.h
9486 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9487 pmd_t *pmd, pte_t *pte)
9488 {
9489 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9490 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9491 +}
9492 +
9493 +static inline void pmd_populate_user(struct mm_struct *mm,
9494 + pmd_t *pmd, pte_t *pte)
9495 +{
9496 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9497 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9498 }
9499
9500 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9501 index 98391db..8f6984e 100644
9502 --- a/arch/x86/include/asm/pgtable-2level.h
9503 +++ b/arch/x86/include/asm/pgtable-2level.h
9504 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9505
9506 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9507 {
9508 + pax_open_kernel();
9509 *pmdp = pmd;
9510 + pax_close_kernel();
9511 }
9512
9513 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9514 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9515 index effff47..f9e4035 100644
9516 --- a/arch/x86/include/asm/pgtable-3level.h
9517 +++ b/arch/x86/include/asm/pgtable-3level.h
9518 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9519
9520 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9521 {
9522 + pax_open_kernel();
9523 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9524 + pax_close_kernel();
9525 }
9526
9527 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9528 {
9529 + pax_open_kernel();
9530 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9531 + pax_close_kernel();
9532 }
9533
9534 /*
9535 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9536 index 18601c8..3d716d1 100644
9537 --- a/arch/x86/include/asm/pgtable.h
9538 +++ b/arch/x86/include/asm/pgtable.h
9539 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9540
9541 #ifndef __PAGETABLE_PUD_FOLDED
9542 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9543 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9544 #define pgd_clear(pgd) native_pgd_clear(pgd)
9545 #endif
9546
9547 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9548
9549 #define arch_end_context_switch(prev) do {} while(0)
9550
9551 +#define pax_open_kernel() native_pax_open_kernel()
9552 +#define pax_close_kernel() native_pax_close_kernel()
9553 #endif /* CONFIG_PARAVIRT */
9554
9555 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9556 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9557 +
9558 +#ifdef CONFIG_PAX_KERNEXEC
9559 +static inline unsigned long native_pax_open_kernel(void)
9560 +{
9561 + unsigned long cr0;
9562 +
9563 + preempt_disable();
9564 + barrier();
9565 + cr0 = read_cr0() ^ X86_CR0_WP;
9566 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9567 + write_cr0(cr0);
9568 + return cr0 ^ X86_CR0_WP;
9569 +}
9570 +
9571 +static inline unsigned long native_pax_close_kernel(void)
9572 +{
9573 + unsigned long cr0;
9574 +
9575 + cr0 = read_cr0() ^ X86_CR0_WP;
9576 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9577 + write_cr0(cr0);
9578 + barrier();
9579 + preempt_enable_no_resched();
9580 + return cr0 ^ X86_CR0_WP;
9581 +}
9582 +#else
9583 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9584 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9585 +#endif
9586 +
9587 /*
9588 * The following only work if pte_present() is true.
9589 * Undefined behaviour if not..
9590 */
9591 +static inline int pte_user(pte_t pte)
9592 +{
9593 + return pte_val(pte) & _PAGE_USER;
9594 +}
9595 +
9596 static inline int pte_dirty(pte_t pte)
9597 {
9598 return pte_flags(pte) & _PAGE_DIRTY;
9599 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9600 return pte_clear_flags(pte, _PAGE_RW);
9601 }
9602
9603 +static inline pte_t pte_mkread(pte_t pte)
9604 +{
9605 + return __pte(pte_val(pte) | _PAGE_USER);
9606 +}
9607 +
9608 static inline pte_t pte_mkexec(pte_t pte)
9609 {
9610 - return pte_clear_flags(pte, _PAGE_NX);
9611 +#ifdef CONFIG_X86_PAE
9612 + if (__supported_pte_mask & _PAGE_NX)
9613 + return pte_clear_flags(pte, _PAGE_NX);
9614 + else
9615 +#endif
9616 + return pte_set_flags(pte, _PAGE_USER);
9617 +}
9618 +
9619 +static inline pte_t pte_exprotect(pte_t pte)
9620 +{
9621 +#ifdef CONFIG_X86_PAE
9622 + if (__supported_pte_mask & _PAGE_NX)
9623 + return pte_set_flags(pte, _PAGE_NX);
9624 + else
9625 +#endif
9626 + return pte_clear_flags(pte, _PAGE_USER);
9627 }
9628
9629 static inline pte_t pte_mkdirty(pte_t pte)
9630 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9631 #endif
9632
9633 #ifndef __ASSEMBLY__
9634 +
9635 +#ifdef CONFIG_PAX_PER_CPU_PGD
9636 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9637 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9638 +{
9639 + return cpu_pgd[cpu];
9640 +}
9641 +#endif
9642 +
9643 #include <linux/mm_types.h>
9644
9645 static inline int pte_none(pte_t pte)
9646 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9647
9648 static inline int pgd_bad(pgd_t pgd)
9649 {
9650 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9651 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9652 }
9653
9654 static inline int pgd_none(pgd_t pgd)
9655 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9656 * pgd_offset() returns a (pgd_t *)
9657 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9658 */
9659 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9660 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9661 +
9662 +#ifdef CONFIG_PAX_PER_CPU_PGD
9663 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9664 +#endif
9665 +
9666 /*
9667 * a shortcut which implies the use of the kernel's pgd, instead
9668 * of a process's
9669 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9670 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9671 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9672
9673 +#ifdef CONFIG_X86_32
9674 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9675 +#else
9676 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9677 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9678 +
9679 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9680 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9681 +#else
9682 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9683 +#endif
9684 +
9685 +#endif
9686 +
9687 #ifndef __ASSEMBLY__
9688
9689 extern int direct_gbpages;
9690 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9691 * dst and src can be on the same page, but the range must not overlap,
9692 * and must not cross a page boundary.
9693 */
9694 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9695 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9696 {
9697 - memcpy(dst, src, count * sizeof(pgd_t));
9698 + pax_open_kernel();
9699 + while (count--)
9700 + *dst++ = *src++;
9701 + pax_close_kernel();
9702 }
9703
9704 +#ifdef CONFIG_PAX_PER_CPU_PGD
9705 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9706 +#endif
9707 +
9708 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9709 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9710 +#else
9711 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9712 +#endif
9713
9714 #include <asm-generic/pgtable.h>
9715 #endif /* __ASSEMBLY__ */
9716 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9717 index 0c92113..34a77c6 100644
9718 --- a/arch/x86/include/asm/pgtable_32.h
9719 +++ b/arch/x86/include/asm/pgtable_32.h
9720 @@ -25,9 +25,6 @@
9721 struct mm_struct;
9722 struct vm_area_struct;
9723
9724 -extern pgd_t swapper_pg_dir[1024];
9725 -extern pgd_t initial_page_table[1024];
9726 -
9727 static inline void pgtable_cache_init(void) { }
9728 static inline void check_pgt_cache(void) { }
9729 void paging_init(void);
9730 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9731 # include <asm/pgtable-2level.h>
9732 #endif
9733
9734 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9735 +extern pgd_t initial_page_table[PTRS_PER_PGD];
9736 +#ifdef CONFIG_X86_PAE
9737 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9738 +#endif
9739 +
9740 #if defined(CONFIG_HIGHPTE)
9741 #define pte_offset_map(dir, address) \
9742 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9743 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9744 /* Clear a kernel PTE and flush it from the TLB */
9745 #define kpte_clear_flush(ptep, vaddr) \
9746 do { \
9747 + pax_open_kernel(); \
9748 pte_clear(&init_mm, (vaddr), (ptep)); \
9749 + pax_close_kernel(); \
9750 __flush_tlb_one((vaddr)); \
9751 } while (0)
9752
9753 @@ -74,6 +79,9 @@ do { \
9754
9755 #endif /* !__ASSEMBLY__ */
9756
9757 +#define HAVE_ARCH_UNMAPPED_AREA
9758 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9759 +
9760 /*
9761 * kern_addr_valid() is (1) for FLATMEM and (0) for
9762 * SPARSEMEM and DISCONTIGMEM
9763 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9764 index ed5903b..c7fe163 100644
9765 --- a/arch/x86/include/asm/pgtable_32_types.h
9766 +++ b/arch/x86/include/asm/pgtable_32_types.h
9767 @@ -8,7 +8,7 @@
9768 */
9769 #ifdef CONFIG_X86_PAE
9770 # include <asm/pgtable-3level_types.h>
9771 -# define PMD_SIZE (1UL << PMD_SHIFT)
9772 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9773 # define PMD_MASK (~(PMD_SIZE - 1))
9774 #else
9775 # include <asm/pgtable-2level_types.h>
9776 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9777 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9778 #endif
9779
9780 +#ifdef CONFIG_PAX_KERNEXEC
9781 +#ifndef __ASSEMBLY__
9782 +extern unsigned char MODULES_EXEC_VADDR[];
9783 +extern unsigned char MODULES_EXEC_END[];
9784 +#endif
9785 +#include <asm/boot.h>
9786 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9787 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9788 +#else
9789 +#define ktla_ktva(addr) (addr)
9790 +#define ktva_ktla(addr) (addr)
9791 +#endif
9792 +
9793 #define MODULES_VADDR VMALLOC_START
9794 #define MODULES_END VMALLOC_END
9795 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9796 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9797 index 975f709..107976d 100644
9798 --- a/arch/x86/include/asm/pgtable_64.h
9799 +++ b/arch/x86/include/asm/pgtable_64.h
9800 @@ -16,10 +16,14 @@
9801
9802 extern pud_t level3_kernel_pgt[512];
9803 extern pud_t level3_ident_pgt[512];
9804 +extern pud_t level3_vmalloc_start_pgt[512];
9805 +extern pud_t level3_vmalloc_end_pgt[512];
9806 +extern pud_t level3_vmemmap_pgt[512];
9807 +extern pud_t level2_vmemmap_pgt[512];
9808 extern pmd_t level2_kernel_pgt[512];
9809 extern pmd_t level2_fixmap_pgt[512];
9810 -extern pmd_t level2_ident_pgt[512];
9811 -extern pgd_t init_level4_pgt[];
9812 +extern pmd_t level2_ident_pgt[512*2];
9813 +extern pgd_t init_level4_pgt[512];
9814
9815 #define swapper_pg_dir init_level4_pgt
9816
9817 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9818
9819 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9820 {
9821 + pax_open_kernel();
9822 *pmdp = pmd;
9823 + pax_close_kernel();
9824 }
9825
9826 static inline void native_pmd_clear(pmd_t *pmd)
9827 @@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9828
9829 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9830 {
9831 + pax_open_kernel();
9832 + *pgdp = pgd;
9833 + pax_close_kernel();
9834 +}
9835 +
9836 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9837 +{
9838 *pgdp = pgd;
9839 }
9840
9841 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9842 index 766ea16..5b96cb3 100644
9843 --- a/arch/x86/include/asm/pgtable_64_types.h
9844 +++ b/arch/x86/include/asm/pgtable_64_types.h
9845 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9846 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9847 #define MODULES_END _AC(0xffffffffff000000, UL)
9848 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9849 +#define MODULES_EXEC_VADDR MODULES_VADDR
9850 +#define MODULES_EXEC_END MODULES_END
9851 +
9852 +#define ktla_ktva(addr) (addr)
9853 +#define ktva_ktla(addr) (addr)
9854
9855 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9856 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9857 index 013286a..8b42f4f 100644
9858 --- a/arch/x86/include/asm/pgtable_types.h
9859 +++ b/arch/x86/include/asm/pgtable_types.h
9860 @@ -16,13 +16,12 @@
9861 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9862 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9863 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9864 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9865 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9866 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9867 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9868 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9869 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9870 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9871 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9872 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9873 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9874 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9875
9876 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9877 @@ -40,7 +39,6 @@
9878 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9879 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9880 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9881 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9882 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9883 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9884 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9885 @@ -57,8 +55,10 @@
9886
9887 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9888 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9889 -#else
9890 +#elif defined(CONFIG_KMEMCHECK)
9891 #define _PAGE_NX (_AT(pteval_t, 0))
9892 +#else
9893 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9894 #endif
9895
9896 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9897 @@ -96,6 +96,9 @@
9898 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9899 _PAGE_ACCESSED)
9900
9901 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9902 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9903 +
9904 #define __PAGE_KERNEL_EXEC \
9905 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9906 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9907 @@ -106,7 +109,7 @@
9908 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9909 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9910 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9911 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9912 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9913 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9914 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9915 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9916 @@ -168,8 +171,8 @@
9917 * bits are combined, this will alow user to access the high address mapped
9918 * VDSO in the presence of CONFIG_COMPAT_VDSO
9919 */
9920 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9921 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9922 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9923 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9924 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9925 #endif
9926
9927 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9928 {
9929 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9930 }
9931 +#endif
9932
9933 +#if PAGETABLE_LEVELS == 3
9934 +#include <asm-generic/pgtable-nopud.h>
9935 +#endif
9936 +
9937 +#if PAGETABLE_LEVELS == 2
9938 +#include <asm-generic/pgtable-nopmd.h>
9939 +#endif
9940 +
9941 +#ifndef __ASSEMBLY__
9942 #if PAGETABLE_LEVELS > 3
9943 typedef struct { pudval_t pud; } pud_t;
9944
9945 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9946 return pud.pud;
9947 }
9948 #else
9949 -#include <asm-generic/pgtable-nopud.h>
9950 -
9951 static inline pudval_t native_pud_val(pud_t pud)
9952 {
9953 return native_pgd_val(pud.pgd);
9954 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9955 return pmd.pmd;
9956 }
9957 #else
9958 -#include <asm-generic/pgtable-nopmd.h>
9959 -
9960 static inline pmdval_t native_pmd_val(pmd_t pmd)
9961 {
9962 return native_pgd_val(pmd.pud.pgd);
9963 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9964
9965 extern pteval_t __supported_pte_mask;
9966 extern void set_nx(void);
9967 -extern int nx_enabled;
9968
9969 #define pgprot_writecombine pgprot_writecombine
9970 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9971 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9972 index bb3ee36..781a6b8 100644
9973 --- a/arch/x86/include/asm/processor.h
9974 +++ b/arch/x86/include/asm/processor.h
9975 @@ -268,7 +268,7 @@ struct tss_struct {
9976
9977 } ____cacheline_aligned;
9978
9979 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9980 +extern struct tss_struct init_tss[NR_CPUS];
9981
9982 /*
9983 * Save the original ist values for checking stack pointers during debugging
9984 @@ -861,11 +861,18 @@ static inline void spin_lock_prefetch(const void *x)
9985 */
9986 #define TASK_SIZE PAGE_OFFSET
9987 #define TASK_SIZE_MAX TASK_SIZE
9988 +
9989 +#ifdef CONFIG_PAX_SEGMEXEC
9990 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9991 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9992 +#else
9993 #define STACK_TOP TASK_SIZE
9994 -#define STACK_TOP_MAX STACK_TOP
9995 +#endif
9996 +
9997 +#define STACK_TOP_MAX TASK_SIZE
9998
9999 #define INIT_THREAD { \
10000 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
10001 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10002 .vm86_info = NULL, \
10003 .sysenter_cs = __KERNEL_CS, \
10004 .io_bitmap_ptr = NULL, \
10005 @@ -879,7 +886,7 @@ static inline void spin_lock_prefetch(const void *x)
10006 */
10007 #define INIT_TSS { \
10008 .x86_tss = { \
10009 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
10010 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10011 .ss0 = __KERNEL_DS, \
10012 .ss1 = __KERNEL_CS, \
10013 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
10014 @@ -890,11 +897,7 @@ static inline void spin_lock_prefetch(const void *x)
10015 extern unsigned long thread_saved_pc(struct task_struct *tsk);
10016
10017 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
10018 -#define KSTK_TOP(info) \
10019 -({ \
10020 - unsigned long *__ptr = (unsigned long *)(info); \
10021 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
10022 -})
10023 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
10024
10025 /*
10026 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
10027 @@ -909,7 +912,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10028 #define task_pt_regs(task) \
10029 ({ \
10030 struct pt_regs *__regs__; \
10031 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
10032 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
10033 __regs__ - 1; \
10034 })
10035
10036 @@ -919,13 +922,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10037 /*
10038 * User space process size. 47bits minus one guard page.
10039 */
10040 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
10041 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
10042
10043 /* This decides where the kernel will search for a free chunk of vm
10044 * space during mmap's.
10045 */
10046 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
10047 - 0xc0000000 : 0xFFFFe000)
10048 + 0xc0000000 : 0xFFFFf000)
10049
10050 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
10051 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
10052 @@ -936,11 +939,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10053 #define STACK_TOP_MAX TASK_SIZE_MAX
10054
10055 #define INIT_THREAD { \
10056 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10057 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10058 }
10059
10060 #define INIT_TSS { \
10061 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10062 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10063 }
10064
10065 /*
10066 @@ -962,6 +965,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
10067 */
10068 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
10069
10070 +#ifdef CONFIG_PAX_SEGMEXEC
10071 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
10072 +#endif
10073 +
10074 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
10075
10076 /* Get/set a process' ability to use the timestamp counter instruction */
10077 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
10078 index 3566454..4bdfb8c 100644
10079 --- a/arch/x86/include/asm/ptrace.h
10080 +++ b/arch/x86/include/asm/ptrace.h
10081 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
10082 }
10083
10084 /*
10085 - * user_mode_vm(regs) determines whether a register set came from user mode.
10086 + * user_mode(regs) determines whether a register set came from user mode.
10087 * This is true if V8086 mode was enabled OR if the register set was from
10088 * protected mode with RPL-3 CS value. This tricky test checks that with
10089 * one comparison. Many places in the kernel can bypass this full check
10090 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
10091 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
10092 + * be used.
10093 */
10094 -static inline int user_mode(struct pt_regs *regs)
10095 +static inline int user_mode_novm(struct pt_regs *regs)
10096 {
10097 #ifdef CONFIG_X86_32
10098 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
10099 #else
10100 - return !!(regs->cs & 3);
10101 + return !!(regs->cs & SEGMENT_RPL_MASK);
10102 #endif
10103 }
10104
10105 -static inline int user_mode_vm(struct pt_regs *regs)
10106 +static inline int user_mode(struct pt_regs *regs)
10107 {
10108 #ifdef CONFIG_X86_32
10109 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
10110 USER_RPL;
10111 #else
10112 - return user_mode(regs);
10113 + return user_mode_novm(regs);
10114 #endif
10115 }
10116
10117 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
10118 #ifdef CONFIG_X86_64
10119 static inline bool user_64bit_mode(struct pt_regs *regs)
10120 {
10121 + unsigned long cs = regs->cs & 0xffff;
10122 #ifndef CONFIG_PARAVIRT
10123 /*
10124 * On non-paravirt systems, this is the only long mode CPL 3
10125 * selector. We do not allow long mode selectors in the LDT.
10126 */
10127 - return regs->cs == __USER_CS;
10128 + return cs == __USER_CS;
10129 #else
10130 /* Headers are too twisted for this to go in paravirt.h. */
10131 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
10132 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
10133 #endif
10134 }
10135 #endif
10136 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
10137 index 92f29706..a79cbbb 100644
10138 --- a/arch/x86/include/asm/reboot.h
10139 +++ b/arch/x86/include/asm/reboot.h
10140 @@ -6,19 +6,19 @@
10141 struct pt_regs;
10142
10143 struct machine_ops {
10144 - void (*restart)(char *cmd);
10145 - void (*halt)(void);
10146 - void (*power_off)(void);
10147 + void (* __noreturn restart)(char *cmd);
10148 + void (* __noreturn halt)(void);
10149 + void (* __noreturn power_off)(void);
10150 void (*shutdown)(void);
10151 void (*crash_shutdown)(struct pt_regs *);
10152 - void (*emergency_restart)(void);
10153 -};
10154 + void (* __noreturn emergency_restart)(void);
10155 +} __no_const;
10156
10157 extern struct machine_ops machine_ops;
10158
10159 void native_machine_crash_shutdown(struct pt_regs *regs);
10160 void native_machine_shutdown(void);
10161 -void machine_real_restart(unsigned int type);
10162 +void machine_real_restart(unsigned int type) __noreturn;
10163 /* These must match dispatch_table in reboot_32.S */
10164 #define MRR_BIOS 0
10165 #define MRR_APM 1
10166 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
10167 index 2dbe4a7..ce1db00 100644
10168 --- a/arch/x86/include/asm/rwsem.h
10169 +++ b/arch/x86/include/asm/rwsem.h
10170 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
10171 {
10172 asm volatile("# beginning down_read\n\t"
10173 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10174 +
10175 +#ifdef CONFIG_PAX_REFCOUNT
10176 + "jno 0f\n"
10177 + LOCK_PREFIX _ASM_DEC "(%1)\n"
10178 + "int $4\n0:\n"
10179 + _ASM_EXTABLE(0b, 0b)
10180 +#endif
10181 +
10182 /* adds 0x00000001 */
10183 " jns 1f\n"
10184 " call call_rwsem_down_read_failed\n"
10185 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
10186 "1:\n\t"
10187 " mov %1,%2\n\t"
10188 " add %3,%2\n\t"
10189 +
10190 +#ifdef CONFIG_PAX_REFCOUNT
10191 + "jno 0f\n"
10192 + "sub %3,%2\n"
10193 + "int $4\n0:\n"
10194 + _ASM_EXTABLE(0b, 0b)
10195 +#endif
10196 +
10197 " jle 2f\n\t"
10198 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10199 " jnz 1b\n\t"
10200 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
10201 long tmp;
10202 asm volatile("# beginning down_write\n\t"
10203 LOCK_PREFIX " xadd %1,(%2)\n\t"
10204 +
10205 +#ifdef CONFIG_PAX_REFCOUNT
10206 + "jno 0f\n"
10207 + "mov %1,(%2)\n"
10208 + "int $4\n0:\n"
10209 + _ASM_EXTABLE(0b, 0b)
10210 +#endif
10211 +
10212 /* adds 0xffff0001, returns the old value */
10213 " test %1,%1\n\t"
10214 /* was the count 0 before? */
10215 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
10216 long tmp;
10217 asm volatile("# beginning __up_read\n\t"
10218 LOCK_PREFIX " xadd %1,(%2)\n\t"
10219 +
10220 +#ifdef CONFIG_PAX_REFCOUNT
10221 + "jno 0f\n"
10222 + "mov %1,(%2)\n"
10223 + "int $4\n0:\n"
10224 + _ASM_EXTABLE(0b, 0b)
10225 +#endif
10226 +
10227 /* subtracts 1, returns the old value */
10228 " jns 1f\n\t"
10229 " call call_rwsem_wake\n" /* expects old value in %edx */
10230 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
10231 long tmp;
10232 asm volatile("# beginning __up_write\n\t"
10233 LOCK_PREFIX " xadd %1,(%2)\n\t"
10234 +
10235 +#ifdef CONFIG_PAX_REFCOUNT
10236 + "jno 0f\n"
10237 + "mov %1,(%2)\n"
10238 + "int $4\n0:\n"
10239 + _ASM_EXTABLE(0b, 0b)
10240 +#endif
10241 +
10242 /* subtracts 0xffff0001, returns the old value */
10243 " jns 1f\n\t"
10244 " call call_rwsem_wake\n" /* expects old value in %edx */
10245 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10246 {
10247 asm volatile("# beginning __downgrade_write\n\t"
10248 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10249 +
10250 +#ifdef CONFIG_PAX_REFCOUNT
10251 + "jno 0f\n"
10252 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10253 + "int $4\n0:\n"
10254 + _ASM_EXTABLE(0b, 0b)
10255 +#endif
10256 +
10257 /*
10258 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10259 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10260 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10261 */
10262 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10263 {
10264 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10265 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10266 +
10267 +#ifdef CONFIG_PAX_REFCOUNT
10268 + "jno 0f\n"
10269 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10270 + "int $4\n0:\n"
10271 + _ASM_EXTABLE(0b, 0b)
10272 +#endif
10273 +
10274 : "+m" (sem->count)
10275 : "er" (delta));
10276 }
10277 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10278 */
10279 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
10280 {
10281 - return delta + xadd(&sem->count, delta);
10282 + return delta + xadd_check_overflow(&sem->count, delta);
10283 }
10284
10285 #endif /* __KERNEL__ */
10286 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
10287 index 5e64171..f58957e 100644
10288 --- a/arch/x86/include/asm/segment.h
10289 +++ b/arch/x86/include/asm/segment.h
10290 @@ -64,10 +64,15 @@
10291 * 26 - ESPFIX small SS
10292 * 27 - per-cpu [ offset to per-cpu data area ]
10293 * 28 - stack_canary-20 [ for stack protector ]
10294 - * 29 - unused
10295 - * 30 - unused
10296 + * 29 - PCI BIOS CS
10297 + * 30 - PCI BIOS DS
10298 * 31 - TSS for double fault handler
10299 */
10300 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
10301 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
10302 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
10303 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
10304 +
10305 #define GDT_ENTRY_TLS_MIN 6
10306 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
10307
10308 @@ -79,6 +84,8 @@
10309
10310 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
10311
10312 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10313 +
10314 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
10315
10316 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
10317 @@ -104,6 +111,12 @@
10318 #define __KERNEL_STACK_CANARY 0
10319 #endif
10320
10321 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
10322 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10323 +
10324 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
10325 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10326 +
10327 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10328
10329 /*
10330 @@ -141,7 +154,7 @@
10331 */
10332
10333 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10334 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10335 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10336
10337
10338 #else
10339 @@ -165,6 +178,8 @@
10340 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10341 #define __USER32_DS __USER_DS
10342
10343 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10344 +
10345 #define GDT_ENTRY_TSS 8 /* needs two entries */
10346 #define GDT_ENTRY_LDT 10 /* needs two entries */
10347 #define GDT_ENTRY_TLS_MIN 12
10348 @@ -185,6 +200,7 @@
10349 #endif
10350
10351 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10352 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10353 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10354 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10355 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10356 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10357 index 73b11bc..d4a3b63 100644
10358 --- a/arch/x86/include/asm/smp.h
10359 +++ b/arch/x86/include/asm/smp.h
10360 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10361 /* cpus sharing the last level cache: */
10362 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10363 DECLARE_PER_CPU(u16, cpu_llc_id);
10364 -DECLARE_PER_CPU(int, cpu_number);
10365 +DECLARE_PER_CPU(unsigned int, cpu_number);
10366
10367 static inline struct cpumask *cpu_sibling_mask(int cpu)
10368 {
10369 @@ -77,7 +77,7 @@ struct smp_ops {
10370
10371 void (*send_call_func_ipi)(const struct cpumask *mask);
10372 void (*send_call_func_single_ipi)(int cpu);
10373 -};
10374 +} __no_const;
10375
10376 /* Globals due to paravirt */
10377 extern void set_cpu_sibling_map(int cpu);
10378 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10379 extern int safe_smp_processor_id(void);
10380
10381 #elif defined(CONFIG_X86_64_SMP)
10382 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10383 -
10384 -#define stack_smp_processor_id() \
10385 -({ \
10386 - struct thread_info *ti; \
10387 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10388 - ti->cpu; \
10389 -})
10390 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10391 +#define stack_smp_processor_id() raw_smp_processor_id()
10392 #define safe_smp_processor_id() smp_processor_id()
10393
10394 #endif
10395 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10396 index 972c260..43ab1fd 100644
10397 --- a/arch/x86/include/asm/spinlock.h
10398 +++ b/arch/x86/include/asm/spinlock.h
10399 @@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10400 static inline void arch_read_lock(arch_rwlock_t *rw)
10401 {
10402 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10403 +
10404 +#ifdef CONFIG_PAX_REFCOUNT
10405 + "jno 0f\n"
10406 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10407 + "int $4\n0:\n"
10408 + _ASM_EXTABLE(0b, 0b)
10409 +#endif
10410 +
10411 "jns 1f\n"
10412 "call __read_lock_failed\n\t"
10413 "1:\n"
10414 @@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10415 static inline void arch_write_lock(arch_rwlock_t *rw)
10416 {
10417 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10418 +
10419 +#ifdef CONFIG_PAX_REFCOUNT
10420 + "jno 0f\n"
10421 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10422 + "int $4\n0:\n"
10423 + _ASM_EXTABLE(0b, 0b)
10424 +#endif
10425 +
10426 "jz 1f\n"
10427 "call __write_lock_failed\n\t"
10428 "1:\n"
10429 @@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10430
10431 static inline void arch_read_unlock(arch_rwlock_t *rw)
10432 {
10433 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10434 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10435 +
10436 +#ifdef CONFIG_PAX_REFCOUNT
10437 + "jno 0f\n"
10438 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10439 + "int $4\n0:\n"
10440 + _ASM_EXTABLE(0b, 0b)
10441 +#endif
10442 +
10443 :"+m" (rw->lock) : : "memory");
10444 }
10445
10446 static inline void arch_write_unlock(arch_rwlock_t *rw)
10447 {
10448 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10449 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10450 +
10451 +#ifdef CONFIG_PAX_REFCOUNT
10452 + "jno 0f\n"
10453 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10454 + "int $4\n0:\n"
10455 + _ASM_EXTABLE(0b, 0b)
10456 +#endif
10457 +
10458 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10459 }
10460
10461 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10462 index 1575177..cb23f52 100644
10463 --- a/arch/x86/include/asm/stackprotector.h
10464 +++ b/arch/x86/include/asm/stackprotector.h
10465 @@ -48,7 +48,7 @@
10466 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10467 */
10468 #define GDT_STACK_CANARY_INIT \
10469 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10470 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10471
10472 /*
10473 * Initialize the stackprotector canary value.
10474 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10475
10476 static inline void load_stack_canary_segment(void)
10477 {
10478 -#ifdef CONFIG_X86_32
10479 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10480 asm volatile ("mov %0, %%gs" : : "r" (0));
10481 #endif
10482 }
10483 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10484 index 70bbe39..4ae2bd4 100644
10485 --- a/arch/x86/include/asm/stacktrace.h
10486 +++ b/arch/x86/include/asm/stacktrace.h
10487 @@ -11,28 +11,20 @@
10488
10489 extern int kstack_depth_to_print;
10490
10491 -struct thread_info;
10492 +struct task_struct;
10493 struct stacktrace_ops;
10494
10495 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10496 - unsigned long *stack,
10497 - unsigned long bp,
10498 - const struct stacktrace_ops *ops,
10499 - void *data,
10500 - unsigned long *end,
10501 - int *graph);
10502 +typedef unsigned long walk_stack_t(struct task_struct *task,
10503 + void *stack_start,
10504 + unsigned long *stack,
10505 + unsigned long bp,
10506 + const struct stacktrace_ops *ops,
10507 + void *data,
10508 + unsigned long *end,
10509 + int *graph);
10510
10511 -extern unsigned long
10512 -print_context_stack(struct thread_info *tinfo,
10513 - unsigned long *stack, unsigned long bp,
10514 - const struct stacktrace_ops *ops, void *data,
10515 - unsigned long *end, int *graph);
10516 -
10517 -extern unsigned long
10518 -print_context_stack_bp(struct thread_info *tinfo,
10519 - unsigned long *stack, unsigned long bp,
10520 - const struct stacktrace_ops *ops, void *data,
10521 - unsigned long *end, int *graph);
10522 +extern walk_stack_t print_context_stack;
10523 +extern walk_stack_t print_context_stack_bp;
10524
10525 /* Generic stack tracer with callbacks */
10526
10527 @@ -40,7 +32,7 @@ struct stacktrace_ops {
10528 void (*address)(void *data, unsigned long address, int reliable);
10529 /* On negative return stop dumping */
10530 int (*stack)(void *data, char *name);
10531 - walk_stack_t walk_stack;
10532 + walk_stack_t *walk_stack;
10533 };
10534
10535 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10536 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10537 index cb23852..2dde194 100644
10538 --- a/arch/x86/include/asm/sys_ia32.h
10539 +++ b/arch/x86/include/asm/sys_ia32.h
10540 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10541 compat_sigset_t __user *, unsigned int);
10542 asmlinkage long sys32_alarm(unsigned int);
10543
10544 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10545 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10546 asmlinkage long sys32_sysfs(int, u32, u32);
10547
10548 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10549 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10550 index 2d2f01c..f985723 100644
10551 --- a/arch/x86/include/asm/system.h
10552 +++ b/arch/x86/include/asm/system.h
10553 @@ -129,7 +129,7 @@ do { \
10554 "call __switch_to\n\t" \
10555 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10556 __switch_canary \
10557 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10558 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10559 "movq %%rax,%%rdi\n\t" \
10560 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10561 "jnz ret_from_fork\n\t" \
10562 @@ -140,7 +140,7 @@ do { \
10563 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10564 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10565 [_tif_fork] "i" (_TIF_FORK), \
10566 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10567 + [thread_info] "m" (current_tinfo), \
10568 [current_task] "m" (current_task) \
10569 __switch_canary_iparam \
10570 : "memory", "cc" __EXTRA_CLOBBER)
10571 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10572 {
10573 unsigned long __limit;
10574 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10575 - return __limit + 1;
10576 + return __limit;
10577 }
10578
10579 static inline void native_clts(void)
10580 @@ -397,13 +397,13 @@ void enable_hlt(void);
10581
10582 void cpu_idle_wait(void);
10583
10584 -extern unsigned long arch_align_stack(unsigned long sp);
10585 +#define arch_align_stack(x) ((x) & ~0xfUL)
10586 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10587
10588 void default_idle(void);
10589 bool set_pm_idle_to_default(void);
10590
10591 -void stop_this_cpu(void *dummy);
10592 +void stop_this_cpu(void *dummy) __noreturn;
10593
10594 /*
10595 * Force strict CPU ordering.
10596 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10597 index d7ef849..6af292e 100644
10598 --- a/arch/x86/include/asm/thread_info.h
10599 +++ b/arch/x86/include/asm/thread_info.h
10600 @@ -10,6 +10,7 @@
10601 #include <linux/compiler.h>
10602 #include <asm/page.h>
10603 #include <asm/types.h>
10604 +#include <asm/percpu.h>
10605
10606 /*
10607 * low level task data that entry.S needs immediate access to
10608 @@ -24,7 +25,6 @@ struct exec_domain;
10609 #include <linux/atomic.h>
10610
10611 struct thread_info {
10612 - struct task_struct *task; /* main task structure */
10613 struct exec_domain *exec_domain; /* execution domain */
10614 __u32 flags; /* low level flags */
10615 __u32 status; /* thread synchronous flags */
10616 @@ -34,18 +34,12 @@ struct thread_info {
10617 mm_segment_t addr_limit;
10618 struct restart_block restart_block;
10619 void __user *sysenter_return;
10620 -#ifdef CONFIG_X86_32
10621 - unsigned long previous_esp; /* ESP of the previous stack in
10622 - case of nested (IRQ) stacks
10623 - */
10624 - __u8 supervisor_stack[0];
10625 -#endif
10626 + unsigned long lowest_stack;
10627 int uaccess_err;
10628 };
10629
10630 -#define INIT_THREAD_INFO(tsk) \
10631 +#define INIT_THREAD_INFO \
10632 { \
10633 - .task = &tsk, \
10634 .exec_domain = &default_exec_domain, \
10635 .flags = 0, \
10636 .cpu = 0, \
10637 @@ -56,7 +50,7 @@ struct thread_info {
10638 }, \
10639 }
10640
10641 -#define init_thread_info (init_thread_union.thread_info)
10642 +#define init_thread_info (init_thread_union.stack)
10643 #define init_stack (init_thread_union.stack)
10644
10645 #else /* !__ASSEMBLY__ */
10646 @@ -170,45 +164,40 @@ struct thread_info {
10647 ret; \
10648 })
10649
10650 -#ifdef CONFIG_X86_32
10651 -
10652 -#define STACK_WARN (THREAD_SIZE/8)
10653 -/*
10654 - * macros/functions for gaining access to the thread information structure
10655 - *
10656 - * preempt_count needs to be 1 initially, until the scheduler is functional.
10657 - */
10658 -#ifndef __ASSEMBLY__
10659 -
10660 -
10661 -/* how to get the current stack pointer from C */
10662 -register unsigned long current_stack_pointer asm("esp") __used;
10663 -
10664 -/* how to get the thread information struct from C */
10665 -static inline struct thread_info *current_thread_info(void)
10666 -{
10667 - return (struct thread_info *)
10668 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10669 -}
10670 -
10671 -#else /* !__ASSEMBLY__ */
10672 -
10673 +#ifdef __ASSEMBLY__
10674 /* how to get the thread information struct from ASM */
10675 #define GET_THREAD_INFO(reg) \
10676 - movl $-THREAD_SIZE, reg; \
10677 - andl %esp, reg
10678 + mov PER_CPU_VAR(current_tinfo), reg
10679
10680 /* use this one if reg already contains %esp */
10681 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10682 - andl $-THREAD_SIZE, reg
10683 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10684 +#else
10685 +/* how to get the thread information struct from C */
10686 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10687 +
10688 +static __always_inline struct thread_info *current_thread_info(void)
10689 +{
10690 + return percpu_read_stable(current_tinfo);
10691 +}
10692 +#endif
10693 +
10694 +#ifdef CONFIG_X86_32
10695 +
10696 +#define STACK_WARN (THREAD_SIZE/8)
10697 +/*
10698 + * macros/functions for gaining access to the thread information structure
10699 + *
10700 + * preempt_count needs to be 1 initially, until the scheduler is functional.
10701 + */
10702 +#ifndef __ASSEMBLY__
10703 +
10704 +/* how to get the current stack pointer from C */
10705 +register unsigned long current_stack_pointer asm("esp") __used;
10706
10707 #endif
10708
10709 #else /* X86_32 */
10710
10711 -#include <asm/percpu.h>
10712 -#define KERNEL_STACK_OFFSET (5*8)
10713 -
10714 /*
10715 * macros/functions for gaining access to the thread information structure
10716 * preempt_count needs to be 1 initially, until the scheduler is functional.
10717 @@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10718 #ifndef __ASSEMBLY__
10719 DECLARE_PER_CPU(unsigned long, kernel_stack);
10720
10721 -static inline struct thread_info *current_thread_info(void)
10722 -{
10723 - struct thread_info *ti;
10724 - ti = (void *)(percpu_read_stable(kernel_stack) +
10725 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10726 - return ti;
10727 -}
10728 -
10729 -#else /* !__ASSEMBLY__ */
10730 -
10731 -/* how to get the thread information struct from ASM */
10732 -#define GET_THREAD_INFO(reg) \
10733 - movq PER_CPU_VAR(kernel_stack),reg ; \
10734 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10735 -
10736 +/* how to get the current stack pointer from C */
10737 +register unsigned long current_stack_pointer asm("rsp") __used;
10738 #endif
10739
10740 #endif /* !X86_32 */
10741 @@ -264,5 +240,16 @@ extern void arch_task_cache_init(void);
10742 extern void free_thread_info(struct thread_info *ti);
10743 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10744 #define arch_task_cache_init arch_task_cache_init
10745 +
10746 +#define __HAVE_THREAD_FUNCTIONS
10747 +#define task_thread_info(task) (&(task)->tinfo)
10748 +#define task_stack_page(task) ((task)->stack)
10749 +#define setup_thread_stack(p, org) do {} while (0)
10750 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10751 +
10752 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10753 +extern struct task_struct *alloc_task_struct_node(int node);
10754 +extern void free_task_struct(struct task_struct *);
10755 +
10756 #endif
10757 #endif /* _ASM_X86_THREAD_INFO_H */
10758 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10759 index 36361bf..324f262 100644
10760 --- a/arch/x86/include/asm/uaccess.h
10761 +++ b/arch/x86/include/asm/uaccess.h
10762 @@ -7,12 +7,15 @@
10763 #include <linux/compiler.h>
10764 #include <linux/thread_info.h>
10765 #include <linux/string.h>
10766 +#include <linux/sched.h>
10767 #include <asm/asm.h>
10768 #include <asm/page.h>
10769
10770 #define VERIFY_READ 0
10771 #define VERIFY_WRITE 1
10772
10773 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10774 +
10775 /*
10776 * The fs value determines whether argument validity checking should be
10777 * performed or not. If get_fs() == USER_DS, checking is performed, with
10778 @@ -28,7 +31,12 @@
10779
10780 #define get_ds() (KERNEL_DS)
10781 #define get_fs() (current_thread_info()->addr_limit)
10782 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10783 +void __set_fs(mm_segment_t x);
10784 +void set_fs(mm_segment_t x);
10785 +#else
10786 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10787 +#endif
10788
10789 #define segment_eq(a, b) ((a).seg == (b).seg)
10790
10791 @@ -76,7 +84,33 @@
10792 * checks that the pointer is in the user space range - after calling
10793 * this function, memory access functions may still return -EFAULT.
10794 */
10795 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10796 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10797 +#define access_ok(type, addr, size) \
10798 +({ \
10799 + long __size = size; \
10800 + unsigned long __addr = (unsigned long)addr; \
10801 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10802 + unsigned long __end_ao = __addr + __size - 1; \
10803 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10804 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10805 + while(__addr_ao <= __end_ao) { \
10806 + char __c_ao; \
10807 + __addr_ao += PAGE_SIZE; \
10808 + if (__size > PAGE_SIZE) \
10809 + cond_resched(); \
10810 + if (__get_user(__c_ao, (char __user *)__addr)) \
10811 + break; \
10812 + if (type != VERIFY_WRITE) { \
10813 + __addr = __addr_ao; \
10814 + continue; \
10815 + } \
10816 + if (__put_user(__c_ao, (char __user *)__addr)) \
10817 + break; \
10818 + __addr = __addr_ao; \
10819 + } \
10820 + } \
10821 + __ret_ao; \
10822 +})
10823
10824 /*
10825 * The exception table consists of pairs of addresses: the first is the
10826 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10827 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10828 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10829
10830 -
10831 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10832 +#define __copyuser_seg "gs;"
10833 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10834 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10835 +#else
10836 +#define __copyuser_seg
10837 +#define __COPYUSER_SET_ES
10838 +#define __COPYUSER_RESTORE_ES
10839 +#endif
10840
10841 #ifdef CONFIG_X86_32
10842 #define __put_user_asm_u64(x, addr, err, errret) \
10843 - asm volatile("1: movl %%eax,0(%2)\n" \
10844 - "2: movl %%edx,4(%2)\n" \
10845 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10846 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10847 "3:\n" \
10848 ".section .fixup,\"ax\"\n" \
10849 "4: movl %3,%0\n" \
10850 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10851 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10852
10853 #define __put_user_asm_ex_u64(x, addr) \
10854 - asm volatile("1: movl %%eax,0(%1)\n" \
10855 - "2: movl %%edx,4(%1)\n" \
10856 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10857 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10858 "3:\n" \
10859 _ASM_EXTABLE(1b, 2b - 1b) \
10860 _ASM_EXTABLE(2b, 3b - 2b) \
10861 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
10862 __typeof__(*(ptr)) __pu_val; \
10863 __chk_user_ptr(ptr); \
10864 might_fault(); \
10865 - __pu_val = x; \
10866 + __pu_val = (x); \
10867 switch (sizeof(*(ptr))) { \
10868 case 1: \
10869 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10870 @@ -373,7 +415,7 @@ do { \
10871 } while (0)
10872
10873 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10874 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10875 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10876 "2:\n" \
10877 ".section .fixup,\"ax\"\n" \
10878 "3: mov %3,%0\n" \
10879 @@ -381,7 +423,7 @@ do { \
10880 " jmp 2b\n" \
10881 ".previous\n" \
10882 _ASM_EXTABLE(1b, 3b) \
10883 - : "=r" (err), ltype(x) \
10884 + : "=r" (err), ltype (x) \
10885 : "m" (__m(addr)), "i" (errret), "0" (err))
10886
10887 #define __get_user_size_ex(x, ptr, size) \
10888 @@ -406,7 +448,7 @@ do { \
10889 } while (0)
10890
10891 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10892 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10893 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10894 "2:\n" \
10895 _ASM_EXTABLE(1b, 2b - 1b) \
10896 : ltype(x) : "m" (__m(addr)))
10897 @@ -423,13 +465,24 @@ do { \
10898 int __gu_err; \
10899 unsigned long __gu_val; \
10900 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10901 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10902 + (x) = (__typeof__(*(ptr)))__gu_val; \
10903 __gu_err; \
10904 })
10905
10906 /* FIXME: this hack is definitely wrong -AK */
10907 struct __large_struct { unsigned long buf[100]; };
10908 -#define __m(x) (*(struct __large_struct __user *)(x))
10909 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10910 +#define ____m(x) \
10911 +({ \
10912 + unsigned long ____x = (unsigned long)(x); \
10913 + if (____x < PAX_USER_SHADOW_BASE) \
10914 + ____x += PAX_USER_SHADOW_BASE; \
10915 + (void __user *)____x; \
10916 +})
10917 +#else
10918 +#define ____m(x) (x)
10919 +#endif
10920 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10921
10922 /*
10923 * Tell gcc we read from memory instead of writing: this is because
10924 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10925 * aliasing issues.
10926 */
10927 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10928 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10929 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10930 "2:\n" \
10931 ".section .fixup,\"ax\"\n" \
10932 "3: mov %3,%0\n" \
10933 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10934 ".previous\n" \
10935 _ASM_EXTABLE(1b, 3b) \
10936 : "=r"(err) \
10937 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10938 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10939
10940 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10941 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10942 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10943 "2:\n" \
10944 _ASM_EXTABLE(1b, 2b - 1b) \
10945 : : ltype(x), "m" (__m(addr)))
10946 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10947 * On error, the variable @x is set to zero.
10948 */
10949
10950 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10951 +#define __get_user(x, ptr) get_user((x), (ptr))
10952 +#else
10953 #define __get_user(x, ptr) \
10954 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10955 +#endif
10956
10957 /**
10958 * __put_user: - Write a simple value into user space, with less checking.
10959 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10960 * Returns zero on success, or -EFAULT on error.
10961 */
10962
10963 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10964 +#define __put_user(x, ptr) put_user((x), (ptr))
10965 +#else
10966 #define __put_user(x, ptr) \
10967 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10968 +#endif
10969
10970 #define __get_user_unaligned __get_user
10971 #define __put_user_unaligned __put_user
10972 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10973 #define get_user_ex(x, ptr) do { \
10974 unsigned long __gue_val; \
10975 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10976 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10977 + (x) = (__typeof__(*(ptr)))__gue_val; \
10978 } while (0)
10979
10980 #ifdef CONFIG_X86_WP_WORKS_OK
10981 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10982 index 566e803..b9521e9 100644
10983 --- a/arch/x86/include/asm/uaccess_32.h
10984 +++ b/arch/x86/include/asm/uaccess_32.h
10985 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10986 static __always_inline unsigned long __must_check
10987 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10988 {
10989 + if ((long)n < 0)
10990 + return n;
10991 +
10992 if (__builtin_constant_p(n)) {
10993 unsigned long ret;
10994
10995 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10996 return ret;
10997 }
10998 }
10999 + if (!__builtin_constant_p(n))
11000 + check_object_size(from, n, true);
11001 return __copy_to_user_ll(to, from, n);
11002 }
11003
11004 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
11005 __copy_to_user(void __user *to, const void *from, unsigned long n)
11006 {
11007 might_fault();
11008 +
11009 return __copy_to_user_inatomic(to, from, n);
11010 }
11011
11012 static __always_inline unsigned long
11013 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
11014 {
11015 + if ((long)n < 0)
11016 + return n;
11017 +
11018 /* Avoid zeroing the tail if the copy fails..
11019 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
11020 * but as the zeroing behaviour is only significant when n is not
11021 @@ -137,6 +146,10 @@ static __always_inline unsigned long
11022 __copy_from_user(void *to, const void __user *from, unsigned long n)
11023 {
11024 might_fault();
11025 +
11026 + if ((long)n < 0)
11027 + return n;
11028 +
11029 if (__builtin_constant_p(n)) {
11030 unsigned long ret;
11031
11032 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
11033 return ret;
11034 }
11035 }
11036 + if (!__builtin_constant_p(n))
11037 + check_object_size(to, n, false);
11038 return __copy_from_user_ll(to, from, n);
11039 }
11040
11041 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
11042 const void __user *from, unsigned long n)
11043 {
11044 might_fault();
11045 +
11046 + if ((long)n < 0)
11047 + return n;
11048 +
11049 if (__builtin_constant_p(n)) {
11050 unsigned long ret;
11051
11052 @@ -181,15 +200,19 @@ static __always_inline unsigned long
11053 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
11054 unsigned long n)
11055 {
11056 - return __copy_from_user_ll_nocache_nozero(to, from, n);
11057 + if ((long)n < 0)
11058 + return n;
11059 +
11060 + return __copy_from_user_ll_nocache_nozero(to, from, n);
11061 }
11062
11063 -unsigned long __must_check copy_to_user(void __user *to,
11064 - const void *from, unsigned long n);
11065 -unsigned long __must_check _copy_from_user(void *to,
11066 - const void __user *from,
11067 - unsigned long n);
11068 -
11069 +extern void copy_to_user_overflow(void)
11070 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
11071 + __compiletime_error("copy_to_user() buffer size is not provably correct")
11072 +#else
11073 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
11074 +#endif
11075 +;
11076
11077 extern void copy_from_user_overflow(void)
11078 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
11079 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
11080 #endif
11081 ;
11082
11083 -static inline unsigned long __must_check copy_from_user(void *to,
11084 - const void __user *from,
11085 - unsigned long n)
11086 +/**
11087 + * copy_to_user: - Copy a block of data into user space.
11088 + * @to: Destination address, in user space.
11089 + * @from: Source address, in kernel space.
11090 + * @n: Number of bytes to copy.
11091 + *
11092 + * Context: User context only. This function may sleep.
11093 + *
11094 + * Copy data from kernel space to user space.
11095 + *
11096 + * Returns number of bytes that could not be copied.
11097 + * On success, this will be zero.
11098 + */
11099 +static inline unsigned long __must_check
11100 +copy_to_user(void __user *to, const void *from, unsigned long n)
11101 +{
11102 + int sz = __compiletime_object_size(from);
11103 +
11104 + if (unlikely(sz != -1 && sz < n))
11105 + copy_to_user_overflow();
11106 + else if (access_ok(VERIFY_WRITE, to, n))
11107 + n = __copy_to_user(to, from, n);
11108 + return n;
11109 +}
11110 +
11111 +/**
11112 + * copy_from_user: - Copy a block of data from user space.
11113 + * @to: Destination address, in kernel space.
11114 + * @from: Source address, in user space.
11115 + * @n: Number of bytes to copy.
11116 + *
11117 + * Context: User context only. This function may sleep.
11118 + *
11119 + * Copy data from user space to kernel space.
11120 + *
11121 + * Returns number of bytes that could not be copied.
11122 + * On success, this will be zero.
11123 + *
11124 + * If some data could not be copied, this function will pad the copied
11125 + * data to the requested size using zero bytes.
11126 + */
11127 +static inline unsigned long __must_check
11128 +copy_from_user(void *to, const void __user *from, unsigned long n)
11129 {
11130 int sz = __compiletime_object_size(to);
11131
11132 - if (likely(sz == -1 || sz >= n))
11133 - n = _copy_from_user(to, from, n);
11134 - else
11135 + if (unlikely(sz != -1 && sz < n))
11136 copy_from_user_overflow();
11137 -
11138 + else if (access_ok(VERIFY_READ, from, n))
11139 + n = __copy_from_user(to, from, n);
11140 + else if ((long)n > 0) {
11141 + if (!__builtin_constant_p(n))
11142 + check_object_size(to, n, false);
11143 + memset(to, 0, n);
11144 + }
11145 return n;
11146 }
11147
11148 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
11149 index 1c66d30..e66922c 100644
11150 --- a/arch/x86/include/asm/uaccess_64.h
11151 +++ b/arch/x86/include/asm/uaccess_64.h
11152 @@ -10,6 +10,9 @@
11153 #include <asm/alternative.h>
11154 #include <asm/cpufeature.h>
11155 #include <asm/page.h>
11156 +#include <asm/pgtable.h>
11157 +
11158 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
11159
11160 /*
11161 * Copy To/From Userspace
11162 @@ -17,12 +20,12 @@
11163
11164 /* Handles exceptions in both to and from, but doesn't do access_ok */
11165 __must_check unsigned long
11166 -copy_user_generic_string(void *to, const void *from, unsigned len);
11167 +copy_user_generic_string(void *to, const void *from, unsigned long len);
11168 __must_check unsigned long
11169 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
11170 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
11171
11172 static __always_inline __must_check unsigned long
11173 -copy_user_generic(void *to, const void *from, unsigned len)
11174 +copy_user_generic(void *to, const void *from, unsigned long len)
11175 {
11176 unsigned ret;
11177
11178 @@ -32,142 +35,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
11179 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
11180 "=d" (len)),
11181 "1" (to), "2" (from), "3" (len)
11182 - : "memory", "rcx", "r8", "r9", "r10", "r11");
11183 + : "memory", "rcx", "r8", "r9", "r11");
11184 return ret;
11185 }
11186
11187 +static __always_inline __must_check unsigned long
11188 +__copy_to_user(void __user *to, const void *from, unsigned long len);
11189 +static __always_inline __must_check unsigned long
11190 +__copy_from_user(void *to, const void __user *from, unsigned long len);
11191 __must_check unsigned long
11192 -_copy_to_user(void __user *to, const void *from, unsigned len);
11193 -__must_check unsigned long
11194 -_copy_from_user(void *to, const void __user *from, unsigned len);
11195 -__must_check unsigned long
11196 -copy_in_user(void __user *to, const void __user *from, unsigned len);
11197 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
11198
11199 static inline unsigned long __must_check copy_from_user(void *to,
11200 const void __user *from,
11201 unsigned long n)
11202 {
11203 - int sz = __compiletime_object_size(to);
11204 -
11205 might_fault();
11206 - if (likely(sz == -1 || sz >= n))
11207 - n = _copy_from_user(to, from, n);
11208 -#ifdef CONFIG_DEBUG_VM
11209 - else
11210 - WARN(1, "Buffer overflow detected!\n");
11211 -#endif
11212 +
11213 + if (access_ok(VERIFY_READ, from, n))
11214 + n = __copy_from_user(to, from, n);
11215 + else if (n < INT_MAX) {
11216 + if (!__builtin_constant_p(n))
11217 + check_object_size(to, n, false);
11218 + memset(to, 0, n);
11219 + }
11220 return n;
11221 }
11222
11223 static __always_inline __must_check
11224 -int copy_to_user(void __user *dst, const void *src, unsigned size)
11225 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
11226 {
11227 might_fault();
11228
11229 - return _copy_to_user(dst, src, size);
11230 + if (access_ok(VERIFY_WRITE, dst, size))
11231 + size = __copy_to_user(dst, src, size);
11232 + return size;
11233 }
11234
11235 static __always_inline __must_check
11236 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
11237 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
11238 {
11239 - int ret = 0;
11240 + int sz = __compiletime_object_size(dst);
11241 + unsigned ret = 0;
11242
11243 might_fault();
11244 - if (!__builtin_constant_p(size))
11245 - return copy_user_generic(dst, (__force void *)src, size);
11246 +
11247 + if (size > INT_MAX)
11248 + return size;
11249 +
11250 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11251 + if (!__access_ok(VERIFY_READ, src, size))
11252 + return size;
11253 +#endif
11254 +
11255 + if (unlikely(sz != -1 && sz < size)) {
11256 +#ifdef CONFIG_DEBUG_VM
11257 + WARN(1, "Buffer overflow detected!\n");
11258 +#endif
11259 + return size;
11260 + }
11261 +
11262 + if (!__builtin_constant_p(size)) {
11263 + check_object_size(dst, size, false);
11264 +
11265 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11266 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11267 + src += PAX_USER_SHADOW_BASE;
11268 +#endif
11269 +
11270 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11271 + }
11272 switch (size) {
11273 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
11274 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
11275 ret, "b", "b", "=q", 1);
11276 return ret;
11277 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
11278 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
11279 ret, "w", "w", "=r", 2);
11280 return ret;
11281 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
11282 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
11283 ret, "l", "k", "=r", 4);
11284 return ret;
11285 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
11286 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11287 ret, "q", "", "=r", 8);
11288 return ret;
11289 case 10:
11290 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11291 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11292 ret, "q", "", "=r", 10);
11293 if (unlikely(ret))
11294 return ret;
11295 __get_user_asm(*(u16 *)(8 + (char *)dst),
11296 - (u16 __user *)(8 + (char __user *)src),
11297 + (const u16 __user *)(8 + (const char __user *)src),
11298 ret, "w", "w", "=r", 2);
11299 return ret;
11300 case 16:
11301 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11302 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11303 ret, "q", "", "=r", 16);
11304 if (unlikely(ret))
11305 return ret;
11306 __get_user_asm(*(u64 *)(8 + (char *)dst),
11307 - (u64 __user *)(8 + (char __user *)src),
11308 + (const u64 __user *)(8 + (const char __user *)src),
11309 ret, "q", "", "=r", 8);
11310 return ret;
11311 default:
11312 - return copy_user_generic(dst, (__force void *)src, size);
11313 +
11314 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11315 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11316 + src += PAX_USER_SHADOW_BASE;
11317 +#endif
11318 +
11319 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11320 }
11321 }
11322
11323 static __always_inline __must_check
11324 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
11325 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
11326 {
11327 - int ret = 0;
11328 + int sz = __compiletime_object_size(src);
11329 + unsigned ret = 0;
11330
11331 might_fault();
11332 - if (!__builtin_constant_p(size))
11333 - return copy_user_generic((__force void *)dst, src, size);
11334 +
11335 + if (size > INT_MAX)
11336 + return size;
11337 +
11338 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11339 + if (!__access_ok(VERIFY_WRITE, dst, size))
11340 + return size;
11341 +#endif
11342 +
11343 + if (unlikely(sz != -1 && sz < size)) {
11344 +#ifdef CONFIG_DEBUG_VM
11345 + WARN(1, "Buffer overflow detected!\n");
11346 +#endif
11347 + return size;
11348 + }
11349 +
11350 + if (!__builtin_constant_p(size)) {
11351 + check_object_size(src, size, true);
11352 +
11353 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11354 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11355 + dst += PAX_USER_SHADOW_BASE;
11356 +#endif
11357 +
11358 + return copy_user_generic((__force_kernel void *)dst, src, size);
11359 + }
11360 switch (size) {
11361 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11362 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11363 ret, "b", "b", "iq", 1);
11364 return ret;
11365 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11366 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11367 ret, "w", "w", "ir", 2);
11368 return ret;
11369 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11370 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11371 ret, "l", "k", "ir", 4);
11372 return ret;
11373 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11374 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11375 ret, "q", "", "er", 8);
11376 return ret;
11377 case 10:
11378 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11379 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11380 ret, "q", "", "er", 10);
11381 if (unlikely(ret))
11382 return ret;
11383 asm("":::"memory");
11384 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11385 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11386 ret, "w", "w", "ir", 2);
11387 return ret;
11388 case 16:
11389 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11390 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11391 ret, "q", "", "er", 16);
11392 if (unlikely(ret))
11393 return ret;
11394 asm("":::"memory");
11395 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11396 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11397 ret, "q", "", "er", 8);
11398 return ret;
11399 default:
11400 - return copy_user_generic((__force void *)dst, src, size);
11401 +
11402 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11403 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11404 + dst += PAX_USER_SHADOW_BASE;
11405 +#endif
11406 +
11407 + return copy_user_generic((__force_kernel void *)dst, src, size);
11408 }
11409 }
11410
11411 static __always_inline __must_check
11412 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11413 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11414 {
11415 - int ret = 0;
11416 + unsigned ret = 0;
11417
11418 might_fault();
11419 - if (!__builtin_constant_p(size))
11420 - return copy_user_generic((__force void *)dst,
11421 - (__force void *)src, size);
11422 +
11423 + if (size > INT_MAX)
11424 + return size;
11425 +
11426 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11427 + if (!__access_ok(VERIFY_READ, src, size))
11428 + return size;
11429 + if (!__access_ok(VERIFY_WRITE, dst, size))
11430 + return size;
11431 +#endif
11432 +
11433 + if (!__builtin_constant_p(size)) {
11434 +
11435 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11436 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11437 + src += PAX_USER_SHADOW_BASE;
11438 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11439 + dst += PAX_USER_SHADOW_BASE;
11440 +#endif
11441 +
11442 + return copy_user_generic((__force_kernel void *)dst,
11443 + (__force_kernel const void *)src, size);
11444 + }
11445 switch (size) {
11446 case 1: {
11447 u8 tmp;
11448 - __get_user_asm(tmp, (u8 __user *)src,
11449 + __get_user_asm(tmp, (const u8 __user *)src,
11450 ret, "b", "b", "=q", 1);
11451 if (likely(!ret))
11452 __put_user_asm(tmp, (u8 __user *)dst,
11453 @@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11454 }
11455 case 2: {
11456 u16 tmp;
11457 - __get_user_asm(tmp, (u16 __user *)src,
11458 + __get_user_asm(tmp, (const u16 __user *)src,
11459 ret, "w", "w", "=r", 2);
11460 if (likely(!ret))
11461 __put_user_asm(tmp, (u16 __user *)dst,
11462 @@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11463
11464 case 4: {
11465 u32 tmp;
11466 - __get_user_asm(tmp, (u32 __user *)src,
11467 + __get_user_asm(tmp, (const u32 __user *)src,
11468 ret, "l", "k", "=r", 4);
11469 if (likely(!ret))
11470 __put_user_asm(tmp, (u32 __user *)dst,
11471 @@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11472 }
11473 case 8: {
11474 u64 tmp;
11475 - __get_user_asm(tmp, (u64 __user *)src,
11476 + __get_user_asm(tmp, (const u64 __user *)src,
11477 ret, "q", "", "=r", 8);
11478 if (likely(!ret))
11479 __put_user_asm(tmp, (u64 __user *)dst,
11480 @@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11481 return ret;
11482 }
11483 default:
11484 - return copy_user_generic((__force void *)dst,
11485 - (__force void *)src, size);
11486 +
11487 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11488 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11489 + src += PAX_USER_SHADOW_BASE;
11490 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11491 + dst += PAX_USER_SHADOW_BASE;
11492 +#endif
11493 +
11494 + return copy_user_generic((__force_kernel void *)dst,
11495 + (__force_kernel const void *)src, size);
11496 }
11497 }
11498
11499 @@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11500 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11501
11502 static __must_check __always_inline int
11503 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11504 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11505 {
11506 - return copy_user_generic(dst, (__force const void *)src, size);
11507 + if (size > INT_MAX)
11508 + return size;
11509 +
11510 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11511 + if (!__access_ok(VERIFY_READ, src, size))
11512 + return size;
11513 +
11514 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11515 + src += PAX_USER_SHADOW_BASE;
11516 +#endif
11517 +
11518 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11519 }
11520
11521 -static __must_check __always_inline int
11522 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11523 +static __must_check __always_inline unsigned long
11524 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11525 {
11526 - return copy_user_generic((__force void *)dst, src, size);
11527 + if (size > INT_MAX)
11528 + return size;
11529 +
11530 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11531 + if (!__access_ok(VERIFY_WRITE, dst, size))
11532 + return size;
11533 +
11534 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11535 + dst += PAX_USER_SHADOW_BASE;
11536 +#endif
11537 +
11538 + return copy_user_generic((__force_kernel void *)dst, src, size);
11539 }
11540
11541 -extern long __copy_user_nocache(void *dst, const void __user *src,
11542 - unsigned size, int zerorest);
11543 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11544 + unsigned long size, int zerorest);
11545
11546 -static inline int
11547 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11548 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11549 {
11550 might_sleep();
11551 +
11552 + if (size > INT_MAX)
11553 + return size;
11554 +
11555 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11556 + if (!__access_ok(VERIFY_READ, src, size))
11557 + return size;
11558 +#endif
11559 +
11560 return __copy_user_nocache(dst, src, size, 1);
11561 }
11562
11563 -static inline int
11564 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11565 - unsigned size)
11566 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11567 + unsigned long size)
11568 {
11569 + if (size > INT_MAX)
11570 + return size;
11571 +
11572 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11573 + if (!__access_ok(VERIFY_READ, src, size))
11574 + return size;
11575 +#endif
11576 +
11577 return __copy_user_nocache(dst, src, size, 0);
11578 }
11579
11580 -unsigned long
11581 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11582 +extern unsigned long
11583 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11584
11585 #endif /* _ASM_X86_UACCESS_64_H */
11586 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11587 index bb05228..d763d5b 100644
11588 --- a/arch/x86/include/asm/vdso.h
11589 +++ b/arch/x86/include/asm/vdso.h
11590 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11591 #define VDSO32_SYMBOL(base, name) \
11592 ({ \
11593 extern const char VDSO32_##name[]; \
11594 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11595 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11596 })
11597 #endif
11598
11599 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11600 index 1971e65..1e3559b 100644
11601 --- a/arch/x86/include/asm/x86_init.h
11602 +++ b/arch/x86/include/asm/x86_init.h
11603 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11604 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11605 void (*find_smp_config)(void);
11606 void (*get_smp_config)(unsigned int early);
11607 -};
11608 +} __no_const;
11609
11610 /**
11611 * struct x86_init_resources - platform specific resource related ops
11612 @@ -42,7 +42,7 @@ struct x86_init_resources {
11613 void (*probe_roms)(void);
11614 void (*reserve_resources)(void);
11615 char *(*memory_setup)(void);
11616 -};
11617 +} __no_const;
11618
11619 /**
11620 * struct x86_init_irqs - platform specific interrupt setup
11621 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11622 void (*pre_vector_init)(void);
11623 void (*intr_init)(void);
11624 void (*trap_init)(void);
11625 -};
11626 +} __no_const;
11627
11628 /**
11629 * struct x86_init_oem - oem platform specific customizing functions
11630 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11631 struct x86_init_oem {
11632 void (*arch_setup)(void);
11633 void (*banner)(void);
11634 -};
11635 +} __no_const;
11636
11637 /**
11638 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11639 @@ -76,7 +76,7 @@ struct x86_init_oem {
11640 */
11641 struct x86_init_mapping {
11642 void (*pagetable_reserve)(u64 start, u64 end);
11643 -};
11644 +} __no_const;
11645
11646 /**
11647 * struct x86_init_paging - platform specific paging functions
11648 @@ -86,7 +86,7 @@ struct x86_init_mapping {
11649 struct x86_init_paging {
11650 void (*pagetable_setup_start)(pgd_t *base);
11651 void (*pagetable_setup_done)(pgd_t *base);
11652 -};
11653 +} __no_const;
11654
11655 /**
11656 * struct x86_init_timers - platform specific timer setup
11657 @@ -101,7 +101,7 @@ struct x86_init_timers {
11658 void (*tsc_pre_init)(void);
11659 void (*timer_init)(void);
11660 void (*wallclock_init)(void);
11661 -};
11662 +} __no_const;
11663
11664 /**
11665 * struct x86_init_iommu - platform specific iommu setup
11666 @@ -109,7 +109,7 @@ struct x86_init_timers {
11667 */
11668 struct x86_init_iommu {
11669 int (*iommu_init)(void);
11670 -};
11671 +} __no_const;
11672
11673 /**
11674 * struct x86_init_pci - platform specific pci init functions
11675 @@ -123,7 +123,7 @@ struct x86_init_pci {
11676 int (*init)(void);
11677 void (*init_irq)(void);
11678 void (*fixup_irqs)(void);
11679 -};
11680 +} __no_const;
11681
11682 /**
11683 * struct x86_init_ops - functions for platform specific setup
11684 @@ -139,7 +139,7 @@ struct x86_init_ops {
11685 struct x86_init_timers timers;
11686 struct x86_init_iommu iommu;
11687 struct x86_init_pci pci;
11688 -};
11689 +} __no_const;
11690
11691 /**
11692 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11693 @@ -147,7 +147,7 @@ struct x86_init_ops {
11694 */
11695 struct x86_cpuinit_ops {
11696 void (*setup_percpu_clockev)(void);
11697 -};
11698 +} __no_const;
11699
11700 /**
11701 * struct x86_platform_ops - platform specific runtime functions
11702 @@ -169,7 +169,7 @@ struct x86_platform_ops {
11703 void (*nmi_init)(void);
11704 unsigned char (*get_nmi_reason)(void);
11705 int (*i8042_detect)(void);
11706 -};
11707 +} __no_const;
11708
11709 struct pci_dev;
11710
11711 @@ -177,7 +177,7 @@ struct x86_msi_ops {
11712 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11713 void (*teardown_msi_irq)(unsigned int irq);
11714 void (*teardown_msi_irqs)(struct pci_dev *dev);
11715 -};
11716 +} __no_const;
11717
11718 extern struct x86_init_ops x86_init;
11719 extern struct x86_cpuinit_ops x86_cpuinit;
11720 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11721 index c6ce245..ffbdab7 100644
11722 --- a/arch/x86/include/asm/xsave.h
11723 +++ b/arch/x86/include/asm/xsave.h
11724 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11725 {
11726 int err;
11727
11728 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11729 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11730 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11731 +#endif
11732 +
11733 /*
11734 * Clear the xsave header first, so that reserved fields are
11735 * initialized to zero.
11736 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11737 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11738 {
11739 int err;
11740 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11741 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11742 u32 lmask = mask;
11743 u32 hmask = mask >> 32;
11744
11745 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11746 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11747 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11748 +#endif
11749 +
11750 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11751 "2:\n"
11752 ".section .fixup,\"ax\"\n"
11753 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11754 index 6a564ac..9b1340c 100644
11755 --- a/arch/x86/kernel/acpi/realmode/Makefile
11756 +++ b/arch/x86/kernel/acpi/realmode/Makefile
11757 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11758 $(call cc-option, -fno-stack-protector) \
11759 $(call cc-option, -mpreferred-stack-boundary=2)
11760 KBUILD_CFLAGS += $(call cc-option, -m32)
11761 +ifdef CONSTIFY_PLUGIN
11762 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11763 +endif
11764 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11765 GCOV_PROFILE := n
11766
11767 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11768 index b4fd836..4358fe3 100644
11769 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
11770 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11771 @@ -108,6 +108,9 @@ wakeup_code:
11772 /* Do any other stuff... */
11773
11774 #ifndef CONFIG_64BIT
11775 + /* Recheck NX bit overrides (64bit path does this in trampoline */
11776 + call verify_cpu
11777 +
11778 /* This could also be done in C code... */
11779 movl pmode_cr3, %eax
11780 movl %eax, %cr3
11781 @@ -131,6 +134,7 @@ wakeup_code:
11782 movl pmode_cr0, %eax
11783 movl %eax, %cr0
11784 jmp pmode_return
11785 +# include "../../verify_cpu.S"
11786 #else
11787 pushw $0
11788 pushw trampoline_segment
11789 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11790 index 103b6ab..2004d0a 100644
11791 --- a/arch/x86/kernel/acpi/sleep.c
11792 +++ b/arch/x86/kernel/acpi/sleep.c
11793 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11794 header->trampoline_segment = trampoline_address() >> 4;
11795 #ifdef CONFIG_SMP
11796 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11797 +
11798 + pax_open_kernel();
11799 early_gdt_descr.address =
11800 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11801 + pax_close_kernel();
11802 +
11803 initial_gs = per_cpu_offset(smp_processor_id());
11804 #endif
11805 initial_code = (unsigned long)wakeup_long64;
11806 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11807 index 13ab720..95d5442 100644
11808 --- a/arch/x86/kernel/acpi/wakeup_32.S
11809 +++ b/arch/x86/kernel/acpi/wakeup_32.S
11810 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11811 # and restore the stack ... but you need gdt for this to work
11812 movl saved_context_esp, %esp
11813
11814 - movl %cs:saved_magic, %eax
11815 - cmpl $0x12345678, %eax
11816 + cmpl $0x12345678, saved_magic
11817 jne bogus_magic
11818
11819 # jump to place where we left off
11820 - movl saved_eip, %eax
11821 - jmp *%eax
11822 + jmp *(saved_eip)
11823
11824 bogus_magic:
11825 jmp bogus_magic
11826 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11827 index 1f84794..e23f862 100644
11828 --- a/arch/x86/kernel/alternative.c
11829 +++ b/arch/x86/kernel/alternative.c
11830 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11831 */
11832 for (a = start; a < end; a++) {
11833 instr = (u8 *)&a->instr_offset + a->instr_offset;
11834 +
11835 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11836 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11837 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11838 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11839 +#endif
11840 +
11841 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11842 BUG_ON(a->replacementlen > a->instrlen);
11843 BUG_ON(a->instrlen > sizeof(insnbuf));
11844 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11845 for (poff = start; poff < end; poff++) {
11846 u8 *ptr = (u8 *)poff + *poff;
11847
11848 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11849 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11850 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11851 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11852 +#endif
11853 +
11854 if (!*poff || ptr < text || ptr >= text_end)
11855 continue;
11856 /* turn DS segment override prefix into lock prefix */
11857 - if (*ptr == 0x3e)
11858 + if (*ktla_ktva(ptr) == 0x3e)
11859 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11860 };
11861 mutex_unlock(&text_mutex);
11862 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11863 for (poff = start; poff < end; poff++) {
11864 u8 *ptr = (u8 *)poff + *poff;
11865
11866 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11867 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11868 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11869 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11870 +#endif
11871 +
11872 if (!*poff || ptr < text || ptr >= text_end)
11873 continue;
11874 /* turn lock prefix into DS segment override prefix */
11875 - if (*ptr == 0xf0)
11876 + if (*ktla_ktva(ptr) == 0xf0)
11877 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11878 };
11879 mutex_unlock(&text_mutex);
11880 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11881
11882 BUG_ON(p->len > MAX_PATCH_LEN);
11883 /* prep the buffer with the original instructions */
11884 - memcpy(insnbuf, p->instr, p->len);
11885 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11886 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11887 (unsigned long)p->instr, p->len);
11888
11889 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11890 if (smp_alt_once)
11891 free_init_pages("SMP alternatives",
11892 (unsigned long)__smp_locks,
11893 - (unsigned long)__smp_locks_end);
11894 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11895
11896 restart_nmi();
11897 }
11898 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11899 * instructions. And on the local CPU you need to be protected again NMI or MCE
11900 * handlers seeing an inconsistent instruction while you patch.
11901 */
11902 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
11903 +void *__kprobes text_poke_early(void *addr, const void *opcode,
11904 size_t len)
11905 {
11906 unsigned long flags;
11907 local_irq_save(flags);
11908 - memcpy(addr, opcode, len);
11909 +
11910 + pax_open_kernel();
11911 + memcpy(ktla_ktva(addr), opcode, len);
11912 sync_core();
11913 + pax_close_kernel();
11914 +
11915 local_irq_restore(flags);
11916 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11917 that causes hangs on some VIA CPUs. */
11918 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11919 */
11920 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11921 {
11922 - unsigned long flags;
11923 - char *vaddr;
11924 + unsigned char *vaddr = ktla_ktva(addr);
11925 struct page *pages[2];
11926 - int i;
11927 + size_t i;
11928
11929 if (!core_kernel_text((unsigned long)addr)) {
11930 - pages[0] = vmalloc_to_page(addr);
11931 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11932 + pages[0] = vmalloc_to_page(vaddr);
11933 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11934 } else {
11935 - pages[0] = virt_to_page(addr);
11936 + pages[0] = virt_to_page(vaddr);
11937 WARN_ON(!PageReserved(pages[0]));
11938 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11939 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11940 }
11941 BUG_ON(!pages[0]);
11942 - local_irq_save(flags);
11943 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11944 - if (pages[1])
11945 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11946 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11947 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11948 - clear_fixmap(FIX_TEXT_POKE0);
11949 - if (pages[1])
11950 - clear_fixmap(FIX_TEXT_POKE1);
11951 - local_flush_tlb();
11952 - sync_core();
11953 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11954 - that causes hangs on some VIA CPUs. */
11955 + text_poke_early(addr, opcode, len);
11956 for (i = 0; i < len; i++)
11957 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11958 - local_irq_restore(flags);
11959 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11960 return addr;
11961 }
11962
11963 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11964 index f98d84c..e402a69 100644
11965 --- a/arch/x86/kernel/apic/apic.c
11966 +++ b/arch/x86/kernel/apic/apic.c
11967 @@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11968 /*
11969 * Debug level, exported for io_apic.c
11970 */
11971 -unsigned int apic_verbosity;
11972 +int apic_verbosity;
11973
11974 int pic_mode;
11975
11976 @@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11977 apic_write(APIC_ESR, 0);
11978 v1 = apic_read(APIC_ESR);
11979 ack_APIC_irq();
11980 - atomic_inc(&irq_err_count);
11981 + atomic_inc_unchecked(&irq_err_count);
11982
11983 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11984 smp_processor_id(), v0 , v1);
11985 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11986 index 6d939d7..0697fcc 100644
11987 --- a/arch/x86/kernel/apic/io_apic.c
11988 +++ b/arch/x86/kernel/apic/io_apic.c
11989 @@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11990 }
11991 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11992
11993 -void lock_vector_lock(void)
11994 +void lock_vector_lock(void) __acquires(vector_lock)
11995 {
11996 /* Used to the online set of cpus does not change
11997 * during assign_irq_vector.
11998 @@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
11999 raw_spin_lock(&vector_lock);
12000 }
12001
12002 -void unlock_vector_lock(void)
12003 +void unlock_vector_lock(void) __releases(vector_lock)
12004 {
12005 raw_spin_unlock(&vector_lock);
12006 }
12007 @@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
12008 ack_APIC_irq();
12009 }
12010
12011 -atomic_t irq_mis_count;
12012 +atomic_unchecked_t irq_mis_count;
12013
12014 static void ack_apic_level(struct irq_data *data)
12015 {
12016 @@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
12017 * at the cpu.
12018 */
12019 if (!(v & (1 << (i & 0x1f)))) {
12020 - atomic_inc(&irq_mis_count);
12021 + atomic_inc_unchecked(&irq_mis_count);
12022
12023 eoi_ioapic_irq(irq, cfg);
12024 }
12025 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
12026 index a46bd38..6b906d7 100644
12027 --- a/arch/x86/kernel/apm_32.c
12028 +++ b/arch/x86/kernel/apm_32.c
12029 @@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
12030 * This is for buggy BIOS's that refer to (real mode) segment 0x40
12031 * even though they are called in protected mode.
12032 */
12033 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
12034 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
12035 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
12036
12037 static const char driver_version[] = "1.16ac"; /* no spaces */
12038 @@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
12039 BUG_ON(cpu != 0);
12040 gdt = get_cpu_gdt_table(cpu);
12041 save_desc_40 = gdt[0x40 / 8];
12042 +
12043 + pax_open_kernel();
12044 gdt[0x40 / 8] = bad_bios_desc;
12045 + pax_close_kernel();
12046
12047 apm_irq_save(flags);
12048 APM_DO_SAVE_SEGS;
12049 @@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
12050 &call->esi);
12051 APM_DO_RESTORE_SEGS;
12052 apm_irq_restore(flags);
12053 +
12054 + pax_open_kernel();
12055 gdt[0x40 / 8] = save_desc_40;
12056 + pax_close_kernel();
12057 +
12058 put_cpu();
12059
12060 return call->eax & 0xff;
12061 @@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
12062 BUG_ON(cpu != 0);
12063 gdt = get_cpu_gdt_table(cpu);
12064 save_desc_40 = gdt[0x40 / 8];
12065 +
12066 + pax_open_kernel();
12067 gdt[0x40 / 8] = bad_bios_desc;
12068 + pax_close_kernel();
12069
12070 apm_irq_save(flags);
12071 APM_DO_SAVE_SEGS;
12072 @@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
12073 &call->eax);
12074 APM_DO_RESTORE_SEGS;
12075 apm_irq_restore(flags);
12076 +
12077 + pax_open_kernel();
12078 gdt[0x40 / 8] = save_desc_40;
12079 + pax_close_kernel();
12080 +
12081 put_cpu();
12082 return error;
12083 }
12084 @@ -2347,12 +2361,15 @@ static int __init apm_init(void)
12085 * code to that CPU.
12086 */
12087 gdt = get_cpu_gdt_table(0);
12088 +
12089 + pax_open_kernel();
12090 set_desc_base(&gdt[APM_CS >> 3],
12091 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
12092 set_desc_base(&gdt[APM_CS_16 >> 3],
12093 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
12094 set_desc_base(&gdt[APM_DS >> 3],
12095 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
12096 + pax_close_kernel();
12097
12098 proc_create("apm", 0, NULL, &apm_file_ops);
12099
12100 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
12101 index 4f13faf..87db5d2 100644
12102 --- a/arch/x86/kernel/asm-offsets.c
12103 +++ b/arch/x86/kernel/asm-offsets.c
12104 @@ -33,6 +33,8 @@ void common(void) {
12105 OFFSET(TI_status, thread_info, status);
12106 OFFSET(TI_addr_limit, thread_info, addr_limit);
12107 OFFSET(TI_preempt_count, thread_info, preempt_count);
12108 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
12109 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12110
12111 BLANK();
12112 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
12113 @@ -53,8 +55,26 @@ void common(void) {
12114 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12115 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12116 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12117 +
12118 +#ifdef CONFIG_PAX_KERNEXEC
12119 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12120 #endif
12121
12122 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12123 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12124 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12125 +#ifdef CONFIG_X86_64
12126 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
12127 +#endif
12128 +#endif
12129 +
12130 +#endif
12131 +
12132 + BLANK();
12133 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12134 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12135 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12136 +
12137 #ifdef CONFIG_XEN
12138 BLANK();
12139 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12140 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
12141 index e72a119..6e2955d 100644
12142 --- a/arch/x86/kernel/asm-offsets_64.c
12143 +++ b/arch/x86/kernel/asm-offsets_64.c
12144 @@ -69,6 +69,7 @@ int main(void)
12145 BLANK();
12146 #undef ENTRY
12147
12148 + DEFINE(TSS_size, sizeof(struct tss_struct));
12149 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
12150 BLANK();
12151
12152 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
12153 index 25f24dc..4094a7f 100644
12154 --- a/arch/x86/kernel/cpu/Makefile
12155 +++ b/arch/x86/kernel/cpu/Makefile
12156 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
12157 CFLAGS_REMOVE_perf_event.o = -pg
12158 endif
12159
12160 -# Make sure load_percpu_segment has no stackprotector
12161 -nostackp := $(call cc-option, -fno-stack-protector)
12162 -CFLAGS_common.o := $(nostackp)
12163 -
12164 obj-y := intel_cacheinfo.o scattered.o topology.o
12165 obj-y += proc.o capflags.o powerflags.o common.o
12166 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
12167 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
12168 index 0bab2b1..d0a1bf8 100644
12169 --- a/arch/x86/kernel/cpu/amd.c
12170 +++ b/arch/x86/kernel/cpu/amd.c
12171 @@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
12172 unsigned int size)
12173 {
12174 /* AMD errata T13 (order #21922) */
12175 - if ((c->x86 == 6)) {
12176 + if (c->x86 == 6) {
12177 /* Duron Rev A0 */
12178 if (c->x86_model == 3 && c->x86_mask == 0)
12179 size = 64;
12180 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
12181 index aa003b1..47ea638 100644
12182 --- a/arch/x86/kernel/cpu/common.c
12183 +++ b/arch/x86/kernel/cpu/common.c
12184 @@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
12185
12186 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12187
12188 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12189 -#ifdef CONFIG_X86_64
12190 - /*
12191 - * We need valid kernel segments for data and code in long mode too
12192 - * IRET will check the segment types kkeil 2000/10/28
12193 - * Also sysret mandates a special GDT layout
12194 - *
12195 - * TLS descriptors are currently at a different place compared to i386.
12196 - * Hopefully nobody expects them at a fixed place (Wine?)
12197 - */
12198 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12199 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12200 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12201 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12202 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12203 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12204 -#else
12205 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12206 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12207 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12208 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12209 - /*
12210 - * Segments used for calling PnP BIOS have byte granularity.
12211 - * They code segments and data segments have fixed 64k limits,
12212 - * the transfer segment sizes are set at run time.
12213 - */
12214 - /* 32-bit code */
12215 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12216 - /* 16-bit code */
12217 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12218 - /* 16-bit data */
12219 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12220 - /* 16-bit data */
12221 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12222 - /* 16-bit data */
12223 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12224 - /*
12225 - * The APM segments have byte granularity and their bases
12226 - * are set at run time. All have 64k limits.
12227 - */
12228 - /* 32-bit code */
12229 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12230 - /* 16-bit code */
12231 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12232 - /* data */
12233 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12234 -
12235 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12236 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12237 - GDT_STACK_CANARY_INIT
12238 -#endif
12239 -} };
12240 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12241 -
12242 static int __init x86_xsave_setup(char *s)
12243 {
12244 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12245 @@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
12246 {
12247 struct desc_ptr gdt_descr;
12248
12249 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12250 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12251 gdt_descr.size = GDT_SIZE - 1;
12252 load_gdt(&gdt_descr);
12253 /* Reload the per-cpu base */
12254 @@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
12255 /* Filter out anything that depends on CPUID levels we don't have */
12256 filter_cpuid_features(c, true);
12257
12258 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
12259 + setup_clear_cpu_cap(X86_FEATURE_SEP);
12260 +#endif
12261 +
12262 /* If the model name is still unset, do table lookup. */
12263 if (!c->x86_model_id[0]) {
12264 const char *p;
12265 @@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
12266 }
12267 __setup("clearcpuid=", setup_disablecpuid);
12268
12269 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12270 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
12271 +
12272 #ifdef CONFIG_X86_64
12273 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12274
12275 @@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
12276 EXPORT_PER_CPU_SYMBOL(current_task);
12277
12278 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12279 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12280 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12281 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12282
12283 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12284 @@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
12285 {
12286 memset(regs, 0, sizeof(struct pt_regs));
12287 regs->fs = __KERNEL_PERCPU;
12288 - regs->gs = __KERNEL_STACK_CANARY;
12289 + savesegment(gs, regs->gs);
12290
12291 return regs;
12292 }
12293 @@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
12294 int i;
12295
12296 cpu = stack_smp_processor_id();
12297 - t = &per_cpu(init_tss, cpu);
12298 + t = init_tss + cpu;
12299 oist = &per_cpu(orig_ist, cpu);
12300
12301 #ifdef CONFIG_NUMA
12302 @@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
12303 switch_to_new_gdt(cpu);
12304 loadsegment(fs, 0);
12305
12306 - load_idt((const struct desc_ptr *)&idt_descr);
12307 + load_idt(&idt_descr);
12308
12309 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12310 syscall_init();
12311 @@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
12312 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12313 barrier();
12314
12315 - x86_configure_nx();
12316 if (cpu != 0)
12317 enable_x2apic();
12318
12319 @@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
12320 {
12321 int cpu = smp_processor_id();
12322 struct task_struct *curr = current;
12323 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12324 + struct tss_struct *t = init_tss + cpu;
12325 struct thread_struct *thread = &curr->thread;
12326
12327 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12328 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12329 index 5231312..a78a987 100644
12330 --- a/arch/x86/kernel/cpu/intel.c
12331 +++ b/arch/x86/kernel/cpu/intel.c
12332 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12333 * Update the IDT descriptor and reload the IDT so that
12334 * it uses the read-only mapped virtual address.
12335 */
12336 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12337 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12338 load_idt(&idt_descr);
12339 }
12340 #endif
12341 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12342 index 2af127d..8ff7ac0 100644
12343 --- a/arch/x86/kernel/cpu/mcheck/mce.c
12344 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
12345 @@ -42,6 +42,7 @@
12346 #include <asm/processor.h>
12347 #include <asm/mce.h>
12348 #include <asm/msr.h>
12349 +#include <asm/local.h>
12350
12351 #include "mce-internal.h"
12352
12353 @@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
12354 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12355 m->cs, m->ip);
12356
12357 - if (m->cs == __KERNEL_CS)
12358 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12359 print_symbol("{%s}", m->ip);
12360 pr_cont("\n");
12361 }
12362 @@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
12363
12364 #define PANIC_TIMEOUT 5 /* 5 seconds */
12365
12366 -static atomic_t mce_paniced;
12367 +static atomic_unchecked_t mce_paniced;
12368
12369 static int fake_panic;
12370 -static atomic_t mce_fake_paniced;
12371 +static atomic_unchecked_t mce_fake_paniced;
12372
12373 /* Panic in progress. Enable interrupts and wait for final IPI */
12374 static void wait_for_panic(void)
12375 @@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12376 /*
12377 * Make sure only one CPU runs in machine check panic
12378 */
12379 - if (atomic_inc_return(&mce_paniced) > 1)
12380 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12381 wait_for_panic();
12382 barrier();
12383
12384 @@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12385 console_verbose();
12386 } else {
12387 /* Don't log too much for fake panic */
12388 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12389 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12390 return;
12391 }
12392 /* First print corrected ones that are still unlogged */
12393 @@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12394 * might have been modified by someone else.
12395 */
12396 rmb();
12397 - if (atomic_read(&mce_paniced))
12398 + if (atomic_read_unchecked(&mce_paniced))
12399 wait_for_panic();
12400 if (!monarch_timeout)
12401 goto out;
12402 @@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12403 }
12404
12405 /* Call the installed machine check handler for this CPU setup. */
12406 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
12407 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12408 unexpected_machine_check;
12409
12410 /*
12411 @@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12412 return;
12413 }
12414
12415 + pax_open_kernel();
12416 machine_check_vector = do_machine_check;
12417 + pax_close_kernel();
12418
12419 __mcheck_cpu_init_generic();
12420 __mcheck_cpu_init_vendor(c);
12421 @@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12422 */
12423
12424 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12425 -static int mce_chrdev_open_count; /* #times opened */
12426 +static local_t mce_chrdev_open_count; /* #times opened */
12427 static int mce_chrdev_open_exclu; /* already open exclusive? */
12428
12429 static int mce_chrdev_open(struct inode *inode, struct file *file)
12430 @@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12431 spin_lock(&mce_chrdev_state_lock);
12432
12433 if (mce_chrdev_open_exclu ||
12434 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12435 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12436 spin_unlock(&mce_chrdev_state_lock);
12437
12438 return -EBUSY;
12439 @@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12440
12441 if (file->f_flags & O_EXCL)
12442 mce_chrdev_open_exclu = 1;
12443 - mce_chrdev_open_count++;
12444 + local_inc(&mce_chrdev_open_count);
12445
12446 spin_unlock(&mce_chrdev_state_lock);
12447
12448 @@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12449 {
12450 spin_lock(&mce_chrdev_state_lock);
12451
12452 - mce_chrdev_open_count--;
12453 + local_dec(&mce_chrdev_open_count);
12454 mce_chrdev_open_exclu = 0;
12455
12456 spin_unlock(&mce_chrdev_state_lock);
12457 @@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
12458 static void mce_reset(void)
12459 {
12460 cpu_missing = 0;
12461 - atomic_set(&mce_fake_paniced, 0);
12462 + atomic_set_unchecked(&mce_fake_paniced, 0);
12463 atomic_set(&mce_executing, 0);
12464 atomic_set(&mce_callin, 0);
12465 atomic_set(&global_nwo, 0);
12466 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12467 index 5c0e653..0882b0a 100644
12468 --- a/arch/x86/kernel/cpu/mcheck/p5.c
12469 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
12470 @@ -12,6 +12,7 @@
12471 #include <asm/system.h>
12472 #include <asm/mce.h>
12473 #include <asm/msr.h>
12474 +#include <asm/pgtable.h>
12475
12476 /* By default disabled */
12477 int mce_p5_enabled __read_mostly;
12478 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12479 if (!cpu_has(c, X86_FEATURE_MCE))
12480 return;
12481
12482 + pax_open_kernel();
12483 machine_check_vector = pentium_machine_check;
12484 + pax_close_kernel();
12485 /* Make sure the vector pointer is visible before we enable MCEs: */
12486 wmb();
12487
12488 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12489 index 54060f5..c1a7577 100644
12490 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
12491 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12492 @@ -11,6 +11,7 @@
12493 #include <asm/system.h>
12494 #include <asm/mce.h>
12495 #include <asm/msr.h>
12496 +#include <asm/pgtable.h>
12497
12498 /* Machine check handler for WinChip C6: */
12499 static void winchip_machine_check(struct pt_regs *regs, long error_code)
12500 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12501 {
12502 u32 lo, hi;
12503
12504 + pax_open_kernel();
12505 machine_check_vector = winchip_machine_check;
12506 + pax_close_kernel();
12507 /* Make sure the vector pointer is visible before we enable MCEs: */
12508 wmb();
12509
12510 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12511 index 6b96110..0da73eb 100644
12512 --- a/arch/x86/kernel/cpu/mtrr/main.c
12513 +++ b/arch/x86/kernel/cpu/mtrr/main.c
12514 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12515 u64 size_or_mask, size_and_mask;
12516 static bool mtrr_aps_delayed_init;
12517
12518 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12519 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12520
12521 const struct mtrr_ops *mtrr_if;
12522
12523 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12524 index df5e41f..816c719 100644
12525 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12526 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12527 @@ -25,7 +25,7 @@ struct mtrr_ops {
12528 int (*validate_add_page)(unsigned long base, unsigned long size,
12529 unsigned int type);
12530 int (*have_wrcomb)(void);
12531 -};
12532 +} __do_const;
12533
12534 extern int generic_get_free_region(unsigned long base, unsigned long size,
12535 int replace_reg);
12536 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12537 index 2bda212..78cc605 100644
12538 --- a/arch/x86/kernel/cpu/perf_event.c
12539 +++ b/arch/x86/kernel/cpu/perf_event.c
12540 @@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12541 break;
12542
12543 perf_callchain_store(entry, frame.return_address);
12544 - fp = frame.next_frame;
12545 + fp = (const void __force_user *)frame.next_frame;
12546 }
12547 }
12548
12549 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12550 index 13ad899..f642b9a 100644
12551 --- a/arch/x86/kernel/crash.c
12552 +++ b/arch/x86/kernel/crash.c
12553 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
12554 {
12555 #ifdef CONFIG_X86_32
12556 struct pt_regs fixed_regs;
12557 -#endif
12558
12559 -#ifdef CONFIG_X86_32
12560 - if (!user_mode_vm(regs)) {
12561 + if (!user_mode(regs)) {
12562 crash_fixup_ss_esp(&fixed_regs, regs);
12563 regs = &fixed_regs;
12564 }
12565 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12566 index 37250fe..bf2ec74 100644
12567 --- a/arch/x86/kernel/doublefault_32.c
12568 +++ b/arch/x86/kernel/doublefault_32.c
12569 @@ -11,7 +11,7 @@
12570
12571 #define DOUBLEFAULT_STACKSIZE (1024)
12572 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12573 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12574 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12575
12576 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12577
12578 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12579 unsigned long gdt, tss;
12580
12581 store_gdt(&gdt_desc);
12582 - gdt = gdt_desc.address;
12583 + gdt = (unsigned long)gdt_desc.address;
12584
12585 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12586
12587 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12588 /* 0x2 bit is always set */
12589 .flags = X86_EFLAGS_SF | 0x2,
12590 .sp = STACK_START,
12591 - .es = __USER_DS,
12592 + .es = __KERNEL_DS,
12593 .cs = __KERNEL_CS,
12594 .ss = __KERNEL_DS,
12595 - .ds = __USER_DS,
12596 + .ds = __KERNEL_DS,
12597 .fs = __KERNEL_PERCPU,
12598
12599 .__cr3 = __pa_nodebug(swapper_pg_dir),
12600 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12601 index 1aae78f..aab3a3d 100644
12602 --- a/arch/x86/kernel/dumpstack.c
12603 +++ b/arch/x86/kernel/dumpstack.c
12604 @@ -2,6 +2,9 @@
12605 * Copyright (C) 1991, 1992 Linus Torvalds
12606 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12607 */
12608 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12609 +#define __INCLUDED_BY_HIDESYM 1
12610 +#endif
12611 #include <linux/kallsyms.h>
12612 #include <linux/kprobes.h>
12613 #include <linux/uaccess.h>
12614 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12615 static void
12616 print_ftrace_graph_addr(unsigned long addr, void *data,
12617 const struct stacktrace_ops *ops,
12618 - struct thread_info *tinfo, int *graph)
12619 + struct task_struct *task, int *graph)
12620 {
12621 - struct task_struct *task = tinfo->task;
12622 unsigned long ret_addr;
12623 int index = task->curr_ret_stack;
12624
12625 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12626 static inline void
12627 print_ftrace_graph_addr(unsigned long addr, void *data,
12628 const struct stacktrace_ops *ops,
12629 - struct thread_info *tinfo, int *graph)
12630 + struct task_struct *task, int *graph)
12631 { }
12632 #endif
12633
12634 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12635 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12636 */
12637
12638 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12639 - void *p, unsigned int size, void *end)
12640 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12641 {
12642 - void *t = tinfo;
12643 if (end) {
12644 if (p < end && p >= (end-THREAD_SIZE))
12645 return 1;
12646 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12647 }
12648
12649 unsigned long
12650 -print_context_stack(struct thread_info *tinfo,
12651 +print_context_stack(struct task_struct *task, void *stack_start,
12652 unsigned long *stack, unsigned long bp,
12653 const struct stacktrace_ops *ops, void *data,
12654 unsigned long *end, int *graph)
12655 {
12656 struct stack_frame *frame = (struct stack_frame *)bp;
12657
12658 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12659 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12660 unsigned long addr;
12661
12662 addr = *stack;
12663 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12664 } else {
12665 ops->address(data, addr, 0);
12666 }
12667 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12668 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12669 }
12670 stack++;
12671 }
12672 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12673 EXPORT_SYMBOL_GPL(print_context_stack);
12674
12675 unsigned long
12676 -print_context_stack_bp(struct thread_info *tinfo,
12677 +print_context_stack_bp(struct task_struct *task, void *stack_start,
12678 unsigned long *stack, unsigned long bp,
12679 const struct stacktrace_ops *ops, void *data,
12680 unsigned long *end, int *graph)
12681 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12682 struct stack_frame *frame = (struct stack_frame *)bp;
12683 unsigned long *ret_addr = &frame->return_address;
12684
12685 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12686 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12687 unsigned long addr = *ret_addr;
12688
12689 if (!__kernel_text_address(addr))
12690 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12691 ops->address(data, addr, 1);
12692 frame = frame->next_frame;
12693 ret_addr = &frame->return_address;
12694 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12695 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12696 }
12697
12698 return (unsigned long)frame;
12699 @@ -186,7 +186,7 @@ void dump_stack(void)
12700
12701 bp = stack_frame(current, NULL);
12702 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12703 - current->pid, current->comm, print_tainted(),
12704 + task_pid_nr(current), current->comm, print_tainted(),
12705 init_utsname()->release,
12706 (int)strcspn(init_utsname()->version, " "),
12707 init_utsname()->version);
12708 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12709 }
12710 EXPORT_SYMBOL_GPL(oops_begin);
12711
12712 +extern void gr_handle_kernel_exploit(void);
12713 +
12714 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12715 {
12716 if (regs && kexec_should_crash(current))
12717 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12718 panic("Fatal exception in interrupt");
12719 if (panic_on_oops)
12720 panic("Fatal exception");
12721 - do_exit(signr);
12722 +
12723 + gr_handle_kernel_exploit();
12724 +
12725 + do_group_exit(signr);
12726 }
12727
12728 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12729 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12730
12731 show_registers(regs);
12732 #ifdef CONFIG_X86_32
12733 - if (user_mode_vm(regs)) {
12734 + if (user_mode(regs)) {
12735 sp = regs->sp;
12736 ss = regs->ss & 0xffff;
12737 } else {
12738 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12739 unsigned long flags = oops_begin();
12740 int sig = SIGSEGV;
12741
12742 - if (!user_mode_vm(regs))
12743 + if (!user_mode(regs))
12744 report_bug(regs->ip, regs);
12745
12746 if (__die(str, regs, err))
12747 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12748 index c99f9ed..2a15d80 100644
12749 --- a/arch/x86/kernel/dumpstack_32.c
12750 +++ b/arch/x86/kernel/dumpstack_32.c
12751 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12752 bp = stack_frame(task, regs);
12753
12754 for (;;) {
12755 - struct thread_info *context;
12756 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12757
12758 - context = (struct thread_info *)
12759 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12760 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12761 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12762
12763 - stack = (unsigned long *)context->previous_esp;
12764 - if (!stack)
12765 + if (stack_start == task_stack_page(task))
12766 break;
12767 + stack = *(unsigned long **)stack_start;
12768 if (ops->stack(data, "IRQ") < 0)
12769 break;
12770 touch_nmi_watchdog();
12771 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12772 * When in-kernel, we also print out the stack and code at the
12773 * time of the fault..
12774 */
12775 - if (!user_mode_vm(regs)) {
12776 + if (!user_mode(regs)) {
12777 unsigned int code_prologue = code_bytes * 43 / 64;
12778 unsigned int code_len = code_bytes;
12779 unsigned char c;
12780 u8 *ip;
12781 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12782
12783 printk(KERN_EMERG "Stack:\n");
12784 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12785
12786 printk(KERN_EMERG "Code: ");
12787
12788 - ip = (u8 *)regs->ip - code_prologue;
12789 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12790 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12791 /* try starting at IP */
12792 - ip = (u8 *)regs->ip;
12793 + ip = (u8 *)regs->ip + cs_base;
12794 code_len = code_len - code_prologue + 1;
12795 }
12796 for (i = 0; i < code_len; i++, ip++) {
12797 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12798 printk(KERN_CONT " Bad EIP value.");
12799 break;
12800 }
12801 - if (ip == (u8 *)regs->ip)
12802 + if (ip == (u8 *)regs->ip + cs_base)
12803 printk(KERN_CONT "<%02x> ", c);
12804 else
12805 printk(KERN_CONT "%02x ", c);
12806 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12807 {
12808 unsigned short ud2;
12809
12810 + ip = ktla_ktva(ip);
12811 if (ip < PAGE_OFFSET)
12812 return 0;
12813 if (probe_kernel_address((unsigned short *)ip, ud2))
12814 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12815
12816 return ud2 == 0x0b0f;
12817 }
12818 +
12819 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12820 +void pax_check_alloca(unsigned long size)
12821 +{
12822 + unsigned long sp = (unsigned long)&sp, stack_left;
12823 +
12824 + /* all kernel stacks are of the same size */
12825 + stack_left = sp & (THREAD_SIZE - 1);
12826 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12827 +}
12828 +EXPORT_SYMBOL(pax_check_alloca);
12829 +#endif
12830 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12831 index 6d728d9..279514e 100644
12832 --- a/arch/x86/kernel/dumpstack_64.c
12833 +++ b/arch/x86/kernel/dumpstack_64.c
12834 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12835 unsigned long *irq_stack_end =
12836 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12837 unsigned used = 0;
12838 - struct thread_info *tinfo;
12839 int graph = 0;
12840 unsigned long dummy;
12841 + void *stack_start;
12842
12843 if (!task)
12844 task = current;
12845 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12846 * current stack address. If the stacks consist of nested
12847 * exceptions
12848 */
12849 - tinfo = task_thread_info(task);
12850 for (;;) {
12851 char *id;
12852 unsigned long *estack_end;
12853 +
12854 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12855 &used, &id);
12856
12857 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12858 if (ops->stack(data, id) < 0)
12859 break;
12860
12861 - bp = ops->walk_stack(tinfo, stack, bp, ops,
12862 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12863 data, estack_end, &graph);
12864 ops->stack(data, "<EOE>");
12865 /*
12866 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12867 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12868 if (ops->stack(data, "IRQ") < 0)
12869 break;
12870 - bp = ops->walk_stack(tinfo, stack, bp,
12871 + bp = ops->walk_stack(task, irq_stack, stack, bp,
12872 ops, data, irq_stack_end, &graph);
12873 /*
12874 * We link to the next stack (which would be
12875 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12876 /*
12877 * This handles the process stack:
12878 */
12879 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12880 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12881 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12882 put_cpu();
12883 }
12884 EXPORT_SYMBOL(dump_trace);
12885 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12886
12887 return ud2 == 0x0b0f;
12888 }
12889 +
12890 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12891 +void pax_check_alloca(unsigned long size)
12892 +{
12893 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12894 + unsigned cpu, used;
12895 + char *id;
12896 +
12897 + /* check the process stack first */
12898 + stack_start = (unsigned long)task_stack_page(current);
12899 + stack_end = stack_start + THREAD_SIZE;
12900 + if (likely(stack_start <= sp && sp < stack_end)) {
12901 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
12902 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12903 + return;
12904 + }
12905 +
12906 + cpu = get_cpu();
12907 +
12908 + /* check the irq stacks */
12909 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12910 + stack_start = stack_end - IRQ_STACK_SIZE;
12911 + if (stack_start <= sp && sp < stack_end) {
12912 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12913 + put_cpu();
12914 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12915 + return;
12916 + }
12917 +
12918 + /* check the exception stacks */
12919 + used = 0;
12920 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12921 + stack_start = stack_end - EXCEPTION_STKSZ;
12922 + if (stack_end && stack_start <= sp && sp < stack_end) {
12923 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12924 + put_cpu();
12925 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12926 + return;
12927 + }
12928 +
12929 + put_cpu();
12930 +
12931 + /* unknown stack */
12932 + BUG();
12933 +}
12934 +EXPORT_SYMBOL(pax_check_alloca);
12935 +#endif
12936 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12937 index cd28a35..c72ed9a 100644
12938 --- a/arch/x86/kernel/early_printk.c
12939 +++ b/arch/x86/kernel/early_printk.c
12940 @@ -7,6 +7,7 @@
12941 #include <linux/pci_regs.h>
12942 #include <linux/pci_ids.h>
12943 #include <linux/errno.h>
12944 +#include <linux/sched.h>
12945 #include <asm/io.h>
12946 #include <asm/processor.h>
12947 #include <asm/fcntl.h>
12948 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12949 index f3f6f53..0841b66 100644
12950 --- a/arch/x86/kernel/entry_32.S
12951 +++ b/arch/x86/kernel/entry_32.S
12952 @@ -186,13 +186,146 @@
12953 /*CFI_REL_OFFSET gs, PT_GS*/
12954 .endm
12955 .macro SET_KERNEL_GS reg
12956 +
12957 +#ifdef CONFIG_CC_STACKPROTECTOR
12958 movl $(__KERNEL_STACK_CANARY), \reg
12959 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12960 + movl $(__USER_DS), \reg
12961 +#else
12962 + xorl \reg, \reg
12963 +#endif
12964 +
12965 movl \reg, %gs
12966 .endm
12967
12968 #endif /* CONFIG_X86_32_LAZY_GS */
12969
12970 -.macro SAVE_ALL
12971 +.macro pax_enter_kernel
12972 +#ifdef CONFIG_PAX_KERNEXEC
12973 + call pax_enter_kernel
12974 +#endif
12975 +.endm
12976 +
12977 +.macro pax_exit_kernel
12978 +#ifdef CONFIG_PAX_KERNEXEC
12979 + call pax_exit_kernel
12980 +#endif
12981 +.endm
12982 +
12983 +#ifdef CONFIG_PAX_KERNEXEC
12984 +ENTRY(pax_enter_kernel)
12985 +#ifdef CONFIG_PARAVIRT
12986 + pushl %eax
12987 + pushl %ecx
12988 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12989 + mov %eax, %esi
12990 +#else
12991 + mov %cr0, %esi
12992 +#endif
12993 + bts $16, %esi
12994 + jnc 1f
12995 + mov %cs, %esi
12996 + cmp $__KERNEL_CS, %esi
12997 + jz 3f
12998 + ljmp $__KERNEL_CS, $3f
12999 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13000 +2:
13001 +#ifdef CONFIG_PARAVIRT
13002 + mov %esi, %eax
13003 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13004 +#else
13005 + mov %esi, %cr0
13006 +#endif
13007 +3:
13008 +#ifdef CONFIG_PARAVIRT
13009 + popl %ecx
13010 + popl %eax
13011 +#endif
13012 + ret
13013 +ENDPROC(pax_enter_kernel)
13014 +
13015 +ENTRY(pax_exit_kernel)
13016 +#ifdef CONFIG_PARAVIRT
13017 + pushl %eax
13018 + pushl %ecx
13019 +#endif
13020 + mov %cs, %esi
13021 + cmp $__KERNEXEC_KERNEL_CS, %esi
13022 + jnz 2f
13023 +#ifdef CONFIG_PARAVIRT
13024 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13025 + mov %eax, %esi
13026 +#else
13027 + mov %cr0, %esi
13028 +#endif
13029 + btr $16, %esi
13030 + ljmp $__KERNEL_CS, $1f
13031 +1:
13032 +#ifdef CONFIG_PARAVIRT
13033 + mov %esi, %eax
13034 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13035 +#else
13036 + mov %esi, %cr0
13037 +#endif
13038 +2:
13039 +#ifdef CONFIG_PARAVIRT
13040 + popl %ecx
13041 + popl %eax
13042 +#endif
13043 + ret
13044 +ENDPROC(pax_exit_kernel)
13045 +#endif
13046 +
13047 +.macro pax_erase_kstack
13048 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13049 + call pax_erase_kstack
13050 +#endif
13051 +.endm
13052 +
13053 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13054 +/*
13055 + * ebp: thread_info
13056 + * ecx, edx: can be clobbered
13057 + */
13058 +ENTRY(pax_erase_kstack)
13059 + pushl %edi
13060 + pushl %eax
13061 +
13062 + mov TI_lowest_stack(%ebp), %edi
13063 + mov $-0xBEEF, %eax
13064 + std
13065 +
13066 +1: mov %edi, %ecx
13067 + and $THREAD_SIZE_asm - 1, %ecx
13068 + shr $2, %ecx
13069 + repne scasl
13070 + jecxz 2f
13071 +
13072 + cmp $2*16, %ecx
13073 + jc 2f
13074 +
13075 + mov $2*16, %ecx
13076 + repe scasl
13077 + jecxz 2f
13078 + jne 1b
13079 +
13080 +2: cld
13081 + mov %esp, %ecx
13082 + sub %edi, %ecx
13083 + shr $2, %ecx
13084 + rep stosl
13085 +
13086 + mov TI_task_thread_sp0(%ebp), %edi
13087 + sub $128, %edi
13088 + mov %edi, TI_lowest_stack(%ebp)
13089 +
13090 + popl %eax
13091 + popl %edi
13092 + ret
13093 +ENDPROC(pax_erase_kstack)
13094 +#endif
13095 +
13096 +.macro __SAVE_ALL _DS
13097 cld
13098 PUSH_GS
13099 pushl_cfi %fs
13100 @@ -215,7 +348,7 @@
13101 CFI_REL_OFFSET ecx, 0
13102 pushl_cfi %ebx
13103 CFI_REL_OFFSET ebx, 0
13104 - movl $(__USER_DS), %edx
13105 + movl $\_DS, %edx
13106 movl %edx, %ds
13107 movl %edx, %es
13108 movl $(__KERNEL_PERCPU), %edx
13109 @@ -223,6 +356,15 @@
13110 SET_KERNEL_GS %edx
13111 .endm
13112
13113 +.macro SAVE_ALL
13114 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13115 + __SAVE_ALL __KERNEL_DS
13116 + pax_enter_kernel
13117 +#else
13118 + __SAVE_ALL __USER_DS
13119 +#endif
13120 +.endm
13121 +
13122 .macro RESTORE_INT_REGS
13123 popl_cfi %ebx
13124 CFI_RESTORE ebx
13125 @@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
13126 popfl_cfi
13127 jmp syscall_exit
13128 CFI_ENDPROC
13129 -END(ret_from_fork)
13130 +ENDPROC(ret_from_fork)
13131
13132 /*
13133 * Interrupt exit functions should be protected against kprobes
13134 @@ -333,7 +475,15 @@ check_userspace:
13135 movb PT_CS(%esp), %al
13136 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13137 cmpl $USER_RPL, %eax
13138 +
13139 +#ifdef CONFIG_PAX_KERNEXEC
13140 + jae resume_userspace
13141 +
13142 + PAX_EXIT_KERNEL
13143 + jmp resume_kernel
13144 +#else
13145 jb resume_kernel # not returning to v8086 or userspace
13146 +#endif
13147
13148 ENTRY(resume_userspace)
13149 LOCKDEP_SYS_EXIT
13150 @@ -345,8 +495,8 @@ ENTRY(resume_userspace)
13151 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13152 # int/exception return?
13153 jne work_pending
13154 - jmp restore_all
13155 -END(ret_from_exception)
13156 + jmp restore_all_pax
13157 +ENDPROC(ret_from_exception)
13158
13159 #ifdef CONFIG_PREEMPT
13160 ENTRY(resume_kernel)
13161 @@ -361,7 +511,7 @@ need_resched:
13162 jz restore_all
13163 call preempt_schedule_irq
13164 jmp need_resched
13165 -END(resume_kernel)
13166 +ENDPROC(resume_kernel)
13167 #endif
13168 CFI_ENDPROC
13169 /*
13170 @@ -395,23 +545,34 @@ sysenter_past_esp:
13171 /*CFI_REL_OFFSET cs, 0*/
13172 /*
13173 * Push current_thread_info()->sysenter_return to the stack.
13174 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13175 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
13176 */
13177 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
13178 + pushl_cfi $0
13179 CFI_REL_OFFSET eip, 0
13180
13181 pushl_cfi %eax
13182 SAVE_ALL
13183 + GET_THREAD_INFO(%ebp)
13184 + movl TI_sysenter_return(%ebp),%ebp
13185 + movl %ebp,PT_EIP(%esp)
13186 ENABLE_INTERRUPTS(CLBR_NONE)
13187
13188 /*
13189 * Load the potential sixth argument from user stack.
13190 * Careful about security.
13191 */
13192 + movl PT_OLDESP(%esp),%ebp
13193 +
13194 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13195 + mov PT_OLDSS(%esp),%ds
13196 +1: movl %ds:(%ebp),%ebp
13197 + push %ss
13198 + pop %ds
13199 +#else
13200 cmpl $__PAGE_OFFSET-3,%ebp
13201 jae syscall_fault
13202 1: movl (%ebp),%ebp
13203 +#endif
13204 +
13205 movl %ebp,PT_EBP(%esp)
13206 .section __ex_table,"a"
13207 .align 4
13208 @@ -434,12 +595,24 @@ sysenter_do_call:
13209 testl $_TIF_ALLWORK_MASK, %ecx
13210 jne sysexit_audit
13211 sysenter_exit:
13212 +
13213 +#ifdef CONFIG_PAX_RANDKSTACK
13214 + pushl_cfi %eax
13215 + movl %esp, %eax
13216 + call pax_randomize_kstack
13217 + popl_cfi %eax
13218 +#endif
13219 +
13220 + pax_erase_kstack
13221 +
13222 /* if something modifies registers it must also disable sysexit */
13223 movl PT_EIP(%esp), %edx
13224 movl PT_OLDESP(%esp), %ecx
13225 xorl %ebp,%ebp
13226 TRACE_IRQS_ON
13227 1: mov PT_FS(%esp), %fs
13228 +2: mov PT_DS(%esp), %ds
13229 +3: mov PT_ES(%esp), %es
13230 PTGS_TO_GS
13231 ENABLE_INTERRUPTS_SYSEXIT
13232
13233 @@ -456,6 +629,9 @@ sysenter_audit:
13234 movl %eax,%edx /* 2nd arg: syscall number */
13235 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13236 call audit_syscall_entry
13237 +
13238 + pax_erase_kstack
13239 +
13240 pushl_cfi %ebx
13241 movl PT_EAX(%esp),%eax /* reload syscall number */
13242 jmp sysenter_do_call
13243 @@ -482,11 +658,17 @@ sysexit_audit:
13244
13245 CFI_ENDPROC
13246 .pushsection .fixup,"ax"
13247 -2: movl $0,PT_FS(%esp)
13248 +4: movl $0,PT_FS(%esp)
13249 + jmp 1b
13250 +5: movl $0,PT_DS(%esp)
13251 + jmp 1b
13252 +6: movl $0,PT_ES(%esp)
13253 jmp 1b
13254 .section __ex_table,"a"
13255 .align 4
13256 - .long 1b,2b
13257 + .long 1b,4b
13258 + .long 2b,5b
13259 + .long 3b,6b
13260 .popsection
13261 PTGS_TO_GS_EX
13262 ENDPROC(ia32_sysenter_target)
13263 @@ -519,6 +701,15 @@ syscall_exit:
13264 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13265 jne syscall_exit_work
13266
13267 +restore_all_pax:
13268 +
13269 +#ifdef CONFIG_PAX_RANDKSTACK
13270 + movl %esp, %eax
13271 + call pax_randomize_kstack
13272 +#endif
13273 +
13274 + pax_erase_kstack
13275 +
13276 restore_all:
13277 TRACE_IRQS_IRET
13278 restore_all_notrace:
13279 @@ -578,14 +769,34 @@ ldt_ss:
13280 * compensating for the offset by changing to the ESPFIX segment with
13281 * a base address that matches for the difference.
13282 */
13283 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
13284 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
13285 mov %esp, %edx /* load kernel esp */
13286 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13287 mov %dx, %ax /* eax: new kernel esp */
13288 sub %eax, %edx /* offset (low word is 0) */
13289 +#ifdef CONFIG_SMP
13290 + movl PER_CPU_VAR(cpu_number), %ebx
13291 + shll $PAGE_SHIFT_asm, %ebx
13292 + addl $cpu_gdt_table, %ebx
13293 +#else
13294 + movl $cpu_gdt_table, %ebx
13295 +#endif
13296 shr $16, %edx
13297 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
13298 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
13299 +
13300 +#ifdef CONFIG_PAX_KERNEXEC
13301 + mov %cr0, %esi
13302 + btr $16, %esi
13303 + mov %esi, %cr0
13304 +#endif
13305 +
13306 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13307 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13308 +
13309 +#ifdef CONFIG_PAX_KERNEXEC
13310 + bts $16, %esi
13311 + mov %esi, %cr0
13312 +#endif
13313 +
13314 pushl_cfi $__ESPFIX_SS
13315 pushl_cfi %eax /* new kernel esp */
13316 /* Disable interrupts, but do not irqtrace this section: we
13317 @@ -614,34 +825,28 @@ work_resched:
13318 movl TI_flags(%ebp), %ecx
13319 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13320 # than syscall tracing?
13321 - jz restore_all
13322 + jz restore_all_pax
13323 testb $_TIF_NEED_RESCHED, %cl
13324 jnz work_resched
13325
13326 work_notifysig: # deal with pending signals and
13327 # notify-resume requests
13328 + movl %esp, %eax
13329 #ifdef CONFIG_VM86
13330 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13331 - movl %esp, %eax
13332 - jne work_notifysig_v86 # returning to kernel-space or
13333 + jz 1f # returning to kernel-space or
13334 # vm86-space
13335 - xorl %edx, %edx
13336 - call do_notify_resume
13337 - jmp resume_userspace_sig
13338
13339 - ALIGN
13340 -work_notifysig_v86:
13341 pushl_cfi %ecx # save ti_flags for do_notify_resume
13342 call save_v86_state # %eax contains pt_regs pointer
13343 popl_cfi %ecx
13344 movl %eax, %esp
13345 -#else
13346 - movl %esp, %eax
13347 +1:
13348 #endif
13349 xorl %edx, %edx
13350 call do_notify_resume
13351 jmp resume_userspace_sig
13352 -END(work_pending)
13353 +ENDPROC(work_pending)
13354
13355 # perform syscall exit tracing
13356 ALIGN
13357 @@ -649,11 +854,14 @@ syscall_trace_entry:
13358 movl $-ENOSYS,PT_EAX(%esp)
13359 movl %esp, %eax
13360 call syscall_trace_enter
13361 +
13362 + pax_erase_kstack
13363 +
13364 /* What it returned is what we'll actually use. */
13365 cmpl $(nr_syscalls), %eax
13366 jnae syscall_call
13367 jmp syscall_exit
13368 -END(syscall_trace_entry)
13369 +ENDPROC(syscall_trace_entry)
13370
13371 # perform syscall exit tracing
13372 ALIGN
13373 @@ -666,20 +874,24 @@ syscall_exit_work:
13374 movl %esp, %eax
13375 call syscall_trace_leave
13376 jmp resume_userspace
13377 -END(syscall_exit_work)
13378 +ENDPROC(syscall_exit_work)
13379 CFI_ENDPROC
13380
13381 RING0_INT_FRAME # can't unwind into user space anyway
13382 syscall_fault:
13383 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13384 + push %ss
13385 + pop %ds
13386 +#endif
13387 GET_THREAD_INFO(%ebp)
13388 movl $-EFAULT,PT_EAX(%esp)
13389 jmp resume_userspace
13390 -END(syscall_fault)
13391 +ENDPROC(syscall_fault)
13392
13393 syscall_badsys:
13394 movl $-ENOSYS,PT_EAX(%esp)
13395 jmp resume_userspace
13396 -END(syscall_badsys)
13397 +ENDPROC(syscall_badsys)
13398 CFI_ENDPROC
13399 /*
13400 * End of kprobes section
13401 @@ -753,6 +965,36 @@ ptregs_clone:
13402 CFI_ENDPROC
13403 ENDPROC(ptregs_clone)
13404
13405 + ALIGN;
13406 +ENTRY(kernel_execve)
13407 + CFI_STARTPROC
13408 + pushl_cfi %ebp
13409 + sub $PT_OLDSS+4,%esp
13410 + pushl_cfi %edi
13411 + pushl_cfi %ecx
13412 + pushl_cfi %eax
13413 + lea 3*4(%esp),%edi
13414 + mov $PT_OLDSS/4+1,%ecx
13415 + xorl %eax,%eax
13416 + rep stosl
13417 + popl_cfi %eax
13418 + popl_cfi %ecx
13419 + popl_cfi %edi
13420 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13421 + pushl_cfi %esp
13422 + call sys_execve
13423 + add $4,%esp
13424 + CFI_ADJUST_CFA_OFFSET -4
13425 + GET_THREAD_INFO(%ebp)
13426 + test %eax,%eax
13427 + jz syscall_exit
13428 + add $PT_OLDSS+4,%esp
13429 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13430 + popl_cfi %ebp
13431 + ret
13432 + CFI_ENDPROC
13433 +ENDPROC(kernel_execve)
13434 +
13435 .macro FIXUP_ESPFIX_STACK
13436 /*
13437 * Switch back for ESPFIX stack to the normal zerobased stack
13438 @@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13439 * normal stack and adjusts ESP with the matching offset.
13440 */
13441 /* fixup the stack */
13442 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13443 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13444 +#ifdef CONFIG_SMP
13445 + movl PER_CPU_VAR(cpu_number), %ebx
13446 + shll $PAGE_SHIFT_asm, %ebx
13447 + addl $cpu_gdt_table, %ebx
13448 +#else
13449 + movl $cpu_gdt_table, %ebx
13450 +#endif
13451 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13452 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13453 shl $16, %eax
13454 addl %esp, %eax /* the adjusted stack pointer */
13455 pushl_cfi $__KERNEL_DS
13456 @@ -816,7 +1065,7 @@ vector=vector+1
13457 .endr
13458 2: jmp common_interrupt
13459 .endr
13460 -END(irq_entries_start)
13461 +ENDPROC(irq_entries_start)
13462
13463 .previous
13464 END(interrupt)
13465 @@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13466 pushl_cfi $do_coprocessor_error
13467 jmp error_code
13468 CFI_ENDPROC
13469 -END(coprocessor_error)
13470 +ENDPROC(coprocessor_error)
13471
13472 ENTRY(simd_coprocessor_error)
13473 RING0_INT_FRAME
13474 @@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13475 #endif
13476 jmp error_code
13477 CFI_ENDPROC
13478 -END(simd_coprocessor_error)
13479 +ENDPROC(simd_coprocessor_error)
13480
13481 ENTRY(device_not_available)
13482 RING0_INT_FRAME
13483 @@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13484 pushl_cfi $do_device_not_available
13485 jmp error_code
13486 CFI_ENDPROC
13487 -END(device_not_available)
13488 +ENDPROC(device_not_available)
13489
13490 #ifdef CONFIG_PARAVIRT
13491 ENTRY(native_iret)
13492 @@ -902,12 +1151,12 @@ ENTRY(native_iret)
13493 .align 4
13494 .long native_iret, iret_exc
13495 .previous
13496 -END(native_iret)
13497 +ENDPROC(native_iret)
13498
13499 ENTRY(native_irq_enable_sysexit)
13500 sti
13501 sysexit
13502 -END(native_irq_enable_sysexit)
13503 +ENDPROC(native_irq_enable_sysexit)
13504 #endif
13505
13506 ENTRY(overflow)
13507 @@ -916,7 +1165,7 @@ ENTRY(overflow)
13508 pushl_cfi $do_overflow
13509 jmp error_code
13510 CFI_ENDPROC
13511 -END(overflow)
13512 +ENDPROC(overflow)
13513
13514 ENTRY(bounds)
13515 RING0_INT_FRAME
13516 @@ -924,7 +1173,7 @@ ENTRY(bounds)
13517 pushl_cfi $do_bounds
13518 jmp error_code
13519 CFI_ENDPROC
13520 -END(bounds)
13521 +ENDPROC(bounds)
13522
13523 ENTRY(invalid_op)
13524 RING0_INT_FRAME
13525 @@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13526 pushl_cfi $do_invalid_op
13527 jmp error_code
13528 CFI_ENDPROC
13529 -END(invalid_op)
13530 +ENDPROC(invalid_op)
13531
13532 ENTRY(coprocessor_segment_overrun)
13533 RING0_INT_FRAME
13534 @@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13535 pushl_cfi $do_coprocessor_segment_overrun
13536 jmp error_code
13537 CFI_ENDPROC
13538 -END(coprocessor_segment_overrun)
13539 +ENDPROC(coprocessor_segment_overrun)
13540
13541 ENTRY(invalid_TSS)
13542 RING0_EC_FRAME
13543 pushl_cfi $do_invalid_TSS
13544 jmp error_code
13545 CFI_ENDPROC
13546 -END(invalid_TSS)
13547 +ENDPROC(invalid_TSS)
13548
13549 ENTRY(segment_not_present)
13550 RING0_EC_FRAME
13551 pushl_cfi $do_segment_not_present
13552 jmp error_code
13553 CFI_ENDPROC
13554 -END(segment_not_present)
13555 +ENDPROC(segment_not_present)
13556
13557 ENTRY(stack_segment)
13558 RING0_EC_FRAME
13559 pushl_cfi $do_stack_segment
13560 jmp error_code
13561 CFI_ENDPROC
13562 -END(stack_segment)
13563 +ENDPROC(stack_segment)
13564
13565 ENTRY(alignment_check)
13566 RING0_EC_FRAME
13567 pushl_cfi $do_alignment_check
13568 jmp error_code
13569 CFI_ENDPROC
13570 -END(alignment_check)
13571 +ENDPROC(alignment_check)
13572
13573 ENTRY(divide_error)
13574 RING0_INT_FRAME
13575 @@ -976,7 +1225,7 @@ ENTRY(divide_error)
13576 pushl_cfi $do_divide_error
13577 jmp error_code
13578 CFI_ENDPROC
13579 -END(divide_error)
13580 +ENDPROC(divide_error)
13581
13582 #ifdef CONFIG_X86_MCE
13583 ENTRY(machine_check)
13584 @@ -985,7 +1234,7 @@ ENTRY(machine_check)
13585 pushl_cfi machine_check_vector
13586 jmp error_code
13587 CFI_ENDPROC
13588 -END(machine_check)
13589 +ENDPROC(machine_check)
13590 #endif
13591
13592 ENTRY(spurious_interrupt_bug)
13593 @@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13594 pushl_cfi $do_spurious_interrupt_bug
13595 jmp error_code
13596 CFI_ENDPROC
13597 -END(spurious_interrupt_bug)
13598 +ENDPROC(spurious_interrupt_bug)
13599 /*
13600 * End of kprobes section
13601 */
13602 @@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13603
13604 ENTRY(mcount)
13605 ret
13606 -END(mcount)
13607 +ENDPROC(mcount)
13608
13609 ENTRY(ftrace_caller)
13610 cmpl $0, function_trace_stop
13611 @@ -1138,7 +1387,7 @@ ftrace_graph_call:
13612 .globl ftrace_stub
13613 ftrace_stub:
13614 ret
13615 -END(ftrace_caller)
13616 +ENDPROC(ftrace_caller)
13617
13618 #else /* ! CONFIG_DYNAMIC_FTRACE */
13619
13620 @@ -1174,7 +1423,7 @@ trace:
13621 popl %ecx
13622 popl %eax
13623 jmp ftrace_stub
13624 -END(mcount)
13625 +ENDPROC(mcount)
13626 #endif /* CONFIG_DYNAMIC_FTRACE */
13627 #endif /* CONFIG_FUNCTION_TRACER */
13628
13629 @@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13630 popl %ecx
13631 popl %eax
13632 ret
13633 -END(ftrace_graph_caller)
13634 +ENDPROC(ftrace_graph_caller)
13635
13636 .globl return_to_handler
13637 return_to_handler:
13638 @@ -1209,7 +1458,6 @@ return_to_handler:
13639 jmp *%ecx
13640 #endif
13641
13642 -.section .rodata,"a"
13643 #include "syscall_table_32.S"
13644
13645 syscall_table_size=(.-sys_call_table)
13646 @@ -1255,15 +1503,18 @@ error_code:
13647 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13648 REG_TO_PTGS %ecx
13649 SET_KERNEL_GS %ecx
13650 - movl $(__USER_DS), %ecx
13651 + movl $(__KERNEL_DS), %ecx
13652 movl %ecx, %ds
13653 movl %ecx, %es
13654 +
13655 + pax_enter_kernel
13656 +
13657 TRACE_IRQS_OFF
13658 movl %esp,%eax # pt_regs pointer
13659 call *%edi
13660 jmp ret_from_exception
13661 CFI_ENDPROC
13662 -END(page_fault)
13663 +ENDPROC(page_fault)
13664
13665 /*
13666 * Debug traps and NMI can happen at the one SYSENTER instruction
13667 @@ -1305,7 +1556,7 @@ debug_stack_correct:
13668 call do_debug
13669 jmp ret_from_exception
13670 CFI_ENDPROC
13671 -END(debug)
13672 +ENDPROC(debug)
13673
13674 /*
13675 * NMI is doubly nasty. It can happen _while_ we're handling
13676 @@ -1342,6 +1593,9 @@ nmi_stack_correct:
13677 xorl %edx,%edx # zero error code
13678 movl %esp,%eax # pt_regs pointer
13679 call do_nmi
13680 +
13681 + pax_exit_kernel
13682 +
13683 jmp restore_all_notrace
13684 CFI_ENDPROC
13685
13686 @@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13687 FIXUP_ESPFIX_STACK # %eax == %esp
13688 xorl %edx,%edx # zero error code
13689 call do_nmi
13690 +
13691 + pax_exit_kernel
13692 +
13693 RESTORE_REGS
13694 lss 12+4(%esp), %esp # back to espfix stack
13695 CFI_ADJUST_CFA_OFFSET -24
13696 jmp irq_return
13697 CFI_ENDPROC
13698 -END(nmi)
13699 +ENDPROC(nmi)
13700
13701 ENTRY(int3)
13702 RING0_INT_FRAME
13703 @@ -1395,14 +1652,14 @@ ENTRY(int3)
13704 call do_int3
13705 jmp ret_from_exception
13706 CFI_ENDPROC
13707 -END(int3)
13708 +ENDPROC(int3)
13709
13710 ENTRY(general_protection)
13711 RING0_EC_FRAME
13712 pushl_cfi $do_general_protection
13713 jmp error_code
13714 CFI_ENDPROC
13715 -END(general_protection)
13716 +ENDPROC(general_protection)
13717
13718 #ifdef CONFIG_KVM_GUEST
13719 ENTRY(async_page_fault)
13720 @@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13721 pushl_cfi $do_async_page_fault
13722 jmp error_code
13723 CFI_ENDPROC
13724 -END(async_page_fault)
13725 +ENDPROC(async_page_fault)
13726 #endif
13727
13728 /*
13729 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13730 index faf8d5e..4f16a68 100644
13731 --- a/arch/x86/kernel/entry_64.S
13732 +++ b/arch/x86/kernel/entry_64.S
13733 @@ -55,6 +55,8 @@
13734 #include <asm/paravirt.h>
13735 #include <asm/ftrace.h>
13736 #include <asm/percpu.h>
13737 +#include <asm/pgtable.h>
13738 +#include <asm/alternative-asm.h>
13739
13740 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13741 #include <linux/elf-em.h>
13742 @@ -68,8 +70,9 @@
13743 #ifdef CONFIG_FUNCTION_TRACER
13744 #ifdef CONFIG_DYNAMIC_FTRACE
13745 ENTRY(mcount)
13746 + pax_force_retaddr
13747 retq
13748 -END(mcount)
13749 +ENDPROC(mcount)
13750
13751 ENTRY(ftrace_caller)
13752 cmpl $0, function_trace_stop
13753 @@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13754 #endif
13755
13756 GLOBAL(ftrace_stub)
13757 + pax_force_retaddr
13758 retq
13759 -END(ftrace_caller)
13760 +ENDPROC(ftrace_caller)
13761
13762 #else /* ! CONFIG_DYNAMIC_FTRACE */
13763 ENTRY(mcount)
13764 @@ -112,6 +116,7 @@ ENTRY(mcount)
13765 #endif
13766
13767 GLOBAL(ftrace_stub)
13768 + pax_force_retaddr
13769 retq
13770
13771 trace:
13772 @@ -121,12 +126,13 @@ trace:
13773 movq 8(%rbp), %rsi
13774 subq $MCOUNT_INSN_SIZE, %rdi
13775
13776 + pax_force_fptr ftrace_trace_function
13777 call *ftrace_trace_function
13778
13779 MCOUNT_RESTORE_FRAME
13780
13781 jmp ftrace_stub
13782 -END(mcount)
13783 +ENDPROC(mcount)
13784 #endif /* CONFIG_DYNAMIC_FTRACE */
13785 #endif /* CONFIG_FUNCTION_TRACER */
13786
13787 @@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13788
13789 MCOUNT_RESTORE_FRAME
13790
13791 + pax_force_retaddr
13792 retq
13793 -END(ftrace_graph_caller)
13794 +ENDPROC(ftrace_graph_caller)
13795
13796 GLOBAL(return_to_handler)
13797 subq $24, %rsp
13798 @@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13799 movq 8(%rsp), %rdx
13800 movq (%rsp), %rax
13801 addq $24, %rsp
13802 + pax_force_fptr %rdi
13803 jmp *%rdi
13804 #endif
13805
13806 @@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13807 ENDPROC(native_usergs_sysret64)
13808 #endif /* CONFIG_PARAVIRT */
13809
13810 + .macro ljmpq sel, off
13811 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13812 + .byte 0x48; ljmp *1234f(%rip)
13813 + .pushsection .rodata
13814 + .align 16
13815 + 1234: .quad \off; .word \sel
13816 + .popsection
13817 +#else
13818 + pushq $\sel
13819 + pushq $\off
13820 + lretq
13821 +#endif
13822 + .endm
13823 +
13824 + .macro pax_enter_kernel
13825 + pax_set_fptr_mask
13826 +#ifdef CONFIG_PAX_KERNEXEC
13827 + call pax_enter_kernel
13828 +#endif
13829 + .endm
13830 +
13831 + .macro pax_exit_kernel
13832 +#ifdef CONFIG_PAX_KERNEXEC
13833 + call pax_exit_kernel
13834 +#endif
13835 + .endm
13836 +
13837 +#ifdef CONFIG_PAX_KERNEXEC
13838 +ENTRY(pax_enter_kernel)
13839 + pushq %rdi
13840 +
13841 +#ifdef CONFIG_PARAVIRT
13842 + PV_SAVE_REGS(CLBR_RDI)
13843 +#endif
13844 +
13845 + GET_CR0_INTO_RDI
13846 + bts $16,%rdi
13847 + jnc 3f
13848 + mov %cs,%edi
13849 + cmp $__KERNEL_CS,%edi
13850 + jnz 2f
13851 +1:
13852 +
13853 +#ifdef CONFIG_PARAVIRT
13854 + PV_RESTORE_REGS(CLBR_RDI)
13855 +#endif
13856 +
13857 + popq %rdi
13858 + pax_force_retaddr
13859 + retq
13860 +
13861 +2: ljmpq __KERNEL_CS,1f
13862 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
13863 +4: SET_RDI_INTO_CR0
13864 + jmp 1b
13865 +ENDPROC(pax_enter_kernel)
13866 +
13867 +ENTRY(pax_exit_kernel)
13868 + pushq %rdi
13869 +
13870 +#ifdef CONFIG_PARAVIRT
13871 + PV_SAVE_REGS(CLBR_RDI)
13872 +#endif
13873 +
13874 + mov %cs,%rdi
13875 + cmp $__KERNEXEC_KERNEL_CS,%edi
13876 + jz 2f
13877 +1:
13878 +
13879 +#ifdef CONFIG_PARAVIRT
13880 + PV_RESTORE_REGS(CLBR_RDI);
13881 +#endif
13882 +
13883 + popq %rdi
13884 + pax_force_retaddr
13885 + retq
13886 +
13887 +2: GET_CR0_INTO_RDI
13888 + btr $16,%rdi
13889 + ljmpq __KERNEL_CS,3f
13890 +3: SET_RDI_INTO_CR0
13891 + jmp 1b
13892 +#ifdef CONFIG_PARAVIRT
13893 + PV_RESTORE_REGS(CLBR_RDI);
13894 +#endif
13895 +
13896 + popq %rdi
13897 + pax_force_retaddr
13898 + retq
13899 +ENDPROC(pax_exit_kernel)
13900 +#endif
13901 +
13902 + .macro pax_enter_kernel_user
13903 + pax_set_fptr_mask
13904 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13905 + call pax_enter_kernel_user
13906 +#endif
13907 + .endm
13908 +
13909 + .macro pax_exit_kernel_user
13910 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13911 + call pax_exit_kernel_user
13912 +#endif
13913 +#ifdef CONFIG_PAX_RANDKSTACK
13914 + pushq %rax
13915 + call pax_randomize_kstack
13916 + popq %rax
13917 +#endif
13918 + .endm
13919 +
13920 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13921 +ENTRY(pax_enter_kernel_user)
13922 + pushq %rdi
13923 + pushq %rbx
13924 +
13925 +#ifdef CONFIG_PARAVIRT
13926 + PV_SAVE_REGS(CLBR_RDI)
13927 +#endif
13928 +
13929 + GET_CR3_INTO_RDI
13930 + mov %rdi,%rbx
13931 + add $__START_KERNEL_map,%rbx
13932 + sub phys_base(%rip),%rbx
13933 +
13934 +#ifdef CONFIG_PARAVIRT
13935 + pushq %rdi
13936 + cmpl $0, pv_info+PARAVIRT_enabled
13937 + jz 1f
13938 + i = 0
13939 + .rept USER_PGD_PTRS
13940 + mov i*8(%rbx),%rsi
13941 + mov $0,%sil
13942 + lea i*8(%rbx),%rdi
13943 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13944 + i = i + 1
13945 + .endr
13946 + jmp 2f
13947 +1:
13948 +#endif
13949 +
13950 + i = 0
13951 + .rept USER_PGD_PTRS
13952 + movb $0,i*8(%rbx)
13953 + i = i + 1
13954 + .endr
13955 +
13956 +#ifdef CONFIG_PARAVIRT
13957 +2: popq %rdi
13958 +#endif
13959 + SET_RDI_INTO_CR3
13960 +
13961 +#ifdef CONFIG_PAX_KERNEXEC
13962 + GET_CR0_INTO_RDI
13963 + bts $16,%rdi
13964 + SET_RDI_INTO_CR0
13965 +#endif
13966 +
13967 +#ifdef CONFIG_PARAVIRT
13968 + PV_RESTORE_REGS(CLBR_RDI)
13969 +#endif
13970 +
13971 + popq %rbx
13972 + popq %rdi
13973 + pax_force_retaddr
13974 + retq
13975 +ENDPROC(pax_enter_kernel_user)
13976 +
13977 +ENTRY(pax_exit_kernel_user)
13978 + push %rdi
13979 +
13980 +#ifdef CONFIG_PARAVIRT
13981 + pushq %rbx
13982 + PV_SAVE_REGS(CLBR_RDI)
13983 +#endif
13984 +
13985 +#ifdef CONFIG_PAX_KERNEXEC
13986 + GET_CR0_INTO_RDI
13987 + btr $16,%rdi
13988 + SET_RDI_INTO_CR0
13989 +#endif
13990 +
13991 + GET_CR3_INTO_RDI
13992 + add $__START_KERNEL_map,%rdi
13993 + sub phys_base(%rip),%rdi
13994 +
13995 +#ifdef CONFIG_PARAVIRT
13996 + cmpl $0, pv_info+PARAVIRT_enabled
13997 + jz 1f
13998 + mov %rdi,%rbx
13999 + i = 0
14000 + .rept USER_PGD_PTRS
14001 + mov i*8(%rbx),%rsi
14002 + mov $0x67,%sil
14003 + lea i*8(%rbx),%rdi
14004 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
14005 + i = i + 1
14006 + .endr
14007 + jmp 2f
14008 +1:
14009 +#endif
14010 +
14011 + i = 0
14012 + .rept USER_PGD_PTRS
14013 + movb $0x67,i*8(%rdi)
14014 + i = i + 1
14015 + .endr
14016 +
14017 +#ifdef CONFIG_PARAVIRT
14018 +2: PV_RESTORE_REGS(CLBR_RDI)
14019 + popq %rbx
14020 +#endif
14021 +
14022 + popq %rdi
14023 + pax_force_retaddr
14024 + retq
14025 +ENDPROC(pax_exit_kernel_user)
14026 +#endif
14027 +
14028 +.macro pax_erase_kstack
14029 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14030 + call pax_erase_kstack
14031 +#endif
14032 +.endm
14033 +
14034 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14035 +/*
14036 + * r11: thread_info
14037 + * rcx, rdx: can be clobbered
14038 + */
14039 +ENTRY(pax_erase_kstack)
14040 + pushq %rdi
14041 + pushq %rax
14042 + pushq %r11
14043 +
14044 + GET_THREAD_INFO(%r11)
14045 + mov TI_lowest_stack(%r11), %rdi
14046 + mov $-0xBEEF, %rax
14047 + std
14048 +
14049 +1: mov %edi, %ecx
14050 + and $THREAD_SIZE_asm - 1, %ecx
14051 + shr $3, %ecx
14052 + repne scasq
14053 + jecxz 2f
14054 +
14055 + cmp $2*8, %ecx
14056 + jc 2f
14057 +
14058 + mov $2*8, %ecx
14059 + repe scasq
14060 + jecxz 2f
14061 + jne 1b
14062 +
14063 +2: cld
14064 + mov %esp, %ecx
14065 + sub %edi, %ecx
14066 +
14067 + cmp $THREAD_SIZE_asm, %rcx
14068 + jb 3f
14069 + ud2
14070 +3:
14071 +
14072 + shr $3, %ecx
14073 + rep stosq
14074 +
14075 + mov TI_task_thread_sp0(%r11), %rdi
14076 + sub $256, %rdi
14077 + mov %rdi, TI_lowest_stack(%r11)
14078 +
14079 + popq %r11
14080 + popq %rax
14081 + popq %rdi
14082 + pax_force_retaddr
14083 + ret
14084 +ENDPROC(pax_erase_kstack)
14085 +#endif
14086
14087 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
14088 #ifdef CONFIG_TRACE_IRQFLAGS
14089 @@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
14090 .endm
14091
14092 .macro UNFAKE_STACK_FRAME
14093 - addq $8*6, %rsp
14094 - CFI_ADJUST_CFA_OFFSET -(6*8)
14095 + addq $8*6 + ARG_SKIP, %rsp
14096 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
14097 .endm
14098
14099 /*
14100 @@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
14101 movq %rsp, %rsi
14102
14103 leaq -RBP(%rsp),%rdi /* arg1 for handler */
14104 - testl $3, CS(%rdi)
14105 + testb $3, CS(%rdi)
14106 je 1f
14107 SWAPGS
14108 /*
14109 @@ -355,9 +639,10 @@ ENTRY(save_rest)
14110 movq_cfi r15, R15+16
14111 movq %r11, 8(%rsp) /* return address */
14112 FIXUP_TOP_OF_STACK %r11, 16
14113 + pax_force_retaddr
14114 ret
14115 CFI_ENDPROC
14116 -END(save_rest)
14117 +ENDPROC(save_rest)
14118
14119 /* save complete stack frame */
14120 .pushsection .kprobes.text, "ax"
14121 @@ -386,9 +671,10 @@ ENTRY(save_paranoid)
14122 js 1f /* negative -> in kernel */
14123 SWAPGS
14124 xorl %ebx,%ebx
14125 -1: ret
14126 +1: pax_force_retaddr_bts
14127 + ret
14128 CFI_ENDPROC
14129 -END(save_paranoid)
14130 +ENDPROC(save_paranoid)
14131 .popsection
14132
14133 /*
14134 @@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
14135
14136 RESTORE_REST
14137
14138 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14139 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14140 je int_ret_from_sys_call
14141
14142 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
14143 @@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
14144 jmp ret_from_sys_call # go to the SYSRET fastpath
14145
14146 CFI_ENDPROC
14147 -END(ret_from_fork)
14148 +ENDPROC(ret_from_fork)
14149
14150 /*
14151 * System call entry. Up to 6 arguments in registers are supported.
14152 @@ -456,7 +742,7 @@ END(ret_from_fork)
14153 ENTRY(system_call)
14154 CFI_STARTPROC simple
14155 CFI_SIGNAL_FRAME
14156 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14157 + CFI_DEF_CFA rsp,0
14158 CFI_REGISTER rip,rcx
14159 /*CFI_REGISTER rflags,r11*/
14160 SWAPGS_UNSAFE_STACK
14161 @@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
14162
14163 movq %rsp,PER_CPU_VAR(old_rsp)
14164 movq PER_CPU_VAR(kernel_stack),%rsp
14165 + SAVE_ARGS 8*6,0
14166 + pax_enter_kernel_user
14167 /*
14168 * No need to follow this irqs off/on section - it's straight
14169 * and short:
14170 */
14171 ENABLE_INTERRUPTS(CLBR_NONE)
14172 - SAVE_ARGS 8,0
14173 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14174 movq %rcx,RIP-ARGOFFSET(%rsp)
14175 CFI_REL_OFFSET rip,RIP-ARGOFFSET
14176 @@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
14177 system_call_fastpath:
14178 cmpq $__NR_syscall_max,%rax
14179 ja badsys
14180 - movq %r10,%rcx
14181 + movq R10-ARGOFFSET(%rsp),%rcx
14182 call *sys_call_table(,%rax,8) # XXX: rip relative
14183 movq %rax,RAX-ARGOFFSET(%rsp)
14184 /*
14185 @@ -503,6 +790,8 @@ sysret_check:
14186 andl %edi,%edx
14187 jnz sysret_careful
14188 CFI_REMEMBER_STATE
14189 + pax_exit_kernel_user
14190 + pax_erase_kstack
14191 /*
14192 * sysretq will re-enable interrupts:
14193 */
14194 @@ -554,14 +843,18 @@ badsys:
14195 * jump back to the normal fast path.
14196 */
14197 auditsys:
14198 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
14199 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
14200 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
14201 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
14202 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
14203 movq %rax,%rsi /* 2nd arg: syscall number */
14204 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
14205 call audit_syscall_entry
14206 +
14207 + pax_erase_kstack
14208 +
14209 LOAD_ARGS 0 /* reload call-clobbered registers */
14210 + pax_set_fptr_mask
14211 jmp system_call_fastpath
14212
14213 /*
14214 @@ -591,16 +884,20 @@ tracesys:
14215 FIXUP_TOP_OF_STACK %rdi
14216 movq %rsp,%rdi
14217 call syscall_trace_enter
14218 +
14219 + pax_erase_kstack
14220 +
14221 /*
14222 * Reload arg registers from stack in case ptrace changed them.
14223 * We don't reload %rax because syscall_trace_enter() returned
14224 * the value it wants us to use in the table lookup.
14225 */
14226 LOAD_ARGS ARGOFFSET, 1
14227 + pax_set_fptr_mask
14228 RESTORE_REST
14229 cmpq $__NR_syscall_max,%rax
14230 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
14231 - movq %r10,%rcx /* fixup for C */
14232 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
14233 call *sys_call_table(,%rax,8)
14234 movq %rax,RAX-ARGOFFSET(%rsp)
14235 /* Use IRET because user could have changed frame */
14236 @@ -612,7 +909,7 @@ tracesys:
14237 GLOBAL(int_ret_from_sys_call)
14238 DISABLE_INTERRUPTS(CLBR_NONE)
14239 TRACE_IRQS_OFF
14240 - testl $3,CS-ARGOFFSET(%rsp)
14241 + testb $3,CS-ARGOFFSET(%rsp)
14242 je retint_restore_args
14243 movl $_TIF_ALLWORK_MASK,%edi
14244 /* edi: mask to check */
14245 @@ -623,6 +920,7 @@ GLOBAL(int_with_check)
14246 andl %edi,%edx
14247 jnz int_careful
14248 andl $~TS_COMPAT,TI_status(%rcx)
14249 + pax_erase_kstack
14250 jmp retint_swapgs
14251
14252 /* Either reschedule or signal or syscall exit tracking needed. */
14253 @@ -669,7 +967,7 @@ int_restore_rest:
14254 TRACE_IRQS_OFF
14255 jmp int_with_check
14256 CFI_ENDPROC
14257 -END(system_call)
14258 +ENDPROC(system_call)
14259
14260 /*
14261 * Certain special system calls that need to save a complete full stack frame.
14262 @@ -685,7 +983,7 @@ ENTRY(\label)
14263 call \func
14264 jmp ptregscall_common
14265 CFI_ENDPROC
14266 -END(\label)
14267 +ENDPROC(\label)
14268 .endm
14269
14270 PTREGSCALL stub_clone, sys_clone, %r8
14271 @@ -703,9 +1001,10 @@ ENTRY(ptregscall_common)
14272 movq_cfi_restore R12+8, r12
14273 movq_cfi_restore RBP+8, rbp
14274 movq_cfi_restore RBX+8, rbx
14275 + pax_force_retaddr
14276 ret $REST_SKIP /* pop extended registers */
14277 CFI_ENDPROC
14278 -END(ptregscall_common)
14279 +ENDPROC(ptregscall_common)
14280
14281 ENTRY(stub_execve)
14282 CFI_STARTPROC
14283 @@ -720,7 +1019,7 @@ ENTRY(stub_execve)
14284 RESTORE_REST
14285 jmp int_ret_from_sys_call
14286 CFI_ENDPROC
14287 -END(stub_execve)
14288 +ENDPROC(stub_execve)
14289
14290 /*
14291 * sigreturn is special because it needs to restore all registers on return.
14292 @@ -738,7 +1037,7 @@ ENTRY(stub_rt_sigreturn)
14293 RESTORE_REST
14294 jmp int_ret_from_sys_call
14295 CFI_ENDPROC
14296 -END(stub_rt_sigreturn)
14297 +ENDPROC(stub_rt_sigreturn)
14298
14299 /*
14300 * Build the entry stubs and pointer table with some assembler magic.
14301 @@ -773,7 +1072,7 @@ vector=vector+1
14302 2: jmp common_interrupt
14303 .endr
14304 CFI_ENDPROC
14305 -END(irq_entries_start)
14306 +ENDPROC(irq_entries_start)
14307
14308 .previous
14309 END(interrupt)
14310 @@ -793,6 +1092,16 @@ END(interrupt)
14311 subq $ORIG_RAX-RBP, %rsp
14312 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14313 SAVE_ARGS_IRQ
14314 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14315 + testb $3, CS(%rdi)
14316 + jnz 1f
14317 + pax_enter_kernel
14318 + jmp 2f
14319 +1: pax_enter_kernel_user
14320 +2:
14321 +#else
14322 + pax_enter_kernel
14323 +#endif
14324 call \func
14325 .endm
14326
14327 @@ -824,7 +1133,7 @@ ret_from_intr:
14328
14329 exit_intr:
14330 GET_THREAD_INFO(%rcx)
14331 - testl $3,CS-ARGOFFSET(%rsp)
14332 + testb $3,CS-ARGOFFSET(%rsp)
14333 je retint_kernel
14334
14335 /* Interrupt came from user space */
14336 @@ -846,12 +1155,15 @@ retint_swapgs: /* return to user-space */
14337 * The iretq could re-enable interrupts:
14338 */
14339 DISABLE_INTERRUPTS(CLBR_ANY)
14340 + pax_exit_kernel_user
14341 TRACE_IRQS_IRETQ
14342 SWAPGS
14343 jmp restore_args
14344
14345 retint_restore_args: /* return to kernel space */
14346 DISABLE_INTERRUPTS(CLBR_ANY)
14347 + pax_exit_kernel
14348 + pax_force_retaddr RIP-ARGOFFSET
14349 /*
14350 * The iretq could re-enable interrupts:
14351 */
14352 @@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
14353 #endif
14354
14355 CFI_ENDPROC
14356 -END(common_interrupt)
14357 +ENDPROC(common_interrupt)
14358 /*
14359 * End of kprobes section
14360 */
14361 @@ -956,7 +1268,7 @@ ENTRY(\sym)
14362 interrupt \do_sym
14363 jmp ret_from_intr
14364 CFI_ENDPROC
14365 -END(\sym)
14366 +ENDPROC(\sym)
14367 .endm
14368
14369 #ifdef CONFIG_SMP
14370 @@ -1021,12 +1333,22 @@ ENTRY(\sym)
14371 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14372 call error_entry
14373 DEFAULT_FRAME 0
14374 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14375 + testb $3, CS(%rsp)
14376 + jnz 1f
14377 + pax_enter_kernel
14378 + jmp 2f
14379 +1: pax_enter_kernel_user
14380 +2:
14381 +#else
14382 + pax_enter_kernel
14383 +#endif
14384 movq %rsp,%rdi /* pt_regs pointer */
14385 xorl %esi,%esi /* no error code */
14386 call \do_sym
14387 jmp error_exit /* %ebx: no swapgs flag */
14388 CFI_ENDPROC
14389 -END(\sym)
14390 +ENDPROC(\sym)
14391 .endm
14392
14393 .macro paranoidzeroentry sym do_sym
14394 @@ -1038,15 +1360,25 @@ ENTRY(\sym)
14395 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14396 call save_paranoid
14397 TRACE_IRQS_OFF
14398 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14399 + testb $3, CS(%rsp)
14400 + jnz 1f
14401 + pax_enter_kernel
14402 + jmp 2f
14403 +1: pax_enter_kernel_user
14404 +2:
14405 +#else
14406 + pax_enter_kernel
14407 +#endif
14408 movq %rsp,%rdi /* pt_regs pointer */
14409 xorl %esi,%esi /* no error code */
14410 call \do_sym
14411 jmp paranoid_exit /* %ebx: no swapgs flag */
14412 CFI_ENDPROC
14413 -END(\sym)
14414 +ENDPROC(\sym)
14415 .endm
14416
14417 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14418 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14419 .macro paranoidzeroentry_ist sym do_sym ist
14420 ENTRY(\sym)
14421 INTR_FRAME
14422 @@ -1056,14 +1388,30 @@ ENTRY(\sym)
14423 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14424 call save_paranoid
14425 TRACE_IRQS_OFF
14426 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14427 + testb $3, CS(%rsp)
14428 + jnz 1f
14429 + pax_enter_kernel
14430 + jmp 2f
14431 +1: pax_enter_kernel_user
14432 +2:
14433 +#else
14434 + pax_enter_kernel
14435 +#endif
14436 movq %rsp,%rdi /* pt_regs pointer */
14437 xorl %esi,%esi /* no error code */
14438 +#ifdef CONFIG_SMP
14439 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14440 + lea init_tss(%r12), %r12
14441 +#else
14442 + lea init_tss(%rip), %r12
14443 +#endif
14444 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14445 call \do_sym
14446 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14447 jmp paranoid_exit /* %ebx: no swapgs flag */
14448 CFI_ENDPROC
14449 -END(\sym)
14450 +ENDPROC(\sym)
14451 .endm
14452
14453 .macro errorentry sym do_sym
14454 @@ -1074,13 +1422,23 @@ ENTRY(\sym)
14455 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14456 call error_entry
14457 DEFAULT_FRAME 0
14458 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14459 + testb $3, CS(%rsp)
14460 + jnz 1f
14461 + pax_enter_kernel
14462 + jmp 2f
14463 +1: pax_enter_kernel_user
14464 +2:
14465 +#else
14466 + pax_enter_kernel
14467 +#endif
14468 movq %rsp,%rdi /* pt_regs pointer */
14469 movq ORIG_RAX(%rsp),%rsi /* get error code */
14470 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14471 call \do_sym
14472 jmp error_exit /* %ebx: no swapgs flag */
14473 CFI_ENDPROC
14474 -END(\sym)
14475 +ENDPROC(\sym)
14476 .endm
14477
14478 /* error code is on the stack already */
14479 @@ -1093,13 +1451,23 @@ ENTRY(\sym)
14480 call save_paranoid
14481 DEFAULT_FRAME 0
14482 TRACE_IRQS_OFF
14483 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14484 + testb $3, CS(%rsp)
14485 + jnz 1f
14486 + pax_enter_kernel
14487 + jmp 2f
14488 +1: pax_enter_kernel_user
14489 +2:
14490 +#else
14491 + pax_enter_kernel
14492 +#endif
14493 movq %rsp,%rdi /* pt_regs pointer */
14494 movq ORIG_RAX(%rsp),%rsi /* get error code */
14495 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14496 call \do_sym
14497 jmp paranoid_exit /* %ebx: no swapgs flag */
14498 CFI_ENDPROC
14499 -END(\sym)
14500 +ENDPROC(\sym)
14501 .endm
14502
14503 zeroentry divide_error do_divide_error
14504 @@ -1129,9 +1497,10 @@ gs_change:
14505 2: mfence /* workaround */
14506 SWAPGS
14507 popfq_cfi
14508 + pax_force_retaddr
14509 ret
14510 CFI_ENDPROC
14511 -END(native_load_gs_index)
14512 +ENDPROC(native_load_gs_index)
14513
14514 .section __ex_table,"a"
14515 .align 8
14516 @@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
14517 * Here we are in the child and the registers are set as they were
14518 * at kernel_thread() invocation in the parent.
14519 */
14520 + pax_force_fptr %rsi
14521 call *%rsi
14522 # exit
14523 mov %eax, %edi
14524 call do_exit
14525 ud2 # padding for call trace
14526 CFI_ENDPROC
14527 -END(kernel_thread_helper)
14528 +ENDPROC(kernel_thread_helper)
14529
14530 /*
14531 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14532 @@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
14533 RESTORE_REST
14534 testq %rax,%rax
14535 je int_ret_from_sys_call
14536 - RESTORE_ARGS
14537 UNFAKE_STACK_FRAME
14538 + pax_force_retaddr
14539 ret
14540 CFI_ENDPROC
14541 -END(kernel_execve)
14542 +ENDPROC(kernel_execve)
14543
14544 /* Call softirq on interrupt stack. Interrupts are off. */
14545 ENTRY(call_softirq)
14546 @@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
14547 CFI_DEF_CFA_REGISTER rsp
14548 CFI_ADJUST_CFA_OFFSET -8
14549 decl PER_CPU_VAR(irq_count)
14550 + pax_force_retaddr
14551 ret
14552 CFI_ENDPROC
14553 -END(call_softirq)
14554 +ENDPROC(call_softirq)
14555
14556 #ifdef CONFIG_XEN
14557 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14558 @@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14559 decl PER_CPU_VAR(irq_count)
14560 jmp error_exit
14561 CFI_ENDPROC
14562 -END(xen_do_hypervisor_callback)
14563 +ENDPROC(xen_do_hypervisor_callback)
14564
14565 /*
14566 * Hypervisor uses this for application faults while it executes.
14567 @@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
14568 SAVE_ALL
14569 jmp error_exit
14570 CFI_ENDPROC
14571 -END(xen_failsafe_callback)
14572 +ENDPROC(xen_failsafe_callback)
14573
14574 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14575 xen_hvm_callback_vector xen_evtchn_do_upcall
14576 @@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
14577 TRACE_IRQS_OFF
14578 testl %ebx,%ebx /* swapgs needed? */
14579 jnz paranoid_restore
14580 - testl $3,CS(%rsp)
14581 + testb $3,CS(%rsp)
14582 jnz paranoid_userspace
14583 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14584 + pax_exit_kernel
14585 + TRACE_IRQS_IRETQ 0
14586 + SWAPGS_UNSAFE_STACK
14587 + RESTORE_ALL 8
14588 + pax_force_retaddr_bts
14589 + jmp irq_return
14590 +#endif
14591 paranoid_swapgs:
14592 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14593 + pax_exit_kernel_user
14594 +#else
14595 + pax_exit_kernel
14596 +#endif
14597 TRACE_IRQS_IRETQ 0
14598 SWAPGS_UNSAFE_STACK
14599 RESTORE_ALL 8
14600 jmp irq_return
14601 paranoid_restore:
14602 + pax_exit_kernel
14603 TRACE_IRQS_IRETQ 0
14604 RESTORE_ALL 8
14605 + pax_force_retaddr_bts
14606 jmp irq_return
14607 paranoid_userspace:
14608 GET_THREAD_INFO(%rcx)
14609 @@ -1394,7 +1780,7 @@ paranoid_schedule:
14610 TRACE_IRQS_OFF
14611 jmp paranoid_userspace
14612 CFI_ENDPROC
14613 -END(paranoid_exit)
14614 +ENDPROC(paranoid_exit)
14615
14616 /*
14617 * Exception entry point. This expects an error code/orig_rax on the stack.
14618 @@ -1421,12 +1807,13 @@ ENTRY(error_entry)
14619 movq_cfi r14, R14+8
14620 movq_cfi r15, R15+8
14621 xorl %ebx,%ebx
14622 - testl $3,CS+8(%rsp)
14623 + testb $3,CS+8(%rsp)
14624 je error_kernelspace
14625 error_swapgs:
14626 SWAPGS
14627 error_sti:
14628 TRACE_IRQS_OFF
14629 + pax_force_retaddr_bts
14630 ret
14631
14632 /*
14633 @@ -1453,7 +1840,7 @@ bstep_iret:
14634 movq %rcx,RIP+8(%rsp)
14635 jmp error_swapgs
14636 CFI_ENDPROC
14637 -END(error_entry)
14638 +ENDPROC(error_entry)
14639
14640
14641 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14642 @@ -1473,7 +1860,7 @@ ENTRY(error_exit)
14643 jnz retint_careful
14644 jmp retint_swapgs
14645 CFI_ENDPROC
14646 -END(error_exit)
14647 +ENDPROC(error_exit)
14648
14649
14650 /* runs on exception stack */
14651 @@ -1485,6 +1872,16 @@ ENTRY(nmi)
14652 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14653 call save_paranoid
14654 DEFAULT_FRAME 0
14655 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14656 + testb $3, CS(%rsp)
14657 + jnz 1f
14658 + pax_enter_kernel
14659 + jmp 2f
14660 +1: pax_enter_kernel_user
14661 +2:
14662 +#else
14663 + pax_enter_kernel
14664 +#endif
14665 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14666 movq %rsp,%rdi
14667 movq $-1,%rsi
14668 @@ -1495,12 +1892,28 @@ ENTRY(nmi)
14669 DISABLE_INTERRUPTS(CLBR_NONE)
14670 testl %ebx,%ebx /* swapgs needed? */
14671 jnz nmi_restore
14672 - testl $3,CS(%rsp)
14673 + testb $3,CS(%rsp)
14674 jnz nmi_userspace
14675 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14676 + pax_exit_kernel
14677 + SWAPGS_UNSAFE_STACK
14678 + RESTORE_ALL 8
14679 + pax_force_retaddr_bts
14680 + jmp irq_return
14681 +#endif
14682 nmi_swapgs:
14683 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14684 + pax_exit_kernel_user
14685 +#else
14686 + pax_exit_kernel
14687 +#endif
14688 SWAPGS_UNSAFE_STACK
14689 + RESTORE_ALL 8
14690 + jmp irq_return
14691 nmi_restore:
14692 + pax_exit_kernel
14693 RESTORE_ALL 8
14694 + pax_force_retaddr_bts
14695 jmp irq_return
14696 nmi_userspace:
14697 GET_THREAD_INFO(%rcx)
14698 @@ -1529,14 +1942,14 @@ nmi_schedule:
14699 jmp paranoid_exit
14700 CFI_ENDPROC
14701 #endif
14702 -END(nmi)
14703 +ENDPROC(nmi)
14704
14705 ENTRY(ignore_sysret)
14706 CFI_STARTPROC
14707 mov $-ENOSYS,%eax
14708 sysret
14709 CFI_ENDPROC
14710 -END(ignore_sysret)
14711 +ENDPROC(ignore_sysret)
14712
14713 /*
14714 * End of kprobes section
14715 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14716 index c9a281f..ce2f317 100644
14717 --- a/arch/x86/kernel/ftrace.c
14718 +++ b/arch/x86/kernel/ftrace.c
14719 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14720 static const void *mod_code_newcode; /* holds the text to write to the IP */
14721
14722 static unsigned nmi_wait_count;
14723 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14724 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14725
14726 int ftrace_arch_read_dyn_info(char *buf, int size)
14727 {
14728 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14729
14730 r = snprintf(buf, size, "%u %u",
14731 nmi_wait_count,
14732 - atomic_read(&nmi_update_count));
14733 + atomic_read_unchecked(&nmi_update_count));
14734 return r;
14735 }
14736
14737 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14738
14739 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14740 smp_rmb();
14741 + pax_open_kernel();
14742 ftrace_mod_code();
14743 - atomic_inc(&nmi_update_count);
14744 + pax_close_kernel();
14745 + atomic_inc_unchecked(&nmi_update_count);
14746 }
14747 /* Must have previous changes seen before executions */
14748 smp_mb();
14749 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14750 {
14751 unsigned char replaced[MCOUNT_INSN_SIZE];
14752
14753 + ip = ktla_ktva(ip);
14754 +
14755 /*
14756 * Note: Due to modules and __init, code can
14757 * disappear and change, we need to protect against faulting
14758 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14759 unsigned char old[MCOUNT_INSN_SIZE], *new;
14760 int ret;
14761
14762 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14763 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14764 new = ftrace_call_replace(ip, (unsigned long)func);
14765 ret = ftrace_modify_code(ip, old, new);
14766
14767 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14768 {
14769 unsigned char code[MCOUNT_INSN_SIZE];
14770
14771 + ip = ktla_ktva(ip);
14772 +
14773 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14774 return -EFAULT;
14775
14776 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14777 index 3bb0850..55a56f4 100644
14778 --- a/arch/x86/kernel/head32.c
14779 +++ b/arch/x86/kernel/head32.c
14780 @@ -19,6 +19,7 @@
14781 #include <asm/io_apic.h>
14782 #include <asm/bios_ebda.h>
14783 #include <asm/tlbflush.h>
14784 +#include <asm/boot.h>
14785
14786 static void __init i386_default_early_setup(void)
14787 {
14788 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14789 {
14790 memblock_init();
14791
14792 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14793 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14794
14795 #ifdef CONFIG_BLK_DEV_INITRD
14796 /* Reserve INITRD */
14797 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14798 index ce0be7c..c41476e 100644
14799 --- a/arch/x86/kernel/head_32.S
14800 +++ b/arch/x86/kernel/head_32.S
14801 @@ -25,6 +25,12 @@
14802 /* Physical address */
14803 #define pa(X) ((X) - __PAGE_OFFSET)
14804
14805 +#ifdef CONFIG_PAX_KERNEXEC
14806 +#define ta(X) (X)
14807 +#else
14808 +#define ta(X) ((X) - __PAGE_OFFSET)
14809 +#endif
14810 +
14811 /*
14812 * References to members of the new_cpu_data structure.
14813 */
14814 @@ -54,11 +60,7 @@
14815 * and small than max_low_pfn, otherwise will waste some page table entries
14816 */
14817
14818 -#if PTRS_PER_PMD > 1
14819 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14820 -#else
14821 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14822 -#endif
14823 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14824
14825 /* Number of possible pages in the lowmem region */
14826 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14827 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14828 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14829
14830 /*
14831 + * Real beginning of normal "text" segment
14832 + */
14833 +ENTRY(stext)
14834 +ENTRY(_stext)
14835 +
14836 +/*
14837 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14838 * %esi points to the real-mode code as a 32-bit pointer.
14839 * CS and DS must be 4 GB flat segments, but we don't depend on
14840 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14841 * can.
14842 */
14843 __HEAD
14844 +
14845 +#ifdef CONFIG_PAX_KERNEXEC
14846 + jmp startup_32
14847 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14848 +.fill PAGE_SIZE-5,1,0xcc
14849 +#endif
14850 +
14851 ENTRY(startup_32)
14852 movl pa(stack_start),%ecx
14853
14854 @@ -105,6 +120,57 @@ ENTRY(startup_32)
14855 2:
14856 leal -__PAGE_OFFSET(%ecx),%esp
14857
14858 +#ifdef CONFIG_SMP
14859 + movl $pa(cpu_gdt_table),%edi
14860 + movl $__per_cpu_load,%eax
14861 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14862 + rorl $16,%eax
14863 + movb %al,__KERNEL_PERCPU + 4(%edi)
14864 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14865 + movl $__per_cpu_end - 1,%eax
14866 + subl $__per_cpu_start,%eax
14867 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14868 +#endif
14869 +
14870 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14871 + movl $NR_CPUS,%ecx
14872 + movl $pa(cpu_gdt_table),%edi
14873 +1:
14874 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14875 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14876 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14877 + addl $PAGE_SIZE_asm,%edi
14878 + loop 1b
14879 +#endif
14880 +
14881 +#ifdef CONFIG_PAX_KERNEXEC
14882 + movl $pa(boot_gdt),%edi
14883 + movl $__LOAD_PHYSICAL_ADDR,%eax
14884 + movw %ax,__BOOT_CS + 2(%edi)
14885 + rorl $16,%eax
14886 + movb %al,__BOOT_CS + 4(%edi)
14887 + movb %ah,__BOOT_CS + 7(%edi)
14888 + rorl $16,%eax
14889 +
14890 + ljmp $(__BOOT_CS),$1f
14891 +1:
14892 +
14893 + movl $NR_CPUS,%ecx
14894 + movl $pa(cpu_gdt_table),%edi
14895 + addl $__PAGE_OFFSET,%eax
14896 +1:
14897 + movw %ax,__KERNEL_CS + 2(%edi)
14898 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14899 + rorl $16,%eax
14900 + movb %al,__KERNEL_CS + 4(%edi)
14901 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14902 + movb %ah,__KERNEL_CS + 7(%edi)
14903 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14904 + rorl $16,%eax
14905 + addl $PAGE_SIZE_asm,%edi
14906 + loop 1b
14907 +#endif
14908 +
14909 /*
14910 * Clear BSS first so that there are no surprises...
14911 */
14912 @@ -195,8 +261,11 @@ ENTRY(startup_32)
14913 movl %eax, pa(max_pfn_mapped)
14914
14915 /* Do early initialization of the fixmap area */
14916 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14917 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14918 +#ifdef CONFIG_COMPAT_VDSO
14919 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14920 +#else
14921 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14922 +#endif
14923 #else /* Not PAE */
14924
14925 page_pde_offset = (__PAGE_OFFSET >> 20);
14926 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14927 movl %eax, pa(max_pfn_mapped)
14928
14929 /* Do early initialization of the fixmap area */
14930 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14931 - movl %eax,pa(initial_page_table+0xffc)
14932 +#ifdef CONFIG_COMPAT_VDSO
14933 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14934 +#else
14935 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14936 +#endif
14937 #endif
14938
14939 #ifdef CONFIG_PARAVIRT
14940 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14941 cmpl $num_subarch_entries, %eax
14942 jae bad_subarch
14943
14944 - movl pa(subarch_entries)(,%eax,4), %eax
14945 - subl $__PAGE_OFFSET, %eax
14946 - jmp *%eax
14947 + jmp *pa(subarch_entries)(,%eax,4)
14948
14949 bad_subarch:
14950 WEAK(lguest_entry)
14951 @@ -255,10 +325,10 @@ WEAK(xen_entry)
14952 __INITDATA
14953
14954 subarch_entries:
14955 - .long default_entry /* normal x86/PC */
14956 - .long lguest_entry /* lguest hypervisor */
14957 - .long xen_entry /* Xen hypervisor */
14958 - .long default_entry /* Moorestown MID */
14959 + .long ta(default_entry) /* normal x86/PC */
14960 + .long ta(lguest_entry) /* lguest hypervisor */
14961 + .long ta(xen_entry) /* Xen hypervisor */
14962 + .long ta(default_entry) /* Moorestown MID */
14963 num_subarch_entries = (. - subarch_entries) / 4
14964 .previous
14965 #else
14966 @@ -312,6 +382,7 @@ default_entry:
14967 orl %edx,%eax
14968 movl %eax,%cr4
14969
14970 +#ifdef CONFIG_X86_PAE
14971 testb $X86_CR4_PAE, %al # check if PAE is enabled
14972 jz 6f
14973
14974 @@ -340,6 +411,9 @@ default_entry:
14975 /* Make changes effective */
14976 wrmsr
14977
14978 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14979 +#endif
14980 +
14981 6:
14982
14983 /*
14984 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14985 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14986 movl %eax,%ss # after changing gdt.
14987
14988 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14989 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14990 movl %eax,%ds
14991 movl %eax,%es
14992
14993 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14994 */
14995 cmpb $0,ready
14996 jne 1f
14997 - movl $gdt_page,%eax
14998 + movl $cpu_gdt_table,%eax
14999 movl $stack_canary,%ecx
15000 +#ifdef CONFIG_SMP
15001 + addl $__per_cpu_load,%ecx
15002 +#endif
15003 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
15004 shrl $16, %ecx
15005 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
15006 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
15007 1:
15008 -#endif
15009 movl $(__KERNEL_STACK_CANARY),%eax
15010 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15011 + movl $(__USER_DS),%eax
15012 +#else
15013 + xorl %eax,%eax
15014 +#endif
15015 movl %eax,%gs
15016
15017 xorl %eax,%eax # Clear LDT
15018 @@ -558,22 +639,22 @@ early_page_fault:
15019 jmp early_fault
15020
15021 early_fault:
15022 - cld
15023 #ifdef CONFIG_PRINTK
15024 + cmpl $1,%ss:early_recursion_flag
15025 + je hlt_loop
15026 + incl %ss:early_recursion_flag
15027 + cld
15028 pusha
15029 movl $(__KERNEL_DS),%eax
15030 movl %eax,%ds
15031 movl %eax,%es
15032 - cmpl $2,early_recursion_flag
15033 - je hlt_loop
15034 - incl early_recursion_flag
15035 movl %cr2,%eax
15036 pushl %eax
15037 pushl %edx /* trapno */
15038 pushl $fault_msg
15039 call printk
15040 +; call dump_stack
15041 #endif
15042 - call dump_stack
15043 hlt_loop:
15044 hlt
15045 jmp hlt_loop
15046 @@ -581,8 +662,11 @@ hlt_loop:
15047 /* This is the default interrupt "handler" :-) */
15048 ALIGN
15049 ignore_int:
15050 - cld
15051 #ifdef CONFIG_PRINTK
15052 + cmpl $2,%ss:early_recursion_flag
15053 + je hlt_loop
15054 + incl %ss:early_recursion_flag
15055 + cld
15056 pushl %eax
15057 pushl %ecx
15058 pushl %edx
15059 @@ -591,9 +675,6 @@ ignore_int:
15060 movl $(__KERNEL_DS),%eax
15061 movl %eax,%ds
15062 movl %eax,%es
15063 - cmpl $2,early_recursion_flag
15064 - je hlt_loop
15065 - incl early_recursion_flag
15066 pushl 16(%esp)
15067 pushl 24(%esp)
15068 pushl 32(%esp)
15069 @@ -622,29 +703,43 @@ ENTRY(initial_code)
15070 /*
15071 * BSS section
15072 */
15073 -__PAGE_ALIGNED_BSS
15074 - .align PAGE_SIZE
15075 #ifdef CONFIG_X86_PAE
15076 +.section .initial_pg_pmd,"a",@progbits
15077 initial_pg_pmd:
15078 .fill 1024*KPMDS,4,0
15079 #else
15080 +.section .initial_page_table,"a",@progbits
15081 ENTRY(initial_page_table)
15082 .fill 1024,4,0
15083 #endif
15084 +.section .initial_pg_fixmap,"a",@progbits
15085 initial_pg_fixmap:
15086 .fill 1024,4,0
15087 +.section .empty_zero_page,"a",@progbits
15088 ENTRY(empty_zero_page)
15089 .fill 4096,1,0
15090 +.section .swapper_pg_dir,"a",@progbits
15091 ENTRY(swapper_pg_dir)
15092 +#ifdef CONFIG_X86_PAE
15093 + .fill 4,8,0
15094 +#else
15095 .fill 1024,4,0
15096 +#endif
15097 +
15098 +/*
15099 + * The IDT has to be page-aligned to simplify the Pentium
15100 + * F0 0F bug workaround.. We have a special link segment
15101 + * for this.
15102 + */
15103 +.section .idt,"a",@progbits
15104 +ENTRY(idt_table)
15105 + .fill 256,8,0
15106
15107 /*
15108 * This starts the data section.
15109 */
15110 #ifdef CONFIG_X86_PAE
15111 -__PAGE_ALIGNED_DATA
15112 - /* Page-aligned for the benefit of paravirt? */
15113 - .align PAGE_SIZE
15114 +.section .initial_page_table,"a",@progbits
15115 ENTRY(initial_page_table)
15116 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
15117 # if KPMDS == 3
15118 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
15119 # error "Kernel PMDs should be 1, 2 or 3"
15120 # endif
15121 .align PAGE_SIZE /* needs to be page-sized too */
15122 +
15123 +#ifdef CONFIG_PAX_PER_CPU_PGD
15124 +ENTRY(cpu_pgd)
15125 + .rept NR_CPUS
15126 + .fill 4,8,0
15127 + .endr
15128 +#endif
15129 +
15130 #endif
15131
15132 .data
15133 .balign 4
15134 ENTRY(stack_start)
15135 - .long init_thread_union+THREAD_SIZE
15136 + .long init_thread_union+THREAD_SIZE-8
15137
15138 +ready: .byte 0
15139 +
15140 +.section .rodata,"a",@progbits
15141 early_recursion_flag:
15142 .long 0
15143
15144 -ready: .byte 0
15145 -
15146 int_msg:
15147 .asciz "Unknown interrupt or fault at: %p %p %p\n"
15148
15149 @@ -707,7 +811,7 @@ fault_msg:
15150 .word 0 # 32 bit align gdt_desc.address
15151 boot_gdt_descr:
15152 .word __BOOT_DS+7
15153 - .long boot_gdt - __PAGE_OFFSET
15154 + .long pa(boot_gdt)
15155
15156 .word 0 # 32-bit align idt_desc.address
15157 idt_descr:
15158 @@ -718,7 +822,7 @@ idt_descr:
15159 .word 0 # 32 bit align gdt_desc.address
15160 ENTRY(early_gdt_descr)
15161 .word GDT_ENTRIES*8-1
15162 - .long gdt_page /* Overwritten for secondary CPUs */
15163 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
15164
15165 /*
15166 * The boot_gdt must mirror the equivalent in setup.S and is
15167 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
15168 .align L1_CACHE_BYTES
15169 ENTRY(boot_gdt)
15170 .fill GDT_ENTRY_BOOT_CS,8,0
15171 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
15172 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
15173 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
15174 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
15175 +
15176 + .align PAGE_SIZE_asm
15177 +ENTRY(cpu_gdt_table)
15178 + .rept NR_CPUS
15179 + .quad 0x0000000000000000 /* NULL descriptor */
15180 + .quad 0x0000000000000000 /* 0x0b reserved */
15181 + .quad 0x0000000000000000 /* 0x13 reserved */
15182 + .quad 0x0000000000000000 /* 0x1b reserved */
15183 +
15184 +#ifdef CONFIG_PAX_KERNEXEC
15185 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
15186 +#else
15187 + .quad 0x0000000000000000 /* 0x20 unused */
15188 +#endif
15189 +
15190 + .quad 0x0000000000000000 /* 0x28 unused */
15191 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
15192 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
15193 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
15194 + .quad 0x0000000000000000 /* 0x4b reserved */
15195 + .quad 0x0000000000000000 /* 0x53 reserved */
15196 + .quad 0x0000000000000000 /* 0x5b reserved */
15197 +
15198 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
15199 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
15200 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
15201 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
15202 +
15203 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
15204 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
15205 +
15206 + /*
15207 + * Segments used for calling PnP BIOS have byte granularity.
15208 + * The code segments and data segments have fixed 64k limits,
15209 + * the transfer segment sizes are set at run time.
15210 + */
15211 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
15212 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
15213 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
15214 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
15215 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
15216 +
15217 + /*
15218 + * The APM segments have byte granularity and their bases
15219 + * are set at run time. All have 64k limits.
15220 + */
15221 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
15222 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
15223 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
15224 +
15225 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
15226 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
15227 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
15228 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
15229 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
15230 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
15231 +
15232 + /* Be sure this is zeroed to avoid false validations in Xen */
15233 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
15234 + .endr
15235 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
15236 index e11e394..9aebc5d 100644
15237 --- a/arch/x86/kernel/head_64.S
15238 +++ b/arch/x86/kernel/head_64.S
15239 @@ -19,6 +19,8 @@
15240 #include <asm/cache.h>
15241 #include <asm/processor-flags.h>
15242 #include <asm/percpu.h>
15243 +#include <asm/cpufeature.h>
15244 +#include <asm/alternative-asm.h>
15245
15246 #ifdef CONFIG_PARAVIRT
15247 #include <asm/asm-offsets.h>
15248 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
15249 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
15250 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
15251 L3_START_KERNEL = pud_index(__START_KERNEL_map)
15252 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
15253 +L3_VMALLOC_START = pud_index(VMALLOC_START)
15254 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
15255 +L3_VMALLOC_END = pud_index(VMALLOC_END)
15256 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
15257 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
15258
15259 .text
15260 __HEAD
15261 @@ -85,35 +93,23 @@ startup_64:
15262 */
15263 addq %rbp, init_level4_pgt + 0(%rip)
15264 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
15265 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
15266 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
15267 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
15268 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
15269
15270 addq %rbp, level3_ident_pgt + 0(%rip)
15271 +#ifndef CONFIG_XEN
15272 + addq %rbp, level3_ident_pgt + 8(%rip)
15273 +#endif
15274
15275 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
15276 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
15277 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15278 +
15279 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15280 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15281
15282 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15283 -
15284 - /* Add an Identity mapping if I am above 1G */
15285 - leaq _text(%rip), %rdi
15286 - andq $PMD_PAGE_MASK, %rdi
15287 -
15288 - movq %rdi, %rax
15289 - shrq $PUD_SHIFT, %rax
15290 - andq $(PTRS_PER_PUD - 1), %rax
15291 - jz ident_complete
15292 -
15293 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15294 - leaq level3_ident_pgt(%rip), %rbx
15295 - movq %rdx, 0(%rbx, %rax, 8)
15296 -
15297 - movq %rdi, %rax
15298 - shrq $PMD_SHIFT, %rax
15299 - andq $(PTRS_PER_PMD - 1), %rax
15300 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15301 - leaq level2_spare_pgt(%rip), %rbx
15302 - movq %rdx, 0(%rbx, %rax, 8)
15303 -ident_complete:
15304 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15305
15306 /*
15307 * Fixup the kernel text+data virtual addresses. Note that
15308 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15309 * after the boot processor executes this code.
15310 */
15311
15312 - /* Enable PAE mode and PGE */
15313 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15314 + /* Enable PAE mode and PSE/PGE */
15315 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15316 movq %rax, %cr4
15317
15318 /* Setup early boot stage 4 level pagetables. */
15319 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15320 movl $MSR_EFER, %ecx
15321 rdmsr
15322 btsl $_EFER_SCE, %eax /* Enable System Call */
15323 - btl $20,%edi /* No Execute supported? */
15324 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15325 jnc 1f
15326 btsl $_EFER_NX, %eax
15327 + leaq init_level4_pgt(%rip), %rdi
15328 +#ifndef CONFIG_EFI
15329 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15330 +#endif
15331 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15332 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15333 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15334 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15335 1: wrmsr /* Make changes effective */
15336
15337 /* Setup cr0 */
15338 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15339 * jump. In addition we need to ensure %cs is set so we make this
15340 * a far return.
15341 */
15342 + pax_set_fptr_mask
15343 movq initial_code(%rip),%rax
15344 pushq $0 # fake return address to stop unwinder
15345 pushq $__KERNEL_CS # set correct cs
15346 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15347 bad_address:
15348 jmp bad_address
15349
15350 - .section ".init.text","ax"
15351 + __INIT
15352 #ifdef CONFIG_EARLY_PRINTK
15353 .globl early_idt_handlers
15354 early_idt_handlers:
15355 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15356 #endif /* EARLY_PRINTK */
15357 1: hlt
15358 jmp 1b
15359 + .previous
15360
15361 #ifdef CONFIG_EARLY_PRINTK
15362 + __INITDATA
15363 early_recursion_flag:
15364 .long 0
15365 + .previous
15366
15367 + .section .rodata,"a",@progbits
15368 early_idt_msg:
15369 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15370 early_idt_ripmsg:
15371 .asciz "RIP %s\n"
15372 + .previous
15373 #endif /* CONFIG_EARLY_PRINTK */
15374 - .previous
15375
15376 + .section .rodata,"a",@progbits
15377 #define NEXT_PAGE(name) \
15378 .balign PAGE_SIZE; \
15379 ENTRY(name)
15380 @@ -338,7 +348,6 @@ ENTRY(name)
15381 i = i + 1 ; \
15382 .endr
15383
15384 - .data
15385 /*
15386 * This default setting generates an ident mapping at address 0x100000
15387 * and a mapping for the kernel that precisely maps virtual address
15388 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15389 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15390 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15391 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15392 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
15393 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15394 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
15395 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15396 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15397 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15398 .org init_level4_pgt + L4_START_KERNEL*8, 0
15399 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15400 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15401
15402 +#ifdef CONFIG_PAX_PER_CPU_PGD
15403 +NEXT_PAGE(cpu_pgd)
15404 + .rept NR_CPUS
15405 + .fill 512,8,0
15406 + .endr
15407 +#endif
15408 +
15409 NEXT_PAGE(level3_ident_pgt)
15410 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15411 +#ifdef CONFIG_XEN
15412 .fill 511,8,0
15413 +#else
15414 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15415 + .fill 510,8,0
15416 +#endif
15417 +
15418 +NEXT_PAGE(level3_vmalloc_start_pgt)
15419 + .fill 512,8,0
15420 +
15421 +NEXT_PAGE(level3_vmalloc_end_pgt)
15422 + .fill 512,8,0
15423 +
15424 +NEXT_PAGE(level3_vmemmap_pgt)
15425 + .fill L3_VMEMMAP_START,8,0
15426 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15427
15428 NEXT_PAGE(level3_kernel_pgt)
15429 .fill L3_START_KERNEL,8,0
15430 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15431 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15432 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15433
15434 +NEXT_PAGE(level2_vmemmap_pgt)
15435 + .fill 512,8,0
15436 +
15437 NEXT_PAGE(level2_fixmap_pgt)
15438 - .fill 506,8,0
15439 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15440 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15441 - .fill 5,8,0
15442 + .fill 507,8,0
15443 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15444 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15445 + .fill 4,8,0
15446
15447 -NEXT_PAGE(level1_fixmap_pgt)
15448 +NEXT_PAGE(level1_vsyscall_pgt)
15449 .fill 512,8,0
15450
15451 -NEXT_PAGE(level2_ident_pgt)
15452 - /* Since I easily can, map the first 1G.
15453 + /* Since I easily can, map the first 2G.
15454 * Don't set NX because code runs from these pages.
15455 */
15456 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15457 +NEXT_PAGE(level2_ident_pgt)
15458 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15459
15460 NEXT_PAGE(level2_kernel_pgt)
15461 /*
15462 @@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15463 * If you want to increase this then increase MODULES_VADDR
15464 * too.)
15465 */
15466 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15467 - KERNEL_IMAGE_SIZE/PMD_SIZE)
15468 -
15469 -NEXT_PAGE(level2_spare_pgt)
15470 - .fill 512, 8, 0
15471 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15472
15473 #undef PMDS
15474 #undef NEXT_PAGE
15475
15476 - .data
15477 + .align PAGE_SIZE
15478 +ENTRY(cpu_gdt_table)
15479 + .rept NR_CPUS
15480 + .quad 0x0000000000000000 /* NULL descriptor */
15481 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15482 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
15483 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
15484 + .quad 0x00cffb000000ffff /* __USER32_CS */
15485 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15486 + .quad 0x00affb000000ffff /* __USER_CS */
15487 +
15488 +#ifdef CONFIG_PAX_KERNEXEC
15489 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15490 +#else
15491 + .quad 0x0 /* unused */
15492 +#endif
15493 +
15494 + .quad 0,0 /* TSS */
15495 + .quad 0,0 /* LDT */
15496 + .quad 0,0,0 /* three TLS descriptors */
15497 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
15498 + /* asm/segment.h:GDT_ENTRIES must match this */
15499 +
15500 + /* zero the remaining page */
15501 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15502 + .endr
15503 +
15504 .align 16
15505 .globl early_gdt_descr
15506 early_gdt_descr:
15507 .word GDT_ENTRIES*8-1
15508 early_gdt_descr_base:
15509 - .quad INIT_PER_CPU_VAR(gdt_page)
15510 + .quad cpu_gdt_table
15511
15512 ENTRY(phys_base)
15513 /* This must match the first entry in level2_kernel_pgt */
15514 .quad 0x0000000000000000
15515
15516 #include "../../x86/xen/xen-head.S"
15517 -
15518 - .section .bss, "aw", @nobits
15519 +
15520 + .section .rodata,"a",@progbits
15521 .align L1_CACHE_BYTES
15522 ENTRY(idt_table)
15523 - .skip IDT_ENTRIES * 16
15524 + .fill 512,8,0
15525
15526 __PAGE_ALIGNED_BSS
15527 .align PAGE_SIZE
15528 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15529 index 9c3bd4a..e1d9b35 100644
15530 --- a/arch/x86/kernel/i386_ksyms_32.c
15531 +++ b/arch/x86/kernel/i386_ksyms_32.c
15532 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15533 EXPORT_SYMBOL(cmpxchg8b_emu);
15534 #endif
15535
15536 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15537 +
15538 /* Networking helper routines. */
15539 EXPORT_SYMBOL(csum_partial_copy_generic);
15540 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15541 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15542
15543 EXPORT_SYMBOL(__get_user_1);
15544 EXPORT_SYMBOL(__get_user_2);
15545 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15546
15547 EXPORT_SYMBOL(csum_partial);
15548 EXPORT_SYMBOL(empty_zero_page);
15549 +
15550 +#ifdef CONFIG_PAX_KERNEXEC
15551 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15552 +#endif
15553 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15554 index 6104852..6114160 100644
15555 --- a/arch/x86/kernel/i8259.c
15556 +++ b/arch/x86/kernel/i8259.c
15557 @@ -210,7 +210,7 @@ spurious_8259A_irq:
15558 "spurious 8259A interrupt: IRQ%d.\n", irq);
15559 spurious_irq_mask |= irqmask;
15560 }
15561 - atomic_inc(&irq_err_count);
15562 + atomic_inc_unchecked(&irq_err_count);
15563 /*
15564 * Theoretically we do not have to handle this IRQ,
15565 * but in Linux this does not cause problems and is
15566 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15567 index 43e9ccf..44ccf6f 100644
15568 --- a/arch/x86/kernel/init_task.c
15569 +++ b/arch/x86/kernel/init_task.c
15570 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15571 * way process stacks are handled. This is done by having a special
15572 * "init_task" linker map entry..
15573 */
15574 -union thread_union init_thread_union __init_task_data =
15575 - { INIT_THREAD_INFO(init_task) };
15576 +union thread_union init_thread_union __init_task_data;
15577
15578 /*
15579 * Initial task structure.
15580 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15581 * section. Since TSS's are completely CPU-local, we want them
15582 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15583 */
15584 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15585 -
15586 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15587 +EXPORT_SYMBOL(init_tss);
15588 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15589 index 8c96897..be66bfa 100644
15590 --- a/arch/x86/kernel/ioport.c
15591 +++ b/arch/x86/kernel/ioport.c
15592 @@ -6,6 +6,7 @@
15593 #include <linux/sched.h>
15594 #include <linux/kernel.h>
15595 #include <linux/capability.h>
15596 +#include <linux/security.h>
15597 #include <linux/errno.h>
15598 #include <linux/types.h>
15599 #include <linux/ioport.h>
15600 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15601
15602 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15603 return -EINVAL;
15604 +#ifdef CONFIG_GRKERNSEC_IO
15605 + if (turn_on && grsec_disable_privio) {
15606 + gr_handle_ioperm();
15607 + return -EPERM;
15608 + }
15609 +#endif
15610 if (turn_on && !capable(CAP_SYS_RAWIO))
15611 return -EPERM;
15612
15613 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15614 * because the ->io_bitmap_max value must match the bitmap
15615 * contents:
15616 */
15617 - tss = &per_cpu(init_tss, get_cpu());
15618 + tss = init_tss + get_cpu();
15619
15620 if (turn_on)
15621 bitmap_clear(t->io_bitmap_ptr, from, num);
15622 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15623 return -EINVAL;
15624 /* Trying to gain more privileges? */
15625 if (level > old) {
15626 +#ifdef CONFIG_GRKERNSEC_IO
15627 + if (grsec_disable_privio) {
15628 + gr_handle_iopl();
15629 + return -EPERM;
15630 + }
15631 +#endif
15632 if (!capable(CAP_SYS_RAWIO))
15633 return -EPERM;
15634 }
15635 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15636 index 429e0c9..17b3ece 100644
15637 --- a/arch/x86/kernel/irq.c
15638 +++ b/arch/x86/kernel/irq.c
15639 @@ -18,7 +18,7 @@
15640 #include <asm/mce.h>
15641 #include <asm/hw_irq.h>
15642
15643 -atomic_t irq_err_count;
15644 +atomic_unchecked_t irq_err_count;
15645
15646 /* Function pointer for generic interrupt vector handling */
15647 void (*x86_platform_ipi_callback)(void) = NULL;
15648 @@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15649 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15650 seq_printf(p, " Machine check polls\n");
15651 #endif
15652 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15653 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15654 #if defined(CONFIG_X86_IO_APIC)
15655 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15656 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15657 #endif
15658 return 0;
15659 }
15660 @@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15661
15662 u64 arch_irq_stat(void)
15663 {
15664 - u64 sum = atomic_read(&irq_err_count);
15665 + u64 sum = atomic_read_unchecked(&irq_err_count);
15666
15667 #ifdef CONFIG_X86_IO_APIC
15668 - sum += atomic_read(&irq_mis_count);
15669 + sum += atomic_read_unchecked(&irq_mis_count);
15670 #endif
15671 return sum;
15672 }
15673 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15674 index 7209070..cbcd71a 100644
15675 --- a/arch/x86/kernel/irq_32.c
15676 +++ b/arch/x86/kernel/irq_32.c
15677 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15678 __asm__ __volatile__("andl %%esp,%0" :
15679 "=r" (sp) : "0" (THREAD_SIZE - 1));
15680
15681 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15682 + return sp < STACK_WARN;
15683 }
15684
15685 static void print_stack_overflow(void)
15686 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15687 * per-CPU IRQ handling contexts (thread information and stack)
15688 */
15689 union irq_ctx {
15690 - struct thread_info tinfo;
15691 - u32 stack[THREAD_SIZE/sizeof(u32)];
15692 + unsigned long previous_esp;
15693 + u32 stack[THREAD_SIZE/sizeof(u32)];
15694 } __attribute__((aligned(THREAD_SIZE)));
15695
15696 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15697 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15698 static inline int
15699 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15700 {
15701 - union irq_ctx *curctx, *irqctx;
15702 + union irq_ctx *irqctx;
15703 u32 *isp, arg1, arg2;
15704
15705 - curctx = (union irq_ctx *) current_thread_info();
15706 irqctx = __this_cpu_read(hardirq_ctx);
15707
15708 /*
15709 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15710 * handler) we can't do that and just have to keep using the
15711 * current stack (which is the irq stack already after all)
15712 */
15713 - if (unlikely(curctx == irqctx))
15714 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15715 return 0;
15716
15717 /* build the stack frame on the IRQ stack */
15718 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15719 - irqctx->tinfo.task = curctx->tinfo.task;
15720 - irqctx->tinfo.previous_esp = current_stack_pointer;
15721 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15722 + irqctx->previous_esp = current_stack_pointer;
15723
15724 - /*
15725 - * Copy the softirq bits in preempt_count so that the
15726 - * softirq checks work in the hardirq context.
15727 - */
15728 - irqctx->tinfo.preempt_count =
15729 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15730 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15731 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15732 + __set_fs(MAKE_MM_SEG(0));
15733 +#endif
15734
15735 if (unlikely(overflow))
15736 call_on_stack(print_stack_overflow, isp);
15737 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15738 : "0" (irq), "1" (desc), "2" (isp),
15739 "D" (desc->handle_irq)
15740 : "memory", "cc", "ecx");
15741 +
15742 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15743 + __set_fs(current_thread_info()->addr_limit);
15744 +#endif
15745 +
15746 return 1;
15747 }
15748
15749 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15750 */
15751 void __cpuinit irq_ctx_init(int cpu)
15752 {
15753 - union irq_ctx *irqctx;
15754 -
15755 if (per_cpu(hardirq_ctx, cpu))
15756 return;
15757
15758 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15759 - THREAD_FLAGS,
15760 - THREAD_ORDER));
15761 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15762 - irqctx->tinfo.cpu = cpu;
15763 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15764 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15765 -
15766 - per_cpu(hardirq_ctx, cpu) = irqctx;
15767 -
15768 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15769 - THREAD_FLAGS,
15770 - THREAD_ORDER));
15771 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15772 - irqctx->tinfo.cpu = cpu;
15773 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15774 -
15775 - per_cpu(softirq_ctx, cpu) = irqctx;
15776 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15777 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15778
15779 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15780 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15781 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15782 asmlinkage void do_softirq(void)
15783 {
15784 unsigned long flags;
15785 - struct thread_info *curctx;
15786 union irq_ctx *irqctx;
15787 u32 *isp;
15788
15789 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15790 local_irq_save(flags);
15791
15792 if (local_softirq_pending()) {
15793 - curctx = current_thread_info();
15794 irqctx = __this_cpu_read(softirq_ctx);
15795 - irqctx->tinfo.task = curctx->task;
15796 - irqctx->tinfo.previous_esp = current_stack_pointer;
15797 + irqctx->previous_esp = current_stack_pointer;
15798
15799 /* build the stack frame on the softirq stack */
15800 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15801 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15802 +
15803 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15804 + __set_fs(MAKE_MM_SEG(0));
15805 +#endif
15806
15807 call_on_stack(__do_softirq, isp);
15808 +
15809 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15810 + __set_fs(current_thread_info()->addr_limit);
15811 +#endif
15812 +
15813 /*
15814 * Shouldn't happen, we returned above if in_interrupt():
15815 */
15816 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
15817 index 69bca46..0bac999 100644
15818 --- a/arch/x86/kernel/irq_64.c
15819 +++ b/arch/x86/kernel/irq_64.c
15820 @@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
15821 #ifdef CONFIG_DEBUG_STACKOVERFLOW
15822 u64 curbase = (u64)task_stack_page(current);
15823
15824 - if (user_mode_vm(regs))
15825 + if (user_mode(regs))
15826 return;
15827
15828 WARN_ONCE(regs->sp >= curbase &&
15829 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15830 index faba577..93b9e71 100644
15831 --- a/arch/x86/kernel/kgdb.c
15832 +++ b/arch/x86/kernel/kgdb.c
15833 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15834 #ifdef CONFIG_X86_32
15835 switch (regno) {
15836 case GDB_SS:
15837 - if (!user_mode_vm(regs))
15838 + if (!user_mode(regs))
15839 *(unsigned long *)mem = __KERNEL_DS;
15840 break;
15841 case GDB_SP:
15842 - if (!user_mode_vm(regs))
15843 + if (!user_mode(regs))
15844 *(unsigned long *)mem = kernel_stack_pointer(regs);
15845 break;
15846 case GDB_GS:
15847 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15848 case 'k':
15849 /* clear the trace bit */
15850 linux_regs->flags &= ~X86_EFLAGS_TF;
15851 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15852 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15853
15854 /* set the trace bit if we're stepping */
15855 if (remcomInBuffer[0] == 's') {
15856 linux_regs->flags |= X86_EFLAGS_TF;
15857 - atomic_set(&kgdb_cpu_doing_single_step,
15858 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15859 raw_smp_processor_id());
15860 }
15861
15862 @@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15863
15864 switch (cmd) {
15865 case DIE_DEBUG:
15866 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15867 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15868 if (user_mode(regs))
15869 return single_step_cont(regs, args);
15870 break;
15871 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15872 index 7da647d..56fe348 100644
15873 --- a/arch/x86/kernel/kprobes.c
15874 +++ b/arch/x86/kernel/kprobes.c
15875 @@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15876 } __attribute__((packed)) *insn;
15877
15878 insn = (struct __arch_relative_insn *)from;
15879 +
15880 + pax_open_kernel();
15881 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15882 insn->op = op;
15883 + pax_close_kernel();
15884 }
15885
15886 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15887 @@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15888 kprobe_opcode_t opcode;
15889 kprobe_opcode_t *orig_opcodes = opcodes;
15890
15891 - if (search_exception_tables((unsigned long)opcodes))
15892 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15893 return 0; /* Page fault may occur on this address. */
15894
15895 retry:
15896 @@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15897 }
15898 }
15899 insn_get_length(&insn);
15900 + pax_open_kernel();
15901 memcpy(dest, insn.kaddr, insn.length);
15902 + pax_close_kernel();
15903
15904 #ifdef CONFIG_X86_64
15905 if (insn_rip_relative(&insn)) {
15906 @@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15907 (u8 *) dest;
15908 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15909 disp = (u8 *) dest + insn_offset_displacement(&insn);
15910 + pax_open_kernel();
15911 *(s32 *) disp = (s32) newdisp;
15912 + pax_close_kernel();
15913 }
15914 #endif
15915 return insn.length;
15916 @@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15917 */
15918 __copy_instruction(p->ainsn.insn, p->addr, 0);
15919
15920 - if (can_boost(p->addr))
15921 + if (can_boost(ktla_ktva(p->addr)))
15922 p->ainsn.boostable = 0;
15923 else
15924 p->ainsn.boostable = -1;
15925
15926 - p->opcode = *p->addr;
15927 + p->opcode = *(ktla_ktva(p->addr));
15928 }
15929
15930 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15931 @@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15932 * nor set current_kprobe, because it doesn't use single
15933 * stepping.
15934 */
15935 - regs->ip = (unsigned long)p->ainsn.insn;
15936 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15937 preempt_enable_no_resched();
15938 return;
15939 }
15940 @@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15941 if (p->opcode == BREAKPOINT_INSTRUCTION)
15942 regs->ip = (unsigned long)p->addr;
15943 else
15944 - regs->ip = (unsigned long)p->ainsn.insn;
15945 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15946 }
15947
15948 /*
15949 @@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15950 setup_singlestep(p, regs, kcb, 0);
15951 return 1;
15952 }
15953 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
15954 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15955 /*
15956 * The breakpoint instruction was removed right
15957 * after we hit it. Another cpu has removed
15958 @@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15959 " movq %rax, 152(%rsp)\n"
15960 RESTORE_REGS_STRING
15961 " popfq\n"
15962 +#ifdef KERNEXEC_PLUGIN
15963 + " btsq $63,(%rsp)\n"
15964 +#endif
15965 #else
15966 " pushf\n"
15967 SAVE_REGS_STRING
15968 @@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15969 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15970 {
15971 unsigned long *tos = stack_addr(regs);
15972 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15973 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15974 unsigned long orig_ip = (unsigned long)p->addr;
15975 kprobe_opcode_t *insn = p->ainsn.insn;
15976
15977 @@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15978 struct die_args *args = data;
15979 int ret = NOTIFY_DONE;
15980
15981 - if (args->regs && user_mode_vm(args->regs))
15982 + if (args->regs && user_mode(args->regs))
15983 return ret;
15984
15985 switch (val) {
15986 @@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15987 * Verify if the address gap is in 2GB range, because this uses
15988 * a relative jump.
15989 */
15990 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15991 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15992 if (abs(rel) > 0x7fffffff)
15993 return -ERANGE;
15994
15995 @@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15996 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15997
15998 /* Set probe function call */
15999 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
16000 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
16001
16002 /* Set returning jmp instruction at the tail of out-of-line buffer */
16003 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
16004 - (u8 *)op->kp.addr + op->optinsn.size);
16005 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
16006
16007 flush_icache_range((unsigned long) buf,
16008 (unsigned long) buf + TMPL_END_IDX +
16009 @@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
16010 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
16011
16012 /* Backup instructions which will be replaced by jump address */
16013 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
16014 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
16015 RELATIVE_ADDR_SIZE);
16016
16017 insn_buf[0] = RELATIVEJUMP_OPCODE;
16018 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
16019 index a9c2116..a52d4fc 100644
16020 --- a/arch/x86/kernel/kvm.c
16021 +++ b/arch/x86/kernel/kvm.c
16022 @@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
16023 pv_mmu_ops.set_pud = kvm_set_pud;
16024 #if PAGETABLE_LEVELS == 4
16025 pv_mmu_ops.set_pgd = kvm_set_pgd;
16026 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
16027 #endif
16028 #endif
16029 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
16030 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
16031 index ea69726..604d066 100644
16032 --- a/arch/x86/kernel/ldt.c
16033 +++ b/arch/x86/kernel/ldt.c
16034 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
16035 if (reload) {
16036 #ifdef CONFIG_SMP
16037 preempt_disable();
16038 - load_LDT(pc);
16039 + load_LDT_nolock(pc);
16040 if (!cpumask_equal(mm_cpumask(current->mm),
16041 cpumask_of(smp_processor_id())))
16042 smp_call_function(flush_ldt, current->mm, 1);
16043 preempt_enable();
16044 #else
16045 - load_LDT(pc);
16046 + load_LDT_nolock(pc);
16047 #endif
16048 }
16049 if (oldsize) {
16050 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
16051 return err;
16052
16053 for (i = 0; i < old->size; i++)
16054 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
16055 + write_ldt_entry(new->ldt, i, old->ldt + i);
16056 return 0;
16057 }
16058
16059 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
16060 retval = copy_ldt(&mm->context, &old_mm->context);
16061 mutex_unlock(&old_mm->context.lock);
16062 }
16063 +
16064 + if (tsk == current) {
16065 + mm->context.vdso = 0;
16066 +
16067 +#ifdef CONFIG_X86_32
16068 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
16069 + mm->context.user_cs_base = 0UL;
16070 + mm->context.user_cs_limit = ~0UL;
16071 +
16072 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16073 + cpus_clear(mm->context.cpu_user_cs_mask);
16074 +#endif
16075 +
16076 +#endif
16077 +#endif
16078 +
16079 + }
16080 +
16081 return retval;
16082 }
16083
16084 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
16085 }
16086 }
16087
16088 +#ifdef CONFIG_PAX_SEGMEXEC
16089 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
16090 + error = -EINVAL;
16091 + goto out_unlock;
16092 + }
16093 +#endif
16094 +
16095 fill_ldt(&ldt, &ldt_info);
16096 if (oldmode)
16097 ldt.avl = 0;
16098 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
16099 index a3fa43b..8966f4c 100644
16100 --- a/arch/x86/kernel/machine_kexec_32.c
16101 +++ b/arch/x86/kernel/machine_kexec_32.c
16102 @@ -27,7 +27,7 @@
16103 #include <asm/cacheflush.h>
16104 #include <asm/debugreg.h>
16105
16106 -static void set_idt(void *newidt, __u16 limit)
16107 +static void set_idt(struct desc_struct *newidt, __u16 limit)
16108 {
16109 struct desc_ptr curidt;
16110
16111 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
16112 }
16113
16114
16115 -static void set_gdt(void *newgdt, __u16 limit)
16116 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
16117 {
16118 struct desc_ptr curgdt;
16119
16120 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
16121 }
16122
16123 control_page = page_address(image->control_code_page);
16124 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
16125 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
16126
16127 relocate_kernel_ptr = control_page;
16128 page_list[PA_CONTROL_PAGE] = __pa(control_page);
16129 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
16130 index 3ca42d0..7cff8cc 100644
16131 --- a/arch/x86/kernel/microcode_intel.c
16132 +++ b/arch/x86/kernel/microcode_intel.c
16133 @@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
16134
16135 static int get_ucode_user(void *to, const void *from, size_t n)
16136 {
16137 - return copy_from_user(to, from, n);
16138 + return copy_from_user(to, (const void __force_user *)from, n);
16139 }
16140
16141 static enum ucode_state
16142 request_microcode_user(int cpu, const void __user *buf, size_t size)
16143 {
16144 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
16145 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
16146 }
16147
16148 static void microcode_fini_cpu(int cpu)
16149 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
16150 index 925179f..267ac7a 100644
16151 --- a/arch/x86/kernel/module.c
16152 +++ b/arch/x86/kernel/module.c
16153 @@ -36,15 +36,60 @@
16154 #define DEBUGP(fmt...)
16155 #endif
16156
16157 -void *module_alloc(unsigned long size)
16158 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
16159 {
16160 - if (PAGE_ALIGN(size) > MODULES_LEN)
16161 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
16162 return NULL;
16163 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
16164 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
16165 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
16166 -1, __builtin_return_address(0));
16167 }
16168
16169 +void *module_alloc(unsigned long size)
16170 +{
16171 +
16172 +#ifdef CONFIG_PAX_KERNEXEC
16173 + return __module_alloc(size, PAGE_KERNEL);
16174 +#else
16175 + return __module_alloc(size, PAGE_KERNEL_EXEC);
16176 +#endif
16177 +
16178 +}
16179 +
16180 +#ifdef CONFIG_PAX_KERNEXEC
16181 +#ifdef CONFIG_X86_32
16182 +void *module_alloc_exec(unsigned long size)
16183 +{
16184 + struct vm_struct *area;
16185 +
16186 + if (size == 0)
16187 + return NULL;
16188 +
16189 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
16190 + return area ? area->addr : NULL;
16191 +}
16192 +EXPORT_SYMBOL(module_alloc_exec);
16193 +
16194 +void module_free_exec(struct module *mod, void *module_region)
16195 +{
16196 + vunmap(module_region);
16197 +}
16198 +EXPORT_SYMBOL(module_free_exec);
16199 +#else
16200 +void module_free_exec(struct module *mod, void *module_region)
16201 +{
16202 + module_free(mod, module_region);
16203 +}
16204 +EXPORT_SYMBOL(module_free_exec);
16205 +
16206 +void *module_alloc_exec(unsigned long size)
16207 +{
16208 + return __module_alloc(size, PAGE_KERNEL_RX);
16209 +}
16210 +EXPORT_SYMBOL(module_alloc_exec);
16211 +#endif
16212 +#endif
16213 +
16214 #ifdef CONFIG_X86_32
16215 int apply_relocate(Elf32_Shdr *sechdrs,
16216 const char *strtab,
16217 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
16218 unsigned int i;
16219 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
16220 Elf32_Sym *sym;
16221 - uint32_t *location;
16222 + uint32_t *plocation, location;
16223
16224 DEBUGP("Applying relocate section %u to %u\n", relsec,
16225 sechdrs[relsec].sh_info);
16226 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
16227 /* This is where to make the change */
16228 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
16229 - + rel[i].r_offset;
16230 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
16231 + location = (uint32_t)plocation;
16232 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
16233 + plocation = ktla_ktva((void *)plocation);
16234 /* This is the symbol it is referring to. Note that all
16235 undefined symbols have been resolved. */
16236 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
16237 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
16238 switch (ELF32_R_TYPE(rel[i].r_info)) {
16239 case R_386_32:
16240 /* We add the value into the location given */
16241 - *location += sym->st_value;
16242 + pax_open_kernel();
16243 + *plocation += sym->st_value;
16244 + pax_close_kernel();
16245 break;
16246 case R_386_PC32:
16247 /* Add the value, subtract its postition */
16248 - *location += sym->st_value - (uint32_t)location;
16249 + pax_open_kernel();
16250 + *plocation += sym->st_value - location;
16251 + pax_close_kernel();
16252 break;
16253 default:
16254 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
16255 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
16256 case R_X86_64_NONE:
16257 break;
16258 case R_X86_64_64:
16259 + pax_open_kernel();
16260 *(u64 *)loc = val;
16261 + pax_close_kernel();
16262 break;
16263 case R_X86_64_32:
16264 + pax_open_kernel();
16265 *(u32 *)loc = val;
16266 + pax_close_kernel();
16267 if (val != *(u32 *)loc)
16268 goto overflow;
16269 break;
16270 case R_X86_64_32S:
16271 + pax_open_kernel();
16272 *(s32 *)loc = val;
16273 + pax_close_kernel();
16274 if ((s64)val != *(s32 *)loc)
16275 goto overflow;
16276 break;
16277 case R_X86_64_PC32:
16278 val -= (u64)loc;
16279 + pax_open_kernel();
16280 *(u32 *)loc = val;
16281 + pax_close_kernel();
16282 +
16283 #if 0
16284 if ((s64)val != *(s32 *)loc)
16285 goto overflow;
16286 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
16287 index e88f37b..1353db6 100644
16288 --- a/arch/x86/kernel/nmi.c
16289 +++ b/arch/x86/kernel/nmi.c
16290 @@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
16291 dotraplinkage notrace __kprobes void
16292 do_nmi(struct pt_regs *regs, long error_code)
16293 {
16294 +
16295 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16296 + if (!user_mode(regs)) {
16297 + unsigned long cs = regs->cs & 0xFFFF;
16298 + unsigned long ip = ktva_ktla(regs->ip);
16299 +
16300 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16301 + regs->ip = ip;
16302 + }
16303 +#endif
16304 +
16305 nmi_enter();
16306
16307 inc_irq_stat(__nmi_count);
16308 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
16309 index 676b8c7..870ba04 100644
16310 --- a/arch/x86/kernel/paravirt-spinlocks.c
16311 +++ b/arch/x86/kernel/paravirt-spinlocks.c
16312 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
16313 arch_spin_lock(lock);
16314 }
16315
16316 -struct pv_lock_ops pv_lock_ops = {
16317 +struct pv_lock_ops pv_lock_ops __read_only = {
16318 #ifdef CONFIG_SMP
16319 .spin_is_locked = __ticket_spin_is_locked,
16320 .spin_is_contended = __ticket_spin_is_contended,
16321 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
16322 index d90272e..6bb013b 100644
16323 --- a/arch/x86/kernel/paravirt.c
16324 +++ b/arch/x86/kernel/paravirt.c
16325 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
16326 {
16327 return x;
16328 }
16329 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16330 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16331 +#endif
16332
16333 void __init default_banner(void)
16334 {
16335 @@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16336 if (opfunc == NULL)
16337 /* If there's no function, patch it with a ud2a (BUG) */
16338 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16339 - else if (opfunc == _paravirt_nop)
16340 + else if (opfunc == (void *)_paravirt_nop)
16341 /* If the operation is a nop, then nop the callsite */
16342 ret = paravirt_patch_nop();
16343
16344 /* identity functions just return their single argument */
16345 - else if (opfunc == _paravirt_ident_32)
16346 + else if (opfunc == (void *)_paravirt_ident_32)
16347 ret = paravirt_patch_ident_32(insnbuf, len);
16348 - else if (opfunc == _paravirt_ident_64)
16349 + else if (opfunc == (void *)_paravirt_ident_64)
16350 ret = paravirt_patch_ident_64(insnbuf, len);
16351 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16352 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16353 + ret = paravirt_patch_ident_64(insnbuf, len);
16354 +#endif
16355
16356 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16357 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16358 @@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16359 if (insn_len > len || start == NULL)
16360 insn_len = len;
16361 else
16362 - memcpy(insnbuf, start, insn_len);
16363 + memcpy(insnbuf, ktla_ktva(start), insn_len);
16364
16365 return insn_len;
16366 }
16367 @@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
16368 preempt_enable();
16369 }
16370
16371 -struct pv_info pv_info = {
16372 +struct pv_info pv_info __read_only = {
16373 .name = "bare hardware",
16374 .paravirt_enabled = 0,
16375 .kernel_rpl = 0,
16376 @@ -313,16 +320,16 @@ struct pv_info pv_info = {
16377 #endif
16378 };
16379
16380 -struct pv_init_ops pv_init_ops = {
16381 +struct pv_init_ops pv_init_ops __read_only = {
16382 .patch = native_patch,
16383 };
16384
16385 -struct pv_time_ops pv_time_ops = {
16386 +struct pv_time_ops pv_time_ops __read_only = {
16387 .sched_clock = native_sched_clock,
16388 .steal_clock = native_steal_clock,
16389 };
16390
16391 -struct pv_irq_ops pv_irq_ops = {
16392 +struct pv_irq_ops pv_irq_ops __read_only = {
16393 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16394 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16395 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16396 @@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
16397 #endif
16398 };
16399
16400 -struct pv_cpu_ops pv_cpu_ops = {
16401 +struct pv_cpu_ops pv_cpu_ops __read_only = {
16402 .cpuid = native_cpuid,
16403 .get_debugreg = native_get_debugreg,
16404 .set_debugreg = native_set_debugreg,
16405 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16406 .end_context_switch = paravirt_nop,
16407 };
16408
16409 -struct pv_apic_ops pv_apic_ops = {
16410 +struct pv_apic_ops pv_apic_ops __read_only = {
16411 #ifdef CONFIG_X86_LOCAL_APIC
16412 .startup_ipi_hook = paravirt_nop,
16413 #endif
16414 };
16415
16416 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16417 +#ifdef CONFIG_X86_32
16418 +#ifdef CONFIG_X86_PAE
16419 +/* 64-bit pagetable entries */
16420 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16421 +#else
16422 /* 32-bit pagetable entries */
16423 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16424 +#endif
16425 #else
16426 /* 64-bit pagetable entries */
16427 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16428 #endif
16429
16430 -struct pv_mmu_ops pv_mmu_ops = {
16431 +struct pv_mmu_ops pv_mmu_ops __read_only = {
16432
16433 .read_cr2 = native_read_cr2,
16434 .write_cr2 = native_write_cr2,
16435 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16436 .make_pud = PTE_IDENT,
16437
16438 .set_pgd = native_set_pgd,
16439 + .set_pgd_batched = native_set_pgd_batched,
16440 #endif
16441 #endif /* PAGETABLE_LEVELS >= 3 */
16442
16443 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16444 },
16445
16446 .set_fixmap = native_set_fixmap,
16447 +
16448 +#ifdef CONFIG_PAX_KERNEXEC
16449 + .pax_open_kernel = native_pax_open_kernel,
16450 + .pax_close_kernel = native_pax_close_kernel,
16451 +#endif
16452 +
16453 };
16454
16455 EXPORT_SYMBOL_GPL(pv_time_ops);
16456 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16457 index 35ccf75..7a15747 100644
16458 --- a/arch/x86/kernel/pci-iommu_table.c
16459 +++ b/arch/x86/kernel/pci-iommu_table.c
16460 @@ -2,7 +2,7 @@
16461 #include <asm/iommu_table.h>
16462 #include <linux/string.h>
16463 #include <linux/kallsyms.h>
16464 -
16465 +#include <linux/sched.h>
16466
16467 #define DEBUG 1
16468
16469 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16470 index ee5d4fb..426649b 100644
16471 --- a/arch/x86/kernel/process.c
16472 +++ b/arch/x86/kernel/process.c
16473 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16474
16475 void free_thread_info(struct thread_info *ti)
16476 {
16477 - free_thread_xstate(ti->task);
16478 free_pages((unsigned long)ti, THREAD_ORDER);
16479 }
16480
16481 +static struct kmem_cache *task_struct_cachep;
16482 +
16483 void arch_task_cache_init(void)
16484 {
16485 - task_xstate_cachep =
16486 - kmem_cache_create("task_xstate", xstate_size,
16487 + /* create a slab on which task_structs can be allocated */
16488 + task_struct_cachep =
16489 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16490 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16491 +
16492 + task_xstate_cachep =
16493 + kmem_cache_create("task_xstate", xstate_size,
16494 __alignof__(union thread_xstate),
16495 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16496 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16497 +}
16498 +
16499 +struct task_struct *alloc_task_struct_node(int node)
16500 +{
16501 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16502 +}
16503 +
16504 +void free_task_struct(struct task_struct *task)
16505 +{
16506 + free_thread_xstate(task);
16507 + kmem_cache_free(task_struct_cachep, task);
16508 }
16509
16510 /*
16511 @@ -70,7 +87,7 @@ void exit_thread(void)
16512 unsigned long *bp = t->io_bitmap_ptr;
16513
16514 if (bp) {
16515 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16516 + struct tss_struct *tss = init_tss + get_cpu();
16517
16518 t->io_bitmap_ptr = NULL;
16519 clear_thread_flag(TIF_IO_BITMAP);
16520 @@ -106,7 +123,7 @@ void show_regs_common(void)
16521
16522 printk(KERN_CONT "\n");
16523 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16524 - current->pid, current->comm, print_tainted(),
16525 + task_pid_nr(current), current->comm, print_tainted(),
16526 init_utsname()->release,
16527 (int)strcspn(init_utsname()->version, " "),
16528 init_utsname()->version);
16529 @@ -120,6 +137,9 @@ void flush_thread(void)
16530 {
16531 struct task_struct *tsk = current;
16532
16533 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16534 + loadsegment(gs, 0);
16535 +#endif
16536 flush_ptrace_hw_breakpoint(tsk);
16537 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16538 /*
16539 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16540 regs.di = (unsigned long) arg;
16541
16542 #ifdef CONFIG_X86_32
16543 - regs.ds = __USER_DS;
16544 - regs.es = __USER_DS;
16545 + regs.ds = __KERNEL_DS;
16546 + regs.es = __KERNEL_DS;
16547 regs.fs = __KERNEL_PERCPU;
16548 - regs.gs = __KERNEL_STACK_CANARY;
16549 + savesegment(gs, regs.gs);
16550 #else
16551 regs.ss = __KERNEL_DS;
16552 #endif
16553 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16554
16555 return ret;
16556 }
16557 -void stop_this_cpu(void *dummy)
16558 +__noreturn void stop_this_cpu(void *dummy)
16559 {
16560 local_irq_disable();
16561 /*
16562 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16563 }
16564 early_param("idle", idle_setup);
16565
16566 -unsigned long arch_align_stack(unsigned long sp)
16567 +#ifdef CONFIG_PAX_RANDKSTACK
16568 +void pax_randomize_kstack(struct pt_regs *regs)
16569 {
16570 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16571 - sp -= get_random_int() % 8192;
16572 - return sp & ~0xf;
16573 -}
16574 + struct thread_struct *thread = &current->thread;
16575 + unsigned long time;
16576
16577 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16578 -{
16579 - unsigned long range_end = mm->brk + 0x02000000;
16580 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16581 -}
16582 + if (!randomize_va_space)
16583 + return;
16584 +
16585 + if (v8086_mode(regs))
16586 + return;
16587
16588 + rdtscl(time);
16589 +
16590 + /* P4 seems to return a 0 LSB, ignore it */
16591 +#ifdef CONFIG_MPENTIUM4
16592 + time &= 0x3EUL;
16593 + time <<= 2;
16594 +#elif defined(CONFIG_X86_64)
16595 + time &= 0xFUL;
16596 + time <<= 4;
16597 +#else
16598 + time &= 0x1FUL;
16599 + time <<= 3;
16600 +#endif
16601 +
16602 + thread->sp0 ^= time;
16603 + load_sp0(init_tss + smp_processor_id(), thread);
16604 +
16605 +#ifdef CONFIG_X86_64
16606 + percpu_write(kernel_stack, thread->sp0);
16607 +#endif
16608 +}
16609 +#endif
16610 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16611 index 8598296..bfadef0 100644
16612 --- a/arch/x86/kernel/process_32.c
16613 +++ b/arch/x86/kernel/process_32.c
16614 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16615 unsigned long thread_saved_pc(struct task_struct *tsk)
16616 {
16617 return ((unsigned long *)tsk->thread.sp)[3];
16618 +//XXX return tsk->thread.eip;
16619 }
16620
16621 #ifndef CONFIG_SMP
16622 @@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
16623 unsigned long sp;
16624 unsigned short ss, gs;
16625
16626 - if (user_mode_vm(regs)) {
16627 + if (user_mode(regs)) {
16628 sp = regs->sp;
16629 ss = regs->ss & 0xffff;
16630 - gs = get_user_gs(regs);
16631 } else {
16632 sp = kernel_stack_pointer(regs);
16633 savesegment(ss, ss);
16634 - savesegment(gs, gs);
16635 }
16636 + gs = get_user_gs(regs);
16637
16638 show_regs_common();
16639
16640 @@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16641 struct task_struct *tsk;
16642 int err;
16643
16644 - childregs = task_pt_regs(p);
16645 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16646 *childregs = *regs;
16647 childregs->ax = 0;
16648 childregs->sp = sp;
16649
16650 p->thread.sp = (unsigned long) childregs;
16651 p->thread.sp0 = (unsigned long) (childregs+1);
16652 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16653
16654 p->thread.ip = (unsigned long) ret_from_fork;
16655
16656 @@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16657 struct thread_struct *prev = &prev_p->thread,
16658 *next = &next_p->thread;
16659 int cpu = smp_processor_id();
16660 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16661 + struct tss_struct *tss = init_tss + cpu;
16662 fpu_switch_t fpu;
16663
16664 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16665 @@ -320,6 +321,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16666 */
16667 lazy_save_gs(prev->gs);
16668
16669 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16670 + __set_fs(task_thread_info(next_p)->addr_limit);
16671 +#endif
16672 +
16673 /*
16674 * Load the per-thread Thread-Local Storage descriptor.
16675 */
16676 @@ -350,6 +355,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16677 */
16678 arch_end_context_switch(next_p);
16679
16680 + percpu_write(current_task, next_p);
16681 + percpu_write(current_tinfo, &next_p->tinfo);
16682 +
16683 /*
16684 * Restore %gs if needed (which is common)
16685 */
16686 @@ -358,8 +366,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16687
16688 switch_fpu_finish(next_p, fpu);
16689
16690 - percpu_write(current_task, next_p);
16691 -
16692 return prev_p;
16693 }
16694
16695 @@ -389,4 +395,3 @@ unsigned long get_wchan(struct task_struct *p)
16696 } while (count++ < 16);
16697 return 0;
16698 }
16699 -
16700 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16701 index 6a364a6..b147d11 100644
16702 --- a/arch/x86/kernel/process_64.c
16703 +++ b/arch/x86/kernel/process_64.c
16704 @@ -89,7 +89,7 @@ static void __exit_idle(void)
16705 void exit_idle(void)
16706 {
16707 /* idle loop has pid 0 */
16708 - if (current->pid)
16709 + if (task_pid_nr(current))
16710 return;
16711 __exit_idle();
16712 }
16713 @@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16714 struct pt_regs *childregs;
16715 struct task_struct *me = current;
16716
16717 - childregs = ((struct pt_regs *)
16718 - (THREAD_SIZE + task_stack_page(p))) - 1;
16719 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16720 *childregs = *regs;
16721
16722 childregs->ax = 0;
16723 @@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16724 p->thread.sp = (unsigned long) childregs;
16725 p->thread.sp0 = (unsigned long) (childregs+1);
16726 p->thread.usersp = me->thread.usersp;
16727 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16728
16729 set_tsk_thread_flag(p, TIF_FORK);
16730
16731 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16732 struct thread_struct *prev = &prev_p->thread;
16733 struct thread_struct *next = &next_p->thread;
16734 int cpu = smp_processor_id();
16735 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16736 + struct tss_struct *tss = init_tss + cpu;
16737 unsigned fsindex, gsindex;
16738 fpu_switch_t fpu;
16739
16740 @@ -461,10 +461,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16741 prev->usersp = percpu_read(old_rsp);
16742 percpu_write(old_rsp, next->usersp);
16743 percpu_write(current_task, next_p);
16744 + percpu_write(current_tinfo, &next_p->tinfo);
16745
16746 - percpu_write(kernel_stack,
16747 - (unsigned long)task_stack_page(next_p) +
16748 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16749 + percpu_write(kernel_stack, next->sp0);
16750
16751 /*
16752 * Now maybe reload the debug registers and handle I/O bitmaps
16753 @@ -519,12 +518,11 @@ unsigned long get_wchan(struct task_struct *p)
16754 if (!p || p == current || p->state == TASK_RUNNING)
16755 return 0;
16756 stack = (unsigned long)task_stack_page(p);
16757 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16758 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16759 return 0;
16760 fp = *(u64 *)(p->thread.sp);
16761 do {
16762 - if (fp < (unsigned long)stack ||
16763 - fp >= (unsigned long)stack+THREAD_SIZE)
16764 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16765 return 0;
16766 ip = *(u64 *)(fp+8);
16767 if (!in_sched_functions(ip))
16768 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16769 index 8252879..d3219e0 100644
16770 --- a/arch/x86/kernel/ptrace.c
16771 +++ b/arch/x86/kernel/ptrace.c
16772 @@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16773 unsigned long addr, unsigned long data)
16774 {
16775 int ret;
16776 - unsigned long __user *datap = (unsigned long __user *)data;
16777 + unsigned long __user *datap = (__force unsigned long __user *)data;
16778
16779 switch (request) {
16780 /* read the word at location addr in the USER area. */
16781 @@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16782 if ((int) addr < 0)
16783 return -EIO;
16784 ret = do_get_thread_area(child, addr,
16785 - (struct user_desc __user *)data);
16786 + (__force struct user_desc __user *) data);
16787 break;
16788
16789 case PTRACE_SET_THREAD_AREA:
16790 if ((int) addr < 0)
16791 return -EIO;
16792 ret = do_set_thread_area(child, addr,
16793 - (struct user_desc __user *)data, 0);
16794 + (__force struct user_desc __user *) data, 0);
16795 break;
16796 #endif
16797
16798 @@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16799 memset(info, 0, sizeof(*info));
16800 info->si_signo = SIGTRAP;
16801 info->si_code = si_code;
16802 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16803 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16804 }
16805
16806 void user_single_step_siginfo(struct task_struct *tsk,
16807 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16808 index 42eb330..139955c 100644
16809 --- a/arch/x86/kernel/pvclock.c
16810 +++ b/arch/x86/kernel/pvclock.c
16811 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16812 return pv_tsc_khz;
16813 }
16814
16815 -static atomic64_t last_value = ATOMIC64_INIT(0);
16816 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16817
16818 void pvclock_resume(void)
16819 {
16820 - atomic64_set(&last_value, 0);
16821 + atomic64_set_unchecked(&last_value, 0);
16822 }
16823
16824 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16825 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16826 * updating at the same time, and one of them could be slightly behind,
16827 * making the assumption that last_value always go forward fail to hold.
16828 */
16829 - last = atomic64_read(&last_value);
16830 + last = atomic64_read_unchecked(&last_value);
16831 do {
16832 if (ret < last)
16833 return last;
16834 - last = atomic64_cmpxchg(&last_value, last, ret);
16835 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16836 } while (unlikely(last != ret));
16837
16838 return ret;
16839 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16840 index 37a458b..e63d183 100644
16841 --- a/arch/x86/kernel/reboot.c
16842 +++ b/arch/x86/kernel/reboot.c
16843 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16844 EXPORT_SYMBOL(pm_power_off);
16845
16846 static const struct desc_ptr no_idt = {};
16847 -static int reboot_mode;
16848 +static unsigned short reboot_mode;
16849 enum reboot_type reboot_type = BOOT_ACPI;
16850 int reboot_force;
16851
16852 @@ -324,13 +324,17 @@ core_initcall(reboot_init);
16853 extern const unsigned char machine_real_restart_asm[];
16854 extern const u64 machine_real_restart_gdt[3];
16855
16856 -void machine_real_restart(unsigned int type)
16857 +__noreturn void machine_real_restart(unsigned int type)
16858 {
16859 void *restart_va;
16860 unsigned long restart_pa;
16861 - void (*restart_lowmem)(unsigned int);
16862 + void (* __noreturn restart_lowmem)(unsigned int);
16863 u64 *lowmem_gdt;
16864
16865 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16866 + struct desc_struct *gdt;
16867 +#endif
16868 +
16869 local_irq_disable();
16870
16871 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16872 @@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16873 boot)". This seems like a fairly standard thing that gets set by
16874 REBOOT.COM programs, and the previous reset routine did this
16875 too. */
16876 - *((unsigned short *)0x472) = reboot_mode;
16877 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16878
16879 /* Patch the GDT in the low memory trampoline */
16880 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16881
16882 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16883 restart_pa = virt_to_phys(restart_va);
16884 - restart_lowmem = (void (*)(unsigned int))restart_pa;
16885 + restart_lowmem = (void *)restart_pa;
16886
16887 /* GDT[0]: GDT self-pointer */
16888 lowmem_gdt[0] =
16889 @@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16890 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16891
16892 /* Jump to the identity-mapped low memory code */
16893 +
16894 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16895 + gdt = get_cpu_gdt_table(smp_processor_id());
16896 + pax_open_kernel();
16897 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16898 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16899 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16900 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16901 +#endif
16902 +#ifdef CONFIG_PAX_KERNEXEC
16903 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16904 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16905 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16906 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16907 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16908 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16909 +#endif
16910 + pax_close_kernel();
16911 +#endif
16912 +
16913 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16914 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16915 + unreachable();
16916 +#else
16917 restart_lowmem(type);
16918 +#endif
16919 +
16920 }
16921 #ifdef CONFIG_APM_MODULE
16922 EXPORT_SYMBOL(machine_real_restart);
16923 @@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16924 * try to force a triple fault and then cycle between hitting the keyboard
16925 * controller and doing that
16926 */
16927 -static void native_machine_emergency_restart(void)
16928 +__noreturn static void native_machine_emergency_restart(void)
16929 {
16930 int i;
16931 int attempt = 0;
16932 @@ -664,13 +694,13 @@ void native_machine_shutdown(void)
16933 #endif
16934 }
16935
16936 -static void __machine_emergency_restart(int emergency)
16937 +static __noreturn void __machine_emergency_restart(int emergency)
16938 {
16939 reboot_emergency = emergency;
16940 machine_ops.emergency_restart();
16941 }
16942
16943 -static void native_machine_restart(char *__unused)
16944 +static __noreturn void native_machine_restart(char *__unused)
16945 {
16946 printk("machine restart\n");
16947
16948 @@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
16949 __machine_emergency_restart(0);
16950 }
16951
16952 -static void native_machine_halt(void)
16953 +static __noreturn void native_machine_halt(void)
16954 {
16955 /* stop other cpus and apics */
16956 machine_shutdown();
16957 @@ -690,7 +720,7 @@ static void native_machine_halt(void)
16958 stop_this_cpu(NULL);
16959 }
16960
16961 -static void native_machine_power_off(void)
16962 +__noreturn static void native_machine_power_off(void)
16963 {
16964 if (pm_power_off) {
16965 if (!reboot_force)
16966 @@ -699,6 +729,7 @@ static void native_machine_power_off(void)
16967 }
16968 /* a fallback in case there is no PM info available */
16969 tboot_shutdown(TB_SHUTDOWN_HALT);
16970 + unreachable();
16971 }
16972
16973 struct machine_ops machine_ops = {
16974 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16975 index 7a6f3b3..bed145d7 100644
16976 --- a/arch/x86/kernel/relocate_kernel_64.S
16977 +++ b/arch/x86/kernel/relocate_kernel_64.S
16978 @@ -11,6 +11,7 @@
16979 #include <asm/kexec.h>
16980 #include <asm/processor-flags.h>
16981 #include <asm/pgtable_types.h>
16982 +#include <asm/alternative-asm.h>
16983
16984 /*
16985 * Must be relocatable PIC code callable as a C function
16986 @@ -160,13 +161,14 @@ identity_mapped:
16987 xorq %rbp, %rbp
16988 xorq %r8, %r8
16989 xorq %r9, %r9
16990 - xorq %r10, %r9
16991 + xorq %r10, %r10
16992 xorq %r11, %r11
16993 xorq %r12, %r12
16994 xorq %r13, %r13
16995 xorq %r14, %r14
16996 xorq %r15, %r15
16997
16998 + pax_force_retaddr 0, 1
16999 ret
17000
17001 1:
17002 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
17003 index cf0ef98..e3f780b 100644
17004 --- a/arch/x86/kernel/setup.c
17005 +++ b/arch/x86/kernel/setup.c
17006 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
17007
17008 switch (data->type) {
17009 case SETUP_E820_EXT:
17010 - parse_e820_ext(data);
17011 + parse_e820_ext((struct setup_data __force_kernel *)data);
17012 break;
17013 case SETUP_DTB:
17014 add_dtb(pa_data);
17015 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
17016 * area (640->1Mb) as ram even though it is not.
17017 * take them out.
17018 */
17019 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
17020 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
17021 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
17022 }
17023
17024 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
17025
17026 if (!boot_params.hdr.root_flags)
17027 root_mountflags &= ~MS_RDONLY;
17028 - init_mm.start_code = (unsigned long) _text;
17029 - init_mm.end_code = (unsigned long) _etext;
17030 + init_mm.start_code = ktla_ktva((unsigned long) _text);
17031 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
17032 init_mm.end_data = (unsigned long) _edata;
17033 init_mm.brk = _brk_end;
17034
17035 - code_resource.start = virt_to_phys(_text);
17036 - code_resource.end = virt_to_phys(_etext)-1;
17037 - data_resource.start = virt_to_phys(_etext);
17038 + code_resource.start = virt_to_phys(ktla_ktva(_text));
17039 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
17040 + data_resource.start = virt_to_phys(_sdata);
17041 data_resource.end = virt_to_phys(_edata)-1;
17042 bss_resource.start = virt_to_phys(&__bss_start);
17043 bss_resource.end = virt_to_phys(&__bss_stop)-1;
17044 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
17045 index 71f4727..16dc9f7 100644
17046 --- a/arch/x86/kernel/setup_percpu.c
17047 +++ b/arch/x86/kernel/setup_percpu.c
17048 @@ -21,19 +21,17 @@
17049 #include <asm/cpu.h>
17050 #include <asm/stackprotector.h>
17051
17052 -DEFINE_PER_CPU(int, cpu_number);
17053 +#ifdef CONFIG_SMP
17054 +DEFINE_PER_CPU(unsigned int, cpu_number);
17055 EXPORT_PER_CPU_SYMBOL(cpu_number);
17056 +#endif
17057
17058 -#ifdef CONFIG_X86_64
17059 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
17060 -#else
17061 -#define BOOT_PERCPU_OFFSET 0
17062 -#endif
17063
17064 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
17065 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
17066
17067 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
17068 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
17069 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
17070 };
17071 EXPORT_SYMBOL(__per_cpu_offset);
17072 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
17073 {
17074 #ifdef CONFIG_X86_32
17075 struct desc_struct gdt;
17076 + unsigned long base = per_cpu_offset(cpu);
17077
17078 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
17079 - 0x2 | DESCTYPE_S, 0x8);
17080 - gdt.s = 1;
17081 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
17082 + 0x83 | DESCTYPE_S, 0xC);
17083 write_gdt_entry(get_cpu_gdt_table(cpu),
17084 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
17085 #endif
17086 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
17087 /* alrighty, percpu areas up and running */
17088 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
17089 for_each_possible_cpu(cpu) {
17090 +#ifdef CONFIG_CC_STACKPROTECTOR
17091 +#ifdef CONFIG_X86_32
17092 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
17093 +#endif
17094 +#endif
17095 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
17096 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
17097 per_cpu(cpu_number, cpu) = cpu;
17098 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
17099 */
17100 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
17101 #endif
17102 +#ifdef CONFIG_CC_STACKPROTECTOR
17103 +#ifdef CONFIG_X86_32
17104 + if (!cpu)
17105 + per_cpu(stack_canary.canary, cpu) = canary;
17106 +#endif
17107 +#endif
17108 /*
17109 * Up to this point, the boot CPU has been using .init.data
17110 * area. Reload any changed state for the boot CPU.
17111 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
17112 index 54ddaeb2..22c3bdc 100644
17113 --- a/arch/x86/kernel/signal.c
17114 +++ b/arch/x86/kernel/signal.c
17115 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
17116 * Align the stack pointer according to the i386 ABI,
17117 * i.e. so that on function entry ((sp + 4) & 15) == 0.
17118 */
17119 - sp = ((sp + 4) & -16ul) - 4;
17120 + sp = ((sp - 12) & -16ul) - 4;
17121 #else /* !CONFIG_X86_32 */
17122 sp = round_down(sp, 16) - 8;
17123 #endif
17124 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
17125 * Return an always-bogus address instead so we will die with SIGSEGV.
17126 */
17127 if (onsigstack && !likely(on_sig_stack(sp)))
17128 - return (void __user *)-1L;
17129 + return (__force void __user *)-1L;
17130
17131 /* save i387 state */
17132 if (used_math() && save_i387_xstate(*fpstate) < 0)
17133 - return (void __user *)-1L;
17134 + return (__force void __user *)-1L;
17135
17136 return (void __user *)sp;
17137 }
17138 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
17139 }
17140
17141 if (current->mm->context.vdso)
17142 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
17143 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
17144 else
17145 - restorer = &frame->retcode;
17146 + restorer = (void __user *)&frame->retcode;
17147 if (ka->sa.sa_flags & SA_RESTORER)
17148 restorer = ka->sa.sa_restorer;
17149
17150 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
17151 * reasons and because gdb uses it as a signature to notice
17152 * signal handler stack frames.
17153 */
17154 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
17155 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
17156
17157 if (err)
17158 return -EFAULT;
17159 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
17160 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
17161
17162 /* Set up to return from userspace. */
17163 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
17164 + if (current->mm->context.vdso)
17165 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
17166 + else
17167 + restorer = (void __user *)&frame->retcode;
17168 if (ka->sa.sa_flags & SA_RESTORER)
17169 restorer = ka->sa.sa_restorer;
17170 put_user_ex(restorer, &frame->pretcode);
17171 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
17172 * reasons and because gdb uses it as a signature to notice
17173 * signal handler stack frames.
17174 */
17175 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
17176 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
17177 } put_user_catch(err);
17178
17179 if (err)
17180 @@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
17181 * X86_32: vm86 regs switched out by assembly code before reaching
17182 * here, so testing against kernel CS suffices.
17183 */
17184 - if (!user_mode(regs))
17185 + if (!user_mode_novm(regs))
17186 return;
17187
17188 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
17189 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
17190 index 9f548cb..caf76f7 100644
17191 --- a/arch/x86/kernel/smpboot.c
17192 +++ b/arch/x86/kernel/smpboot.c
17193 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
17194 set_idle_for_cpu(cpu, c_idle.idle);
17195 do_rest:
17196 per_cpu(current_task, cpu) = c_idle.idle;
17197 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
17198 #ifdef CONFIG_X86_32
17199 /* Stack for startup_32 can be just as for start_secondary onwards */
17200 irq_ctx_init(cpu);
17201 #else
17202 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
17203 initial_gs = per_cpu_offset(cpu);
17204 - per_cpu(kernel_stack, cpu) =
17205 - (unsigned long)task_stack_page(c_idle.idle) -
17206 - KERNEL_STACK_OFFSET + THREAD_SIZE;
17207 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
17208 #endif
17209 +
17210 + pax_open_kernel();
17211 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
17212 + pax_close_kernel();
17213 +
17214 initial_code = (unsigned long)start_secondary;
17215 stack_start = c_idle.idle->thread.sp;
17216
17217 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
17218
17219 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
17220
17221 +#ifdef CONFIG_PAX_PER_CPU_PGD
17222 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
17223 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
17224 + KERNEL_PGD_PTRS);
17225 +#endif
17226 +
17227 err = do_boot_cpu(apicid, cpu);
17228 if (err) {
17229 pr_debug("do_boot_cpu failed %d\n", err);
17230 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
17231 index c346d11..d43b163 100644
17232 --- a/arch/x86/kernel/step.c
17233 +++ b/arch/x86/kernel/step.c
17234 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17235 struct desc_struct *desc;
17236 unsigned long base;
17237
17238 - seg &= ~7UL;
17239 + seg >>= 3;
17240
17241 mutex_lock(&child->mm->context.lock);
17242 - if (unlikely((seg >> 3) >= child->mm->context.size))
17243 + if (unlikely(seg >= child->mm->context.size))
17244 addr = -1L; /* bogus selector, access would fault */
17245 else {
17246 desc = child->mm->context.ldt + seg;
17247 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17248 addr += base;
17249 }
17250 mutex_unlock(&child->mm->context.lock);
17251 - }
17252 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
17253 + addr = ktla_ktva(addr);
17254
17255 return addr;
17256 }
17257 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
17258 unsigned char opcode[15];
17259 unsigned long addr = convert_ip_to_linear(child, regs);
17260
17261 + if (addr == -EINVAL)
17262 + return 0;
17263 +
17264 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17265 for (i = 0; i < copied; i++) {
17266 switch (opcode[i]) {
17267 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
17268 index 0b0cb5f..db6b9ed 100644
17269 --- a/arch/x86/kernel/sys_i386_32.c
17270 +++ b/arch/x86/kernel/sys_i386_32.c
17271 @@ -24,17 +24,224 @@
17272
17273 #include <asm/syscalls.h>
17274
17275 -/*
17276 - * Do a system call from kernel instead of calling sys_execve so we
17277 - * end up with proper pt_regs.
17278 - */
17279 -int kernel_execve(const char *filename,
17280 - const char *const argv[],
17281 - const char *const envp[])
17282 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17283 {
17284 - long __res;
17285 - asm volatile ("int $0x80"
17286 - : "=a" (__res)
17287 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
17288 - return __res;
17289 + unsigned long pax_task_size = TASK_SIZE;
17290 +
17291 +#ifdef CONFIG_PAX_SEGMEXEC
17292 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17293 + pax_task_size = SEGMEXEC_TASK_SIZE;
17294 +#endif
17295 +
17296 + if (len > pax_task_size || addr > pax_task_size - len)
17297 + return -EINVAL;
17298 +
17299 + return 0;
17300 +}
17301 +
17302 +unsigned long
17303 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
17304 + unsigned long len, unsigned long pgoff, unsigned long flags)
17305 +{
17306 + struct mm_struct *mm = current->mm;
17307 + struct vm_area_struct *vma;
17308 + unsigned long start_addr, pax_task_size = TASK_SIZE;
17309 +
17310 +#ifdef CONFIG_PAX_SEGMEXEC
17311 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17312 + pax_task_size = SEGMEXEC_TASK_SIZE;
17313 +#endif
17314 +
17315 + pax_task_size -= PAGE_SIZE;
17316 +
17317 + if (len > pax_task_size)
17318 + return -ENOMEM;
17319 +
17320 + if (flags & MAP_FIXED)
17321 + return addr;
17322 +
17323 +#ifdef CONFIG_PAX_RANDMMAP
17324 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17325 +#endif
17326 +
17327 + if (addr) {
17328 + addr = PAGE_ALIGN(addr);
17329 + if (pax_task_size - len >= addr) {
17330 + vma = find_vma(mm, addr);
17331 + if (check_heap_stack_gap(vma, addr, len))
17332 + return addr;
17333 + }
17334 + }
17335 + if (len > mm->cached_hole_size) {
17336 + start_addr = addr = mm->free_area_cache;
17337 + } else {
17338 + start_addr = addr = mm->mmap_base;
17339 + mm->cached_hole_size = 0;
17340 + }
17341 +
17342 +#ifdef CONFIG_PAX_PAGEEXEC
17343 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17344 + start_addr = 0x00110000UL;
17345 +
17346 +#ifdef CONFIG_PAX_RANDMMAP
17347 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17348 + start_addr += mm->delta_mmap & 0x03FFF000UL;
17349 +#endif
17350 +
17351 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17352 + start_addr = addr = mm->mmap_base;
17353 + else
17354 + addr = start_addr;
17355 + }
17356 +#endif
17357 +
17358 +full_search:
17359 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17360 + /* At this point: (!vma || addr < vma->vm_end). */
17361 + if (pax_task_size - len < addr) {
17362 + /*
17363 + * Start a new search - just in case we missed
17364 + * some holes.
17365 + */
17366 + if (start_addr != mm->mmap_base) {
17367 + start_addr = addr = mm->mmap_base;
17368 + mm->cached_hole_size = 0;
17369 + goto full_search;
17370 + }
17371 + return -ENOMEM;
17372 + }
17373 + if (check_heap_stack_gap(vma, addr, len))
17374 + break;
17375 + if (addr + mm->cached_hole_size < vma->vm_start)
17376 + mm->cached_hole_size = vma->vm_start - addr;
17377 + addr = vma->vm_end;
17378 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
17379 + start_addr = addr = mm->mmap_base;
17380 + mm->cached_hole_size = 0;
17381 + goto full_search;
17382 + }
17383 + }
17384 +
17385 + /*
17386 + * Remember the place where we stopped the search:
17387 + */
17388 + mm->free_area_cache = addr + len;
17389 + return addr;
17390 +}
17391 +
17392 +unsigned long
17393 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17394 + const unsigned long len, const unsigned long pgoff,
17395 + const unsigned long flags)
17396 +{
17397 + struct vm_area_struct *vma;
17398 + struct mm_struct *mm = current->mm;
17399 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17400 +
17401 +#ifdef CONFIG_PAX_SEGMEXEC
17402 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17403 + pax_task_size = SEGMEXEC_TASK_SIZE;
17404 +#endif
17405 +
17406 + pax_task_size -= PAGE_SIZE;
17407 +
17408 + /* requested length too big for entire address space */
17409 + if (len > pax_task_size)
17410 + return -ENOMEM;
17411 +
17412 + if (flags & MAP_FIXED)
17413 + return addr;
17414 +
17415 +#ifdef CONFIG_PAX_PAGEEXEC
17416 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17417 + goto bottomup;
17418 +#endif
17419 +
17420 +#ifdef CONFIG_PAX_RANDMMAP
17421 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17422 +#endif
17423 +
17424 + /* requesting a specific address */
17425 + if (addr) {
17426 + addr = PAGE_ALIGN(addr);
17427 + if (pax_task_size - len >= addr) {
17428 + vma = find_vma(mm, addr);
17429 + if (check_heap_stack_gap(vma, addr, len))
17430 + return addr;
17431 + }
17432 + }
17433 +
17434 + /* check if free_area_cache is useful for us */
17435 + if (len <= mm->cached_hole_size) {
17436 + mm->cached_hole_size = 0;
17437 + mm->free_area_cache = mm->mmap_base;
17438 + }
17439 +
17440 + /* either no address requested or can't fit in requested address hole */
17441 + addr = mm->free_area_cache;
17442 +
17443 + /* make sure it can fit in the remaining address space */
17444 + if (addr > len) {
17445 + vma = find_vma(mm, addr-len);
17446 + if (check_heap_stack_gap(vma, addr - len, len))
17447 + /* remember the address as a hint for next time */
17448 + return (mm->free_area_cache = addr-len);
17449 + }
17450 +
17451 + if (mm->mmap_base < len)
17452 + goto bottomup;
17453 +
17454 + addr = mm->mmap_base-len;
17455 +
17456 + do {
17457 + /*
17458 + * Lookup failure means no vma is above this address,
17459 + * else if new region fits below vma->vm_start,
17460 + * return with success:
17461 + */
17462 + vma = find_vma(mm, addr);
17463 + if (check_heap_stack_gap(vma, addr, len))
17464 + /* remember the address as a hint for next time */
17465 + return (mm->free_area_cache = addr);
17466 +
17467 + /* remember the largest hole we saw so far */
17468 + if (addr + mm->cached_hole_size < vma->vm_start)
17469 + mm->cached_hole_size = vma->vm_start - addr;
17470 +
17471 + /* try just below the current vma->vm_start */
17472 + addr = skip_heap_stack_gap(vma, len);
17473 + } while (!IS_ERR_VALUE(addr));
17474 +
17475 +bottomup:
17476 + /*
17477 + * A failed mmap() very likely causes application failure,
17478 + * so fall back to the bottom-up function here. This scenario
17479 + * can happen with large stack limits and large mmap()
17480 + * allocations.
17481 + */
17482 +
17483 +#ifdef CONFIG_PAX_SEGMEXEC
17484 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17485 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17486 + else
17487 +#endif
17488 +
17489 + mm->mmap_base = TASK_UNMAPPED_BASE;
17490 +
17491 +#ifdef CONFIG_PAX_RANDMMAP
17492 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17493 + mm->mmap_base += mm->delta_mmap;
17494 +#endif
17495 +
17496 + mm->free_area_cache = mm->mmap_base;
17497 + mm->cached_hole_size = ~0UL;
17498 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17499 + /*
17500 + * Restore the topdown base:
17501 + */
17502 + mm->mmap_base = base;
17503 + mm->free_area_cache = base;
17504 + mm->cached_hole_size = ~0UL;
17505 +
17506 + return addr;
17507 }
17508 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17509 index 0514890..3dbebce 100644
17510 --- a/arch/x86/kernel/sys_x86_64.c
17511 +++ b/arch/x86/kernel/sys_x86_64.c
17512 @@ -95,8 +95,8 @@ out:
17513 return error;
17514 }
17515
17516 -static void find_start_end(unsigned long flags, unsigned long *begin,
17517 - unsigned long *end)
17518 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17519 + unsigned long *begin, unsigned long *end)
17520 {
17521 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17522 unsigned long new_begin;
17523 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17524 *begin = new_begin;
17525 }
17526 } else {
17527 - *begin = TASK_UNMAPPED_BASE;
17528 + *begin = mm->mmap_base;
17529 *end = TASK_SIZE;
17530 }
17531 }
17532 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17533 if (flags & MAP_FIXED)
17534 return addr;
17535
17536 - find_start_end(flags, &begin, &end);
17537 + find_start_end(mm, flags, &begin, &end);
17538
17539 if (len > end)
17540 return -ENOMEM;
17541
17542 +#ifdef CONFIG_PAX_RANDMMAP
17543 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17544 +#endif
17545 +
17546 if (addr) {
17547 addr = PAGE_ALIGN(addr);
17548 vma = find_vma(mm, addr);
17549 - if (end - len >= addr &&
17550 - (!vma || addr + len <= vma->vm_start))
17551 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17552 return addr;
17553 }
17554 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17555 @@ -172,7 +175,7 @@ full_search:
17556 }
17557 return -ENOMEM;
17558 }
17559 - if (!vma || addr + len <= vma->vm_start) {
17560 + if (check_heap_stack_gap(vma, addr, len)) {
17561 /*
17562 * Remember the place where we stopped the search:
17563 */
17564 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17565 {
17566 struct vm_area_struct *vma;
17567 struct mm_struct *mm = current->mm;
17568 - unsigned long addr = addr0;
17569 + unsigned long base = mm->mmap_base, addr = addr0;
17570
17571 /* requested length too big for entire address space */
17572 if (len > TASK_SIZE)
17573 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17574 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17575 goto bottomup;
17576
17577 +#ifdef CONFIG_PAX_RANDMMAP
17578 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17579 +#endif
17580 +
17581 /* requesting a specific address */
17582 if (addr) {
17583 addr = PAGE_ALIGN(addr);
17584 - vma = find_vma(mm, addr);
17585 - if (TASK_SIZE - len >= addr &&
17586 - (!vma || addr + len <= vma->vm_start))
17587 - return addr;
17588 + if (TASK_SIZE - len >= addr) {
17589 + vma = find_vma(mm, addr);
17590 + if (check_heap_stack_gap(vma, addr, len))
17591 + return addr;
17592 + }
17593 }
17594
17595 /* check if free_area_cache is useful for us */
17596 @@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17597 ALIGN_TOPDOWN);
17598
17599 vma = find_vma(mm, tmp_addr);
17600 - if (!vma || tmp_addr + len <= vma->vm_start)
17601 + if (check_heap_stack_gap(vma, tmp_addr, len))
17602 /* remember the address as a hint for next time */
17603 return mm->free_area_cache = tmp_addr;
17604 }
17605 @@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17606 * return with success:
17607 */
17608 vma = find_vma(mm, addr);
17609 - if (!vma || addr+len <= vma->vm_start)
17610 + if (check_heap_stack_gap(vma, addr, len))
17611 /* remember the address as a hint for next time */
17612 return mm->free_area_cache = addr;
17613
17614 @@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17615 mm->cached_hole_size = vma->vm_start - addr;
17616
17617 /* try just below the current vma->vm_start */
17618 - addr = vma->vm_start-len;
17619 - } while (len < vma->vm_start);
17620 + addr = skip_heap_stack_gap(vma, len);
17621 + } while (!IS_ERR_VALUE(addr));
17622
17623 bottomup:
17624 /*
17625 @@ -270,13 +278,21 @@ bottomup:
17626 * can happen with large stack limits and large mmap()
17627 * allocations.
17628 */
17629 + mm->mmap_base = TASK_UNMAPPED_BASE;
17630 +
17631 +#ifdef CONFIG_PAX_RANDMMAP
17632 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17633 + mm->mmap_base += mm->delta_mmap;
17634 +#endif
17635 +
17636 + mm->free_area_cache = mm->mmap_base;
17637 mm->cached_hole_size = ~0UL;
17638 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17639 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17640 /*
17641 * Restore the topdown base:
17642 */
17643 - mm->free_area_cache = mm->mmap_base;
17644 + mm->mmap_base = base;
17645 + mm->free_area_cache = base;
17646 mm->cached_hole_size = ~0UL;
17647
17648 return addr;
17649 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17650 index 9a0e312..e6f66f2 100644
17651 --- a/arch/x86/kernel/syscall_table_32.S
17652 +++ b/arch/x86/kernel/syscall_table_32.S
17653 @@ -1,3 +1,4 @@
17654 +.section .rodata,"a",@progbits
17655 ENTRY(sys_call_table)
17656 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17657 .long sys_exit
17658 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17659 index e2410e2..4fe3fbc 100644
17660 --- a/arch/x86/kernel/tboot.c
17661 +++ b/arch/x86/kernel/tboot.c
17662 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
17663
17664 void tboot_shutdown(u32 shutdown_type)
17665 {
17666 - void (*shutdown)(void);
17667 + void (* __noreturn shutdown)(void);
17668
17669 if (!tboot_enabled())
17670 return;
17671 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
17672
17673 switch_to_tboot_pt();
17674
17675 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17676 + shutdown = (void *)tboot->shutdown_entry;
17677 shutdown();
17678
17679 /* should not reach here */
17680 @@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17681 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17682 }
17683
17684 -static atomic_t ap_wfs_count;
17685 +static atomic_unchecked_t ap_wfs_count;
17686
17687 static int tboot_wait_for_aps(int num_aps)
17688 {
17689 @@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17690 {
17691 switch (action) {
17692 case CPU_DYING:
17693 - atomic_inc(&ap_wfs_count);
17694 + atomic_inc_unchecked(&ap_wfs_count);
17695 if (num_online_cpus() == 1)
17696 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17697 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17698 return NOTIFY_BAD;
17699 break;
17700 }
17701 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
17702
17703 tboot_create_trampoline();
17704
17705 - atomic_set(&ap_wfs_count, 0);
17706 + atomic_set_unchecked(&ap_wfs_count, 0);
17707 register_hotcpu_notifier(&tboot_cpu_notifier);
17708 return 0;
17709 }
17710 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17711 index dd5fbf4..b7f2232 100644
17712 --- a/arch/x86/kernel/time.c
17713 +++ b/arch/x86/kernel/time.c
17714 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17715 {
17716 unsigned long pc = instruction_pointer(regs);
17717
17718 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17719 + if (!user_mode(regs) && in_lock_functions(pc)) {
17720 #ifdef CONFIG_FRAME_POINTER
17721 - return *(unsigned long *)(regs->bp + sizeof(long));
17722 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17723 #else
17724 unsigned long *sp =
17725 (unsigned long *)kernel_stack_pointer(regs);
17726 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17727 * or above a saved flags. Eflags has bits 22-31 zero,
17728 * kernel addresses don't.
17729 */
17730 +
17731 +#ifdef CONFIG_PAX_KERNEXEC
17732 + return ktla_ktva(sp[0]);
17733 +#else
17734 if (sp[0] >> 22)
17735 return sp[0];
17736 if (sp[1] >> 22)
17737 return sp[1];
17738 #endif
17739 +
17740 +#endif
17741 }
17742 return pc;
17743 }
17744 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17745 index 6bb7b85..dd853e1 100644
17746 --- a/arch/x86/kernel/tls.c
17747 +++ b/arch/x86/kernel/tls.c
17748 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17749 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17750 return -EINVAL;
17751
17752 +#ifdef CONFIG_PAX_SEGMEXEC
17753 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17754 + return -EINVAL;
17755 +#endif
17756 +
17757 set_tls_desc(p, idx, &info, 1);
17758
17759 return 0;
17760 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17761 index 451c0a7..e57f551 100644
17762 --- a/arch/x86/kernel/trampoline_32.S
17763 +++ b/arch/x86/kernel/trampoline_32.S
17764 @@ -32,6 +32,12 @@
17765 #include <asm/segment.h>
17766 #include <asm/page_types.h>
17767
17768 +#ifdef CONFIG_PAX_KERNEXEC
17769 +#define ta(X) (X)
17770 +#else
17771 +#define ta(X) ((X) - __PAGE_OFFSET)
17772 +#endif
17773 +
17774 #ifdef CONFIG_SMP
17775
17776 .section ".x86_trampoline","a"
17777 @@ -62,7 +68,7 @@ r_base = .
17778 inc %ax # protected mode (PE) bit
17779 lmsw %ax # into protected mode
17780 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17781 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17782 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17783
17784 # These need to be in the same 64K segment as the above;
17785 # hence we don't use the boot_gdt_descr defined in head.S
17786 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17787 index 09ff517..df19fbff 100644
17788 --- a/arch/x86/kernel/trampoline_64.S
17789 +++ b/arch/x86/kernel/trampoline_64.S
17790 @@ -90,7 +90,7 @@ startup_32:
17791 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17792 movl %eax, %ds
17793
17794 - movl $X86_CR4_PAE, %eax
17795 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17796 movl %eax, %cr4 # Enable PAE mode
17797
17798 # Setup trampoline 4 level pagetables
17799 @@ -138,7 +138,7 @@ tidt:
17800 # so the kernel can live anywhere
17801 .balign 4
17802 tgdt:
17803 - .short tgdt_end - tgdt # gdt limit
17804 + .short tgdt_end - tgdt - 1 # gdt limit
17805 .long tgdt - r_base
17806 .short 0
17807 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17808 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17809 index 31d9d0f..e244dd9 100644
17810 --- a/arch/x86/kernel/traps.c
17811 +++ b/arch/x86/kernel/traps.c
17812 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17813
17814 /* Do we ignore FPU interrupts ? */
17815 char ignore_fpu_irq;
17816 -
17817 -/*
17818 - * The IDT has to be page-aligned to simplify the Pentium
17819 - * F0 0F bug workaround.
17820 - */
17821 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17822 #endif
17823
17824 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17825 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17826 }
17827
17828 static void __kprobes
17829 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17830 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17831 long error_code, siginfo_t *info)
17832 {
17833 struct task_struct *tsk = current;
17834
17835 #ifdef CONFIG_X86_32
17836 - if (regs->flags & X86_VM_MASK) {
17837 + if (v8086_mode(regs)) {
17838 /*
17839 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17840 * On nmi (interrupt 2), do_trap should not be called.
17841 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17842 }
17843 #endif
17844
17845 - if (!user_mode(regs))
17846 + if (!user_mode_novm(regs))
17847 goto kernel_trap;
17848
17849 #ifdef CONFIG_X86_32
17850 @@ -148,7 +142,7 @@ trap_signal:
17851 printk_ratelimit()) {
17852 printk(KERN_INFO
17853 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17854 - tsk->comm, tsk->pid, str,
17855 + tsk->comm, task_pid_nr(tsk), str,
17856 regs->ip, regs->sp, error_code);
17857 print_vma_addr(" in ", regs->ip);
17858 printk("\n");
17859 @@ -165,8 +159,20 @@ kernel_trap:
17860 if (!fixup_exception(regs)) {
17861 tsk->thread.error_code = error_code;
17862 tsk->thread.trap_no = trapnr;
17863 +
17864 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17865 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17866 + str = "PAX: suspicious stack segment fault";
17867 +#endif
17868 +
17869 die(str, regs, error_code);
17870 }
17871 +
17872 +#ifdef CONFIG_PAX_REFCOUNT
17873 + if (trapnr == 4)
17874 + pax_report_refcount_overflow(regs);
17875 +#endif
17876 +
17877 return;
17878
17879 #ifdef CONFIG_X86_32
17880 @@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17881 conditional_sti(regs);
17882
17883 #ifdef CONFIG_X86_32
17884 - if (regs->flags & X86_VM_MASK)
17885 + if (v8086_mode(regs))
17886 goto gp_in_vm86;
17887 #endif
17888
17889 tsk = current;
17890 - if (!user_mode(regs))
17891 + if (!user_mode_novm(regs))
17892 goto gp_in_kernel;
17893
17894 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17895 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17896 + struct mm_struct *mm = tsk->mm;
17897 + unsigned long limit;
17898 +
17899 + down_write(&mm->mmap_sem);
17900 + limit = mm->context.user_cs_limit;
17901 + if (limit < TASK_SIZE) {
17902 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17903 + up_write(&mm->mmap_sem);
17904 + return;
17905 + }
17906 + up_write(&mm->mmap_sem);
17907 + }
17908 +#endif
17909 +
17910 tsk->thread.error_code = error_code;
17911 tsk->thread.trap_no = 13;
17912
17913 @@ -295,6 +317,13 @@ gp_in_kernel:
17914 if (notify_die(DIE_GPF, "general protection fault", regs,
17915 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17916 return;
17917 +
17918 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17919 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17920 + die("PAX: suspicious general protection fault", regs, error_code);
17921 + else
17922 +#endif
17923 +
17924 die("general protection fault", regs, error_code);
17925 }
17926
17927 @@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17928 /* It's safe to allow irq's after DR6 has been saved */
17929 preempt_conditional_sti(regs);
17930
17931 - if (regs->flags & X86_VM_MASK) {
17932 + if (v8086_mode(regs)) {
17933 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17934 error_code, 1);
17935 preempt_conditional_cli(regs);
17936 @@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17937 * We already checked v86 mode above, so we can check for kernel mode
17938 * by just checking the CPL of CS.
17939 */
17940 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
17941 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17942 tsk->thread.debugreg6 &= ~DR_STEP;
17943 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17944 regs->flags &= ~X86_EFLAGS_TF;
17945 @@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17946 return;
17947 conditional_sti(regs);
17948
17949 - if (!user_mode_vm(regs))
17950 + if (!user_mode(regs))
17951 {
17952 if (!fixup_exception(regs)) {
17953 task->thread.error_code = error_code;
17954 @@ -569,8 +598,8 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17955 void __math_state_restore(struct task_struct *tsk)
17956 {
17957 /* We need a safe address that is cheap to find and that is already
17958 - in L1. We've just brought in "tsk->thread.has_fpu", so use that */
17959 -#define safe_address (tsk->thread.has_fpu)
17960 + in L1. */
17961 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
17962
17963 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
17964 is pending. Clear the x87 state here by setting it to fixed
17965 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17966 index b9242ba..50c5edd 100644
17967 --- a/arch/x86/kernel/verify_cpu.S
17968 +++ b/arch/x86/kernel/verify_cpu.S
17969 @@ -20,6 +20,7 @@
17970 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17971 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17972 * arch/x86/kernel/head_32.S: processor startup
17973 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17974 *
17975 * verify_cpu, returns the status of longmode and SSE in register %eax.
17976 * 0: Success 1: Failure
17977 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17978 index 863f875..4307295 100644
17979 --- a/arch/x86/kernel/vm86_32.c
17980 +++ b/arch/x86/kernel/vm86_32.c
17981 @@ -41,6 +41,7 @@
17982 #include <linux/ptrace.h>
17983 #include <linux/audit.h>
17984 #include <linux/stddef.h>
17985 +#include <linux/grsecurity.h>
17986
17987 #include <asm/uaccess.h>
17988 #include <asm/io.h>
17989 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17990 do_exit(SIGSEGV);
17991 }
17992
17993 - tss = &per_cpu(init_tss, get_cpu());
17994 + tss = init_tss + get_cpu();
17995 current->thread.sp0 = current->thread.saved_sp0;
17996 current->thread.sysenter_cs = __KERNEL_CS;
17997 load_sp0(tss, &current->thread);
17998 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17999 struct task_struct *tsk;
18000 int tmp, ret = -EPERM;
18001
18002 +#ifdef CONFIG_GRKERNSEC_VM86
18003 + if (!capable(CAP_SYS_RAWIO)) {
18004 + gr_handle_vm86();
18005 + goto out;
18006 + }
18007 +#endif
18008 +
18009 tsk = current;
18010 if (tsk->thread.saved_sp0)
18011 goto out;
18012 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
18013 int tmp, ret;
18014 struct vm86plus_struct __user *v86;
18015
18016 +#ifdef CONFIG_GRKERNSEC_VM86
18017 + if (!capable(CAP_SYS_RAWIO)) {
18018 + gr_handle_vm86();
18019 + ret = -EPERM;
18020 + goto out;
18021 + }
18022 +#endif
18023 +
18024 tsk = current;
18025 switch (cmd) {
18026 case VM86_REQUEST_IRQ:
18027 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
18028 tsk->thread.saved_fs = info->regs32->fs;
18029 tsk->thread.saved_gs = get_user_gs(info->regs32);
18030
18031 - tss = &per_cpu(init_tss, get_cpu());
18032 + tss = init_tss + get_cpu();
18033 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
18034 if (cpu_has_sep)
18035 tsk->thread.sysenter_cs = 0;
18036 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
18037 goto cannot_handle;
18038 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
18039 goto cannot_handle;
18040 - intr_ptr = (unsigned long __user *) (i << 2);
18041 + intr_ptr = (__force unsigned long __user *) (i << 2);
18042 if (get_user(segoffs, intr_ptr))
18043 goto cannot_handle;
18044 if ((segoffs >> 16) == BIOSSEG)
18045 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
18046 index 0f703f1..9e15f64 100644
18047 --- a/arch/x86/kernel/vmlinux.lds.S
18048 +++ b/arch/x86/kernel/vmlinux.lds.S
18049 @@ -26,6 +26,13 @@
18050 #include <asm/page_types.h>
18051 #include <asm/cache.h>
18052 #include <asm/boot.h>
18053 +#include <asm/segment.h>
18054 +
18055 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18056 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18057 +#else
18058 +#define __KERNEL_TEXT_OFFSET 0
18059 +#endif
18060
18061 #undef i386 /* in case the preprocessor is a 32bit one */
18062
18063 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
18064
18065 PHDRS {
18066 text PT_LOAD FLAGS(5); /* R_E */
18067 +#ifdef CONFIG_X86_32
18068 + module PT_LOAD FLAGS(5); /* R_E */
18069 +#endif
18070 +#ifdef CONFIG_XEN
18071 + rodata PT_LOAD FLAGS(5); /* R_E */
18072 +#else
18073 + rodata PT_LOAD FLAGS(4); /* R__ */
18074 +#endif
18075 data PT_LOAD FLAGS(6); /* RW_ */
18076 -#ifdef CONFIG_X86_64
18077 + init.begin PT_LOAD FLAGS(6); /* RW_ */
18078 #ifdef CONFIG_SMP
18079 percpu PT_LOAD FLAGS(6); /* RW_ */
18080 #endif
18081 + text.init PT_LOAD FLAGS(5); /* R_E */
18082 + text.exit PT_LOAD FLAGS(5); /* R_E */
18083 init PT_LOAD FLAGS(7); /* RWE */
18084 -#endif
18085 note PT_NOTE FLAGS(0); /* ___ */
18086 }
18087
18088 SECTIONS
18089 {
18090 #ifdef CONFIG_X86_32
18091 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18092 - phys_startup_32 = startup_32 - LOAD_OFFSET;
18093 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18094 #else
18095 - . = __START_KERNEL;
18096 - phys_startup_64 = startup_64 - LOAD_OFFSET;
18097 + . = __START_KERNEL;
18098 #endif
18099
18100 /* Text and read-only data */
18101 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
18102 - _text = .;
18103 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18104 /* bootstrapping code */
18105 +#ifdef CONFIG_X86_32
18106 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18107 +#else
18108 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18109 +#endif
18110 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18111 + _text = .;
18112 HEAD_TEXT
18113 #ifdef CONFIG_X86_32
18114 . = ALIGN(PAGE_SIZE);
18115 @@ -108,13 +128,47 @@ SECTIONS
18116 IRQENTRY_TEXT
18117 *(.fixup)
18118 *(.gnu.warning)
18119 - /* End of text section */
18120 - _etext = .;
18121 } :text = 0x9090
18122
18123 - NOTES :text :note
18124 + . += __KERNEL_TEXT_OFFSET;
18125
18126 - EXCEPTION_TABLE(16) :text = 0x9090
18127 +#ifdef CONFIG_X86_32
18128 + . = ALIGN(PAGE_SIZE);
18129 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18130 +
18131 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18132 + MODULES_EXEC_VADDR = .;
18133 + BYTE(0)
18134 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18135 + . = ALIGN(HPAGE_SIZE);
18136 + MODULES_EXEC_END = . - 1;
18137 +#endif
18138 +
18139 + } :module
18140 +#endif
18141 +
18142 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18143 + /* End of text section */
18144 + _etext = . - __KERNEL_TEXT_OFFSET;
18145 + }
18146 +
18147 +#ifdef CONFIG_X86_32
18148 + . = ALIGN(PAGE_SIZE);
18149 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18150 + *(.idt)
18151 + . = ALIGN(PAGE_SIZE);
18152 + *(.empty_zero_page)
18153 + *(.initial_pg_fixmap)
18154 + *(.initial_pg_pmd)
18155 + *(.initial_page_table)
18156 + *(.swapper_pg_dir)
18157 + } :rodata
18158 +#endif
18159 +
18160 + . = ALIGN(PAGE_SIZE);
18161 + NOTES :rodata :note
18162 +
18163 + EXCEPTION_TABLE(16) :rodata
18164
18165 #if defined(CONFIG_DEBUG_RODATA)
18166 /* .text should occupy whole number of pages */
18167 @@ -126,16 +180,20 @@ SECTIONS
18168
18169 /* Data */
18170 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18171 +
18172 +#ifdef CONFIG_PAX_KERNEXEC
18173 + . = ALIGN(HPAGE_SIZE);
18174 +#else
18175 + . = ALIGN(PAGE_SIZE);
18176 +#endif
18177 +
18178 /* Start of data section */
18179 _sdata = .;
18180
18181 /* init_task */
18182 INIT_TASK_DATA(THREAD_SIZE)
18183
18184 -#ifdef CONFIG_X86_32
18185 - /* 32 bit has nosave before _edata */
18186 NOSAVE_DATA
18187 -#endif
18188
18189 PAGE_ALIGNED_DATA(PAGE_SIZE)
18190
18191 @@ -176,12 +234,19 @@ SECTIONS
18192 #endif /* CONFIG_X86_64 */
18193
18194 /* Init code and data - will be freed after init */
18195 - . = ALIGN(PAGE_SIZE);
18196 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18197 + BYTE(0)
18198 +
18199 +#ifdef CONFIG_PAX_KERNEXEC
18200 + . = ALIGN(HPAGE_SIZE);
18201 +#else
18202 + . = ALIGN(PAGE_SIZE);
18203 +#endif
18204 +
18205 __init_begin = .; /* paired with __init_end */
18206 - }
18207 + } :init.begin
18208
18209 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18210 +#ifdef CONFIG_SMP
18211 /*
18212 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18213 * output PHDR, so the next output section - .init.text - should
18214 @@ -190,12 +255,27 @@ SECTIONS
18215 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
18216 #endif
18217
18218 - INIT_TEXT_SECTION(PAGE_SIZE)
18219 -#ifdef CONFIG_X86_64
18220 - :init
18221 -#endif
18222 + . = ALIGN(PAGE_SIZE);
18223 + init_begin = .;
18224 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18225 + VMLINUX_SYMBOL(_sinittext) = .;
18226 + INIT_TEXT
18227 + VMLINUX_SYMBOL(_einittext) = .;
18228 + . = ALIGN(PAGE_SIZE);
18229 + } :text.init
18230
18231 - INIT_DATA_SECTION(16)
18232 + /*
18233 + * .exit.text is discard at runtime, not link time, to deal with
18234 + * references from .altinstructions and .eh_frame
18235 + */
18236 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18237 + EXIT_TEXT
18238 + . = ALIGN(16);
18239 + } :text.exit
18240 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18241 +
18242 + . = ALIGN(PAGE_SIZE);
18243 + INIT_DATA_SECTION(16) :init
18244
18245 /*
18246 * Code and data for a variety of lowlevel trampolines, to be
18247 @@ -269,19 +349,12 @@ SECTIONS
18248 }
18249
18250 . = ALIGN(8);
18251 - /*
18252 - * .exit.text is discard at runtime, not link time, to deal with
18253 - * references from .altinstructions and .eh_frame
18254 - */
18255 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18256 - EXIT_TEXT
18257 - }
18258
18259 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18260 EXIT_DATA
18261 }
18262
18263 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18264 +#ifndef CONFIG_SMP
18265 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
18266 #endif
18267
18268 @@ -300,16 +373,10 @@ SECTIONS
18269 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
18270 __smp_locks = .;
18271 *(.smp_locks)
18272 - . = ALIGN(PAGE_SIZE);
18273 __smp_locks_end = .;
18274 + . = ALIGN(PAGE_SIZE);
18275 }
18276
18277 -#ifdef CONFIG_X86_64
18278 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18279 - NOSAVE_DATA
18280 - }
18281 -#endif
18282 -
18283 /* BSS */
18284 . = ALIGN(PAGE_SIZE);
18285 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18286 @@ -325,6 +392,7 @@ SECTIONS
18287 __brk_base = .;
18288 . += 64 * 1024; /* 64k alignment slop space */
18289 *(.brk_reservation) /* areas brk users have reserved */
18290 + . = ALIGN(HPAGE_SIZE);
18291 __brk_limit = .;
18292 }
18293
18294 @@ -351,13 +419,12 @@ SECTIONS
18295 * for the boot processor.
18296 */
18297 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18298 -INIT_PER_CPU(gdt_page);
18299 INIT_PER_CPU(irq_stack_union);
18300
18301 /*
18302 * Build-time check on the image size:
18303 */
18304 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18305 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18306 "kernel image bigger than KERNEL_IMAGE_SIZE");
18307
18308 #ifdef CONFIG_SMP
18309 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18310 index e4d4a22..47ee71f 100644
18311 --- a/arch/x86/kernel/vsyscall_64.c
18312 +++ b/arch/x86/kernel/vsyscall_64.c
18313 @@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18314 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18315 };
18316
18317 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18318 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18319
18320 static int __init vsyscall_setup(char *str)
18321 {
18322 if (str) {
18323 if (!strcmp("emulate", str))
18324 vsyscall_mode = EMULATE;
18325 - else if (!strcmp("native", str))
18326 - vsyscall_mode = NATIVE;
18327 else if (!strcmp("none", str))
18328 vsyscall_mode = NONE;
18329 else
18330 @@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18331
18332 tsk = current;
18333 if (seccomp_mode(&tsk->seccomp))
18334 - do_exit(SIGKILL);
18335 + do_group_exit(SIGKILL);
18336
18337 switch (vsyscall_nr) {
18338 case 0:
18339 @@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18340 return true;
18341
18342 sigsegv:
18343 - force_sig(SIGSEGV, current);
18344 - return true;
18345 + do_group_exit(SIGKILL);
18346 }
18347
18348 /*
18349 @@ -274,10 +271,7 @@ void __init map_vsyscall(void)
18350 extern char __vvar_page;
18351 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18352
18353 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18354 - vsyscall_mode == NATIVE
18355 - ? PAGE_KERNEL_VSYSCALL
18356 - : PAGE_KERNEL_VVAR);
18357 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18358 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18359 (unsigned long)VSYSCALL_START);
18360
18361 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18362 index 9796c2f..f686fbf 100644
18363 --- a/arch/x86/kernel/x8664_ksyms_64.c
18364 +++ b/arch/x86/kernel/x8664_ksyms_64.c
18365 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18366 EXPORT_SYMBOL(copy_user_generic_string);
18367 EXPORT_SYMBOL(copy_user_generic_unrolled);
18368 EXPORT_SYMBOL(__copy_user_nocache);
18369 -EXPORT_SYMBOL(_copy_from_user);
18370 -EXPORT_SYMBOL(_copy_to_user);
18371
18372 EXPORT_SYMBOL(copy_page);
18373 EXPORT_SYMBOL(clear_page);
18374 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18375 index 7110911..e8cdee5 100644
18376 --- a/arch/x86/kernel/xsave.c
18377 +++ b/arch/x86/kernel/xsave.c
18378 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18379 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18380 return -EINVAL;
18381
18382 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18383 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18384 fx_sw_user->extended_size -
18385 FP_XSTATE_MAGIC2_SIZE));
18386 if (err)
18387 @@ -266,7 +266,7 @@ fx_only:
18388 * the other extended state.
18389 */
18390 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18391 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18392 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18393 }
18394
18395 /*
18396 @@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
18397 if (use_xsave())
18398 err = restore_user_xstate(buf);
18399 else
18400 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18401 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18402 buf);
18403 if (unlikely(err)) {
18404 /*
18405 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18406 index f1e3be1..588efc8 100644
18407 --- a/arch/x86/kvm/emulate.c
18408 +++ b/arch/x86/kvm/emulate.c
18409 @@ -249,6 +249,7 @@ struct gprefix {
18410
18411 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
18412 do { \
18413 + unsigned long _tmp; \
18414 __asm__ __volatile__ ( \
18415 _PRE_EFLAGS("0", "4", "2") \
18416 _op _suffix " %"_x"3,%1; " \
18417 @@ -263,8 +264,6 @@ struct gprefix {
18418 /* Raw emulation: instruction has two explicit operands. */
18419 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
18420 do { \
18421 - unsigned long _tmp; \
18422 - \
18423 switch ((ctxt)->dst.bytes) { \
18424 case 2: \
18425 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
18426 @@ -280,7 +279,6 @@ struct gprefix {
18427
18428 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18429 do { \
18430 - unsigned long _tmp; \
18431 switch ((ctxt)->dst.bytes) { \
18432 case 1: \
18433 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
18434 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18435 index 54abb40..a192606 100644
18436 --- a/arch/x86/kvm/lapic.c
18437 +++ b/arch/x86/kvm/lapic.c
18438 @@ -53,7 +53,7 @@
18439 #define APIC_BUS_CYCLE_NS 1
18440
18441 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18442 -#define apic_debug(fmt, arg...)
18443 +#define apic_debug(fmt, arg...) do {} while (0)
18444
18445 #define APIC_LVT_NUM 6
18446 /* 14 is the version for Xeon and Pentium 8.4.8*/
18447 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18448 index f1b36cf..af8a124 100644
18449 --- a/arch/x86/kvm/mmu.c
18450 +++ b/arch/x86/kvm/mmu.c
18451 @@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18452
18453 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18454
18455 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18456 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18457
18458 /*
18459 * Assume that the pte write on a page table of the same type
18460 @@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18461 }
18462
18463 spin_lock(&vcpu->kvm->mmu_lock);
18464 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18465 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18466 gentry = 0;
18467 kvm_mmu_free_some_pages(vcpu);
18468 ++vcpu->kvm->stat.mmu_pte_write;
18469 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18470 index 9299410..ade2f9b 100644
18471 --- a/arch/x86/kvm/paging_tmpl.h
18472 +++ b/arch/x86/kvm/paging_tmpl.h
18473 @@ -197,7 +197,7 @@ retry_walk:
18474 if (unlikely(kvm_is_error_hva(host_addr)))
18475 goto error;
18476
18477 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18478 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18479 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18480 goto error;
18481
18482 @@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18483 if (need_flush)
18484 kvm_flush_remote_tlbs(vcpu->kvm);
18485
18486 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18487 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18488
18489 spin_unlock(&vcpu->kvm->mmu_lock);
18490
18491 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18492 index e32243e..a6e6172 100644
18493 --- a/arch/x86/kvm/svm.c
18494 +++ b/arch/x86/kvm/svm.c
18495 @@ -3400,7 +3400,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18496 int cpu = raw_smp_processor_id();
18497
18498 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18499 +
18500 + pax_open_kernel();
18501 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18502 + pax_close_kernel();
18503 +
18504 load_TR_desc();
18505 }
18506
18507 @@ -3778,6 +3782,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18508 #endif
18509 #endif
18510
18511 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18512 + __set_fs(current_thread_info()->addr_limit);
18513 +#endif
18514 +
18515 reload_tss(vcpu);
18516
18517 local_irq_disable();
18518 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18519 index 4ea7678..b3a7084 100644
18520 --- a/arch/x86/kvm/vmx.c
18521 +++ b/arch/x86/kvm/vmx.c
18522 @@ -1305,7 +1305,11 @@ static void reload_tss(void)
18523 struct desc_struct *descs;
18524
18525 descs = (void *)gdt->address;
18526 +
18527 + pax_open_kernel();
18528 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18529 + pax_close_kernel();
18530 +
18531 load_TR_desc();
18532 }
18533
18534 @@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
18535 if (!cpu_has_vmx_flexpriority())
18536 flexpriority_enabled = 0;
18537
18538 - if (!cpu_has_vmx_tpr_shadow())
18539 - kvm_x86_ops->update_cr8_intercept = NULL;
18540 + if (!cpu_has_vmx_tpr_shadow()) {
18541 + pax_open_kernel();
18542 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18543 + pax_close_kernel();
18544 + }
18545
18546 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18547 kvm_disable_largepages();
18548 @@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
18549 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18550
18551 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18552 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18553 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18554
18555 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18556 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18557 @@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18558 "jmp .Lkvm_vmx_return \n\t"
18559 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18560 ".Lkvm_vmx_return: "
18561 +
18562 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18563 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18564 + ".Lkvm_vmx_return2: "
18565 +#endif
18566 +
18567 /* Save guest registers, load host registers, keep flags */
18568 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18569 "pop %0 \n\t"
18570 @@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18571 #endif
18572 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18573 [wordsize]"i"(sizeof(ulong))
18574 +
18575 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18576 + ,[cs]"i"(__KERNEL_CS)
18577 +#endif
18578 +
18579 : "cc", "memory"
18580 , R"ax", R"bx", R"di", R"si"
18581 #ifdef CONFIG_X86_64
18582 @@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18583 }
18584 }
18585
18586 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18587 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18588 +
18589 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18590 + loadsegment(fs, __KERNEL_PERCPU);
18591 +#endif
18592 +
18593 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18594 + __set_fs(current_thread_info()->addr_limit);
18595 +#endif
18596 +
18597 vmx->loaded_vmcs->launched = 1;
18598
18599 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18600 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18601 index 4c938da..4ddef65 100644
18602 --- a/arch/x86/kvm/x86.c
18603 +++ b/arch/x86/kvm/x86.c
18604 @@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18605 {
18606 struct kvm *kvm = vcpu->kvm;
18607 int lm = is_long_mode(vcpu);
18608 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18609 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18610 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18611 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18612 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18613 : kvm->arch.xen_hvm_config.blob_size_32;
18614 u32 page_num = data & ~PAGE_MASK;
18615 @@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18616 if (n < msr_list.nmsrs)
18617 goto out;
18618 r = -EFAULT;
18619 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18620 + goto out;
18621 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18622 num_msrs_to_save * sizeof(u32)))
18623 goto out;
18624 @@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18625 struct kvm_cpuid2 *cpuid,
18626 struct kvm_cpuid_entry2 __user *entries)
18627 {
18628 - int r;
18629 + int r, i;
18630
18631 r = -E2BIG;
18632 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18633 goto out;
18634 r = -EFAULT;
18635 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18636 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18637 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18638 goto out;
18639 + for (i = 0; i < cpuid->nent; ++i) {
18640 + struct kvm_cpuid_entry2 cpuid_entry;
18641 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18642 + goto out;
18643 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18644 + }
18645 vcpu->arch.cpuid_nent = cpuid->nent;
18646 kvm_apic_set_version(vcpu);
18647 kvm_x86_ops->cpuid_update(vcpu);
18648 @@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18649 struct kvm_cpuid2 *cpuid,
18650 struct kvm_cpuid_entry2 __user *entries)
18651 {
18652 - int r;
18653 + int r, i;
18654
18655 r = -E2BIG;
18656 if (cpuid->nent < vcpu->arch.cpuid_nent)
18657 goto out;
18658 r = -EFAULT;
18659 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18660 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18661 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18662 goto out;
18663 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18664 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18665 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18666 + goto out;
18667 + }
18668 return 0;
18669
18670 out:
18671 @@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18672 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18673 struct kvm_interrupt *irq)
18674 {
18675 - if (irq->irq < 0 || irq->irq >= 256)
18676 + if (irq->irq >= 256)
18677 return -EINVAL;
18678 if (irqchip_in_kernel(vcpu->kvm))
18679 return -ENXIO;
18680 @@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
18681 kvm_mmu_set_mmio_spte_mask(mask);
18682 }
18683
18684 -int kvm_arch_init(void *opaque)
18685 +int kvm_arch_init(const void *opaque)
18686 {
18687 int r;
18688 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18689 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18690 index cf4603b..7cdde38 100644
18691 --- a/arch/x86/lguest/boot.c
18692 +++ b/arch/x86/lguest/boot.c
18693 @@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18694 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18695 * Launcher to reboot us.
18696 */
18697 -static void lguest_restart(char *reason)
18698 +static __noreturn void lguest_restart(char *reason)
18699 {
18700 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18701 + BUG();
18702 }
18703
18704 /*G:050
18705 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18706 index 042f682..c92afb6 100644
18707 --- a/arch/x86/lib/atomic64_32.c
18708 +++ b/arch/x86/lib/atomic64_32.c
18709 @@ -8,18 +8,30 @@
18710
18711 long long atomic64_read_cx8(long long, const atomic64_t *v);
18712 EXPORT_SYMBOL(atomic64_read_cx8);
18713 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18714 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18715 long long atomic64_set_cx8(long long, const atomic64_t *v);
18716 EXPORT_SYMBOL(atomic64_set_cx8);
18717 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18718 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18719 long long atomic64_xchg_cx8(long long, unsigned high);
18720 EXPORT_SYMBOL(atomic64_xchg_cx8);
18721 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18722 EXPORT_SYMBOL(atomic64_add_return_cx8);
18723 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18724 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18725 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18726 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18727 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18728 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18729 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18730 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18731 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18732 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18733 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18734 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18735 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18736 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18737 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18738 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18739 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18740 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18741 #ifndef CONFIG_X86_CMPXCHG64
18742 long long atomic64_read_386(long long, const atomic64_t *v);
18743 EXPORT_SYMBOL(atomic64_read_386);
18744 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18745 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
18746 long long atomic64_set_386(long long, const atomic64_t *v);
18747 EXPORT_SYMBOL(atomic64_set_386);
18748 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18749 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
18750 long long atomic64_xchg_386(long long, unsigned high);
18751 EXPORT_SYMBOL(atomic64_xchg_386);
18752 long long atomic64_add_return_386(long long a, atomic64_t *v);
18753 EXPORT_SYMBOL(atomic64_add_return_386);
18754 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18755 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18756 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18757 EXPORT_SYMBOL(atomic64_sub_return_386);
18758 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18759 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18760 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18761 EXPORT_SYMBOL(atomic64_inc_return_386);
18762 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18763 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18764 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18765 EXPORT_SYMBOL(atomic64_dec_return_386);
18766 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18767 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18768 long long atomic64_add_386(long long a, atomic64_t *v);
18769 EXPORT_SYMBOL(atomic64_add_386);
18770 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18771 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
18772 long long atomic64_sub_386(long long a, atomic64_t *v);
18773 EXPORT_SYMBOL(atomic64_sub_386);
18774 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18775 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18776 long long atomic64_inc_386(long long a, atomic64_t *v);
18777 EXPORT_SYMBOL(atomic64_inc_386);
18778 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18779 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18780 long long atomic64_dec_386(long long a, atomic64_t *v);
18781 EXPORT_SYMBOL(atomic64_dec_386);
18782 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18783 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18784 long long atomic64_dec_if_positive_386(atomic64_t *v);
18785 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18786 int atomic64_inc_not_zero_386(atomic64_t *v);
18787 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18788 index e8e7e0d..56fd1b0 100644
18789 --- a/arch/x86/lib/atomic64_386_32.S
18790 +++ b/arch/x86/lib/atomic64_386_32.S
18791 @@ -48,6 +48,10 @@ BEGIN(read)
18792 movl (v), %eax
18793 movl 4(v), %edx
18794 RET_ENDP
18795 +BEGIN(read_unchecked)
18796 + movl (v), %eax
18797 + movl 4(v), %edx
18798 +RET_ENDP
18799 #undef v
18800
18801 #define v %esi
18802 @@ -55,6 +59,10 @@ BEGIN(set)
18803 movl %ebx, (v)
18804 movl %ecx, 4(v)
18805 RET_ENDP
18806 +BEGIN(set_unchecked)
18807 + movl %ebx, (v)
18808 + movl %ecx, 4(v)
18809 +RET_ENDP
18810 #undef v
18811
18812 #define v %esi
18813 @@ -70,6 +78,20 @@ RET_ENDP
18814 BEGIN(add)
18815 addl %eax, (v)
18816 adcl %edx, 4(v)
18817 +
18818 +#ifdef CONFIG_PAX_REFCOUNT
18819 + jno 0f
18820 + subl %eax, (v)
18821 + sbbl %edx, 4(v)
18822 + int $4
18823 +0:
18824 + _ASM_EXTABLE(0b, 0b)
18825 +#endif
18826 +
18827 +RET_ENDP
18828 +BEGIN(add_unchecked)
18829 + addl %eax, (v)
18830 + adcl %edx, 4(v)
18831 RET_ENDP
18832 #undef v
18833
18834 @@ -77,6 +99,24 @@ RET_ENDP
18835 BEGIN(add_return)
18836 addl (v), %eax
18837 adcl 4(v), %edx
18838 +
18839 +#ifdef CONFIG_PAX_REFCOUNT
18840 + into
18841 +1234:
18842 + _ASM_EXTABLE(1234b, 2f)
18843 +#endif
18844 +
18845 + movl %eax, (v)
18846 + movl %edx, 4(v)
18847 +
18848 +#ifdef CONFIG_PAX_REFCOUNT
18849 +2:
18850 +#endif
18851 +
18852 +RET_ENDP
18853 +BEGIN(add_return_unchecked)
18854 + addl (v), %eax
18855 + adcl 4(v), %edx
18856 movl %eax, (v)
18857 movl %edx, 4(v)
18858 RET_ENDP
18859 @@ -86,6 +126,20 @@ RET_ENDP
18860 BEGIN(sub)
18861 subl %eax, (v)
18862 sbbl %edx, 4(v)
18863 +
18864 +#ifdef CONFIG_PAX_REFCOUNT
18865 + jno 0f
18866 + addl %eax, (v)
18867 + adcl %edx, 4(v)
18868 + int $4
18869 +0:
18870 + _ASM_EXTABLE(0b, 0b)
18871 +#endif
18872 +
18873 +RET_ENDP
18874 +BEGIN(sub_unchecked)
18875 + subl %eax, (v)
18876 + sbbl %edx, 4(v)
18877 RET_ENDP
18878 #undef v
18879
18880 @@ -96,6 +150,27 @@ BEGIN(sub_return)
18881 sbbl $0, %edx
18882 addl (v), %eax
18883 adcl 4(v), %edx
18884 +
18885 +#ifdef CONFIG_PAX_REFCOUNT
18886 + into
18887 +1234:
18888 + _ASM_EXTABLE(1234b, 2f)
18889 +#endif
18890 +
18891 + movl %eax, (v)
18892 + movl %edx, 4(v)
18893 +
18894 +#ifdef CONFIG_PAX_REFCOUNT
18895 +2:
18896 +#endif
18897 +
18898 +RET_ENDP
18899 +BEGIN(sub_return_unchecked)
18900 + negl %edx
18901 + negl %eax
18902 + sbbl $0, %edx
18903 + addl (v), %eax
18904 + adcl 4(v), %edx
18905 movl %eax, (v)
18906 movl %edx, 4(v)
18907 RET_ENDP
18908 @@ -105,6 +180,20 @@ RET_ENDP
18909 BEGIN(inc)
18910 addl $1, (v)
18911 adcl $0, 4(v)
18912 +
18913 +#ifdef CONFIG_PAX_REFCOUNT
18914 + jno 0f
18915 + subl $1, (v)
18916 + sbbl $0, 4(v)
18917 + int $4
18918 +0:
18919 + _ASM_EXTABLE(0b, 0b)
18920 +#endif
18921 +
18922 +RET_ENDP
18923 +BEGIN(inc_unchecked)
18924 + addl $1, (v)
18925 + adcl $0, 4(v)
18926 RET_ENDP
18927 #undef v
18928
18929 @@ -114,6 +203,26 @@ BEGIN(inc_return)
18930 movl 4(v), %edx
18931 addl $1, %eax
18932 adcl $0, %edx
18933 +
18934 +#ifdef CONFIG_PAX_REFCOUNT
18935 + into
18936 +1234:
18937 + _ASM_EXTABLE(1234b, 2f)
18938 +#endif
18939 +
18940 + movl %eax, (v)
18941 + movl %edx, 4(v)
18942 +
18943 +#ifdef CONFIG_PAX_REFCOUNT
18944 +2:
18945 +#endif
18946 +
18947 +RET_ENDP
18948 +BEGIN(inc_return_unchecked)
18949 + movl (v), %eax
18950 + movl 4(v), %edx
18951 + addl $1, %eax
18952 + adcl $0, %edx
18953 movl %eax, (v)
18954 movl %edx, 4(v)
18955 RET_ENDP
18956 @@ -123,6 +232,20 @@ RET_ENDP
18957 BEGIN(dec)
18958 subl $1, (v)
18959 sbbl $0, 4(v)
18960 +
18961 +#ifdef CONFIG_PAX_REFCOUNT
18962 + jno 0f
18963 + addl $1, (v)
18964 + adcl $0, 4(v)
18965 + int $4
18966 +0:
18967 + _ASM_EXTABLE(0b, 0b)
18968 +#endif
18969 +
18970 +RET_ENDP
18971 +BEGIN(dec_unchecked)
18972 + subl $1, (v)
18973 + sbbl $0, 4(v)
18974 RET_ENDP
18975 #undef v
18976
18977 @@ -132,6 +255,26 @@ BEGIN(dec_return)
18978 movl 4(v), %edx
18979 subl $1, %eax
18980 sbbl $0, %edx
18981 +
18982 +#ifdef CONFIG_PAX_REFCOUNT
18983 + into
18984 +1234:
18985 + _ASM_EXTABLE(1234b, 2f)
18986 +#endif
18987 +
18988 + movl %eax, (v)
18989 + movl %edx, 4(v)
18990 +
18991 +#ifdef CONFIG_PAX_REFCOUNT
18992 +2:
18993 +#endif
18994 +
18995 +RET_ENDP
18996 +BEGIN(dec_return_unchecked)
18997 + movl (v), %eax
18998 + movl 4(v), %edx
18999 + subl $1, %eax
19000 + sbbl $0, %edx
19001 movl %eax, (v)
19002 movl %edx, 4(v)
19003 RET_ENDP
19004 @@ -143,6 +286,13 @@ BEGIN(add_unless)
19005 adcl %edx, %edi
19006 addl (v), %eax
19007 adcl 4(v), %edx
19008 +
19009 +#ifdef CONFIG_PAX_REFCOUNT
19010 + into
19011 +1234:
19012 + _ASM_EXTABLE(1234b, 2f)
19013 +#endif
19014 +
19015 cmpl %eax, %esi
19016 je 3f
19017 1:
19018 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
19019 1:
19020 addl $1, %eax
19021 adcl $0, %edx
19022 +
19023 +#ifdef CONFIG_PAX_REFCOUNT
19024 + into
19025 +1234:
19026 + _ASM_EXTABLE(1234b, 2f)
19027 +#endif
19028 +
19029 movl %eax, (v)
19030 movl %edx, 4(v)
19031 movl $1, %eax
19032 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
19033 movl 4(v), %edx
19034 subl $1, %eax
19035 sbbl $0, %edx
19036 +
19037 +#ifdef CONFIG_PAX_REFCOUNT
19038 + into
19039 +1234:
19040 + _ASM_EXTABLE(1234b, 1f)
19041 +#endif
19042 +
19043 js 1f
19044 movl %eax, (v)
19045 movl %edx, 4(v)
19046 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
19047 index 391a083..d658e9f 100644
19048 --- a/arch/x86/lib/atomic64_cx8_32.S
19049 +++ b/arch/x86/lib/atomic64_cx8_32.S
19050 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
19051 CFI_STARTPROC
19052
19053 read64 %ecx
19054 + pax_force_retaddr
19055 ret
19056 CFI_ENDPROC
19057 ENDPROC(atomic64_read_cx8)
19058
19059 +ENTRY(atomic64_read_unchecked_cx8)
19060 + CFI_STARTPROC
19061 +
19062 + read64 %ecx
19063 + pax_force_retaddr
19064 + ret
19065 + CFI_ENDPROC
19066 +ENDPROC(atomic64_read_unchecked_cx8)
19067 +
19068 ENTRY(atomic64_set_cx8)
19069 CFI_STARTPROC
19070
19071 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
19072 cmpxchg8b (%esi)
19073 jne 1b
19074
19075 + pax_force_retaddr
19076 ret
19077 CFI_ENDPROC
19078 ENDPROC(atomic64_set_cx8)
19079
19080 +ENTRY(atomic64_set_unchecked_cx8)
19081 + CFI_STARTPROC
19082 +
19083 +1:
19084 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
19085 + * are atomic on 586 and newer */
19086 + cmpxchg8b (%esi)
19087 + jne 1b
19088 +
19089 + pax_force_retaddr
19090 + ret
19091 + CFI_ENDPROC
19092 +ENDPROC(atomic64_set_unchecked_cx8)
19093 +
19094 ENTRY(atomic64_xchg_cx8)
19095 CFI_STARTPROC
19096
19097 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
19098 cmpxchg8b (%esi)
19099 jne 1b
19100
19101 + pax_force_retaddr
19102 ret
19103 CFI_ENDPROC
19104 ENDPROC(atomic64_xchg_cx8)
19105
19106 -.macro addsub_return func ins insc
19107 -ENTRY(atomic64_\func\()_return_cx8)
19108 +.macro addsub_return func ins insc unchecked=""
19109 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
19110 CFI_STARTPROC
19111 SAVE ebp
19112 SAVE ebx
19113 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
19114 movl %edx, %ecx
19115 \ins\()l %esi, %ebx
19116 \insc\()l %edi, %ecx
19117 +
19118 +.ifb \unchecked
19119 +#ifdef CONFIG_PAX_REFCOUNT
19120 + into
19121 +2:
19122 + _ASM_EXTABLE(2b, 3f)
19123 +#endif
19124 +.endif
19125 +
19126 LOCK_PREFIX
19127 cmpxchg8b (%ebp)
19128 jne 1b
19129 -
19130 -10:
19131 movl %ebx, %eax
19132 movl %ecx, %edx
19133 +
19134 +.ifb \unchecked
19135 +#ifdef CONFIG_PAX_REFCOUNT
19136 +3:
19137 +#endif
19138 +.endif
19139 +
19140 RESTORE edi
19141 RESTORE esi
19142 RESTORE ebx
19143 RESTORE ebp
19144 + pax_force_retaddr
19145 ret
19146 CFI_ENDPROC
19147 -ENDPROC(atomic64_\func\()_return_cx8)
19148 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
19149 .endm
19150
19151 addsub_return add add adc
19152 addsub_return sub sub sbb
19153 +addsub_return add add adc _unchecked
19154 +addsub_return sub sub sbb _unchecked
19155
19156 -.macro incdec_return func ins insc
19157 -ENTRY(atomic64_\func\()_return_cx8)
19158 +.macro incdec_return func ins insc unchecked
19159 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
19160 CFI_STARTPROC
19161 SAVE ebx
19162
19163 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
19164 movl %edx, %ecx
19165 \ins\()l $1, %ebx
19166 \insc\()l $0, %ecx
19167 +
19168 +.ifb \unchecked
19169 +#ifdef CONFIG_PAX_REFCOUNT
19170 + into
19171 +2:
19172 + _ASM_EXTABLE(2b, 3f)
19173 +#endif
19174 +.endif
19175 +
19176 LOCK_PREFIX
19177 cmpxchg8b (%esi)
19178 jne 1b
19179
19180 -10:
19181 movl %ebx, %eax
19182 movl %ecx, %edx
19183 +
19184 +.ifb \unchecked
19185 +#ifdef CONFIG_PAX_REFCOUNT
19186 +3:
19187 +#endif
19188 +.endif
19189 +
19190 RESTORE ebx
19191 + pax_force_retaddr
19192 ret
19193 CFI_ENDPROC
19194 -ENDPROC(atomic64_\func\()_return_cx8)
19195 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
19196 .endm
19197
19198 incdec_return inc add adc
19199 incdec_return dec sub sbb
19200 +incdec_return inc add adc _unchecked
19201 +incdec_return dec sub sbb _unchecked
19202
19203 ENTRY(atomic64_dec_if_positive_cx8)
19204 CFI_STARTPROC
19205 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
19206 movl %edx, %ecx
19207 subl $1, %ebx
19208 sbb $0, %ecx
19209 +
19210 +#ifdef CONFIG_PAX_REFCOUNT
19211 + into
19212 +1234:
19213 + _ASM_EXTABLE(1234b, 2f)
19214 +#endif
19215 +
19216 js 2f
19217 LOCK_PREFIX
19218 cmpxchg8b (%esi)
19219 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
19220 movl %ebx, %eax
19221 movl %ecx, %edx
19222 RESTORE ebx
19223 + pax_force_retaddr
19224 ret
19225 CFI_ENDPROC
19226 ENDPROC(atomic64_dec_if_positive_cx8)
19227 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
19228 movl %edx, %ecx
19229 addl %esi, %ebx
19230 adcl %edi, %ecx
19231 +
19232 +#ifdef CONFIG_PAX_REFCOUNT
19233 + into
19234 +1234:
19235 + _ASM_EXTABLE(1234b, 3f)
19236 +#endif
19237 +
19238 LOCK_PREFIX
19239 cmpxchg8b (%ebp)
19240 jne 1b
19241 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
19242 CFI_ADJUST_CFA_OFFSET -8
19243 RESTORE ebx
19244 RESTORE ebp
19245 + pax_force_retaddr
19246 ret
19247 4:
19248 cmpl %edx, 4(%esp)
19249 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
19250 movl %edx, %ecx
19251 addl $1, %ebx
19252 adcl $0, %ecx
19253 +
19254 +#ifdef CONFIG_PAX_REFCOUNT
19255 + into
19256 +1234:
19257 + _ASM_EXTABLE(1234b, 3f)
19258 +#endif
19259 +
19260 LOCK_PREFIX
19261 cmpxchg8b (%esi)
19262 jne 1b
19263 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
19264 movl $1, %eax
19265 3:
19266 RESTORE ebx
19267 + pax_force_retaddr
19268 ret
19269 4:
19270 testl %edx, %edx
19271 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19272 index 78d16a5..fbcf666 100644
19273 --- a/arch/x86/lib/checksum_32.S
19274 +++ b/arch/x86/lib/checksum_32.S
19275 @@ -28,7 +28,8 @@
19276 #include <linux/linkage.h>
19277 #include <asm/dwarf2.h>
19278 #include <asm/errno.h>
19279 -
19280 +#include <asm/segment.h>
19281 +
19282 /*
19283 * computes a partial checksum, e.g. for TCP/UDP fragments
19284 */
19285 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19286
19287 #define ARGBASE 16
19288 #define FP 12
19289 -
19290 -ENTRY(csum_partial_copy_generic)
19291 +
19292 +ENTRY(csum_partial_copy_generic_to_user)
19293 CFI_STARTPROC
19294 +
19295 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19296 + pushl_cfi %gs
19297 + popl_cfi %es
19298 + jmp csum_partial_copy_generic
19299 +#endif
19300 +
19301 +ENTRY(csum_partial_copy_generic_from_user)
19302 +
19303 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19304 + pushl_cfi %gs
19305 + popl_cfi %ds
19306 +#endif
19307 +
19308 +ENTRY(csum_partial_copy_generic)
19309 subl $4,%esp
19310 CFI_ADJUST_CFA_OFFSET 4
19311 pushl_cfi %edi
19312 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19313 jmp 4f
19314 SRC(1: movw (%esi), %bx )
19315 addl $2, %esi
19316 -DST( movw %bx, (%edi) )
19317 +DST( movw %bx, %es:(%edi) )
19318 addl $2, %edi
19319 addw %bx, %ax
19320 adcl $0, %eax
19321 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19322 SRC(1: movl (%esi), %ebx )
19323 SRC( movl 4(%esi), %edx )
19324 adcl %ebx, %eax
19325 -DST( movl %ebx, (%edi) )
19326 +DST( movl %ebx, %es:(%edi) )
19327 adcl %edx, %eax
19328 -DST( movl %edx, 4(%edi) )
19329 +DST( movl %edx, %es:4(%edi) )
19330
19331 SRC( movl 8(%esi), %ebx )
19332 SRC( movl 12(%esi), %edx )
19333 adcl %ebx, %eax
19334 -DST( movl %ebx, 8(%edi) )
19335 +DST( movl %ebx, %es:8(%edi) )
19336 adcl %edx, %eax
19337 -DST( movl %edx, 12(%edi) )
19338 +DST( movl %edx, %es:12(%edi) )
19339
19340 SRC( movl 16(%esi), %ebx )
19341 SRC( movl 20(%esi), %edx )
19342 adcl %ebx, %eax
19343 -DST( movl %ebx, 16(%edi) )
19344 +DST( movl %ebx, %es:16(%edi) )
19345 adcl %edx, %eax
19346 -DST( movl %edx, 20(%edi) )
19347 +DST( movl %edx, %es:20(%edi) )
19348
19349 SRC( movl 24(%esi), %ebx )
19350 SRC( movl 28(%esi), %edx )
19351 adcl %ebx, %eax
19352 -DST( movl %ebx, 24(%edi) )
19353 +DST( movl %ebx, %es:24(%edi) )
19354 adcl %edx, %eax
19355 -DST( movl %edx, 28(%edi) )
19356 +DST( movl %edx, %es:28(%edi) )
19357
19358 lea 32(%esi), %esi
19359 lea 32(%edi), %edi
19360 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19361 shrl $2, %edx # This clears CF
19362 SRC(3: movl (%esi), %ebx )
19363 adcl %ebx, %eax
19364 -DST( movl %ebx, (%edi) )
19365 +DST( movl %ebx, %es:(%edi) )
19366 lea 4(%esi), %esi
19367 lea 4(%edi), %edi
19368 dec %edx
19369 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19370 jb 5f
19371 SRC( movw (%esi), %cx )
19372 leal 2(%esi), %esi
19373 -DST( movw %cx, (%edi) )
19374 +DST( movw %cx, %es:(%edi) )
19375 leal 2(%edi), %edi
19376 je 6f
19377 shll $16,%ecx
19378 SRC(5: movb (%esi), %cl )
19379 -DST( movb %cl, (%edi) )
19380 +DST( movb %cl, %es:(%edi) )
19381 6: addl %ecx, %eax
19382 adcl $0, %eax
19383 7:
19384 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19385
19386 6001:
19387 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19388 - movl $-EFAULT, (%ebx)
19389 + movl $-EFAULT, %ss:(%ebx)
19390
19391 # zero the complete destination - computing the rest
19392 # is too much work
19393 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19394
19395 6002:
19396 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19397 - movl $-EFAULT,(%ebx)
19398 + movl $-EFAULT,%ss:(%ebx)
19399 jmp 5000b
19400
19401 .previous
19402
19403 + pushl_cfi %ss
19404 + popl_cfi %ds
19405 + pushl_cfi %ss
19406 + popl_cfi %es
19407 popl_cfi %ebx
19408 CFI_RESTORE ebx
19409 popl_cfi %esi
19410 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19411 popl_cfi %ecx # equivalent to addl $4,%esp
19412 ret
19413 CFI_ENDPROC
19414 -ENDPROC(csum_partial_copy_generic)
19415 +ENDPROC(csum_partial_copy_generic_to_user)
19416
19417 #else
19418
19419 /* Version for PentiumII/PPro */
19420
19421 #define ROUND1(x) \
19422 + nop; nop; nop; \
19423 SRC(movl x(%esi), %ebx ) ; \
19424 addl %ebx, %eax ; \
19425 - DST(movl %ebx, x(%edi) ) ;
19426 + DST(movl %ebx, %es:x(%edi)) ;
19427
19428 #define ROUND(x) \
19429 + nop; nop; nop; \
19430 SRC(movl x(%esi), %ebx ) ; \
19431 adcl %ebx, %eax ; \
19432 - DST(movl %ebx, x(%edi) ) ;
19433 + DST(movl %ebx, %es:x(%edi)) ;
19434
19435 #define ARGBASE 12
19436 -
19437 -ENTRY(csum_partial_copy_generic)
19438 +
19439 +ENTRY(csum_partial_copy_generic_to_user)
19440 CFI_STARTPROC
19441 +
19442 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19443 + pushl_cfi %gs
19444 + popl_cfi %es
19445 + jmp csum_partial_copy_generic
19446 +#endif
19447 +
19448 +ENTRY(csum_partial_copy_generic_from_user)
19449 +
19450 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19451 + pushl_cfi %gs
19452 + popl_cfi %ds
19453 +#endif
19454 +
19455 +ENTRY(csum_partial_copy_generic)
19456 pushl_cfi %ebx
19457 CFI_REL_OFFSET ebx, 0
19458 pushl_cfi %edi
19459 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19460 subl %ebx, %edi
19461 lea -1(%esi),%edx
19462 andl $-32,%edx
19463 - lea 3f(%ebx,%ebx), %ebx
19464 + lea 3f(%ebx,%ebx,2), %ebx
19465 testl %esi, %esi
19466 jmp *%ebx
19467 1: addl $64,%esi
19468 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19469 jb 5f
19470 SRC( movw (%esi), %dx )
19471 leal 2(%esi), %esi
19472 -DST( movw %dx, (%edi) )
19473 +DST( movw %dx, %es:(%edi) )
19474 leal 2(%edi), %edi
19475 je 6f
19476 shll $16,%edx
19477 5:
19478 SRC( movb (%esi), %dl )
19479 -DST( movb %dl, (%edi) )
19480 +DST( movb %dl, %es:(%edi) )
19481 6: addl %edx, %eax
19482 adcl $0, %eax
19483 7:
19484 .section .fixup, "ax"
19485 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19486 - movl $-EFAULT, (%ebx)
19487 + movl $-EFAULT, %ss:(%ebx)
19488 # zero the complete destination (computing the rest is too much work)
19489 movl ARGBASE+8(%esp),%edi # dst
19490 movl ARGBASE+12(%esp),%ecx # len
19491 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19492 rep; stosb
19493 jmp 7b
19494 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19495 - movl $-EFAULT, (%ebx)
19496 + movl $-EFAULT, %ss:(%ebx)
19497 jmp 7b
19498 .previous
19499
19500 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19501 + pushl_cfi %ss
19502 + popl_cfi %ds
19503 + pushl_cfi %ss
19504 + popl_cfi %es
19505 +#endif
19506 +
19507 popl_cfi %esi
19508 CFI_RESTORE esi
19509 popl_cfi %edi
19510 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19511 CFI_RESTORE ebx
19512 ret
19513 CFI_ENDPROC
19514 -ENDPROC(csum_partial_copy_generic)
19515 +ENDPROC(csum_partial_copy_generic_to_user)
19516
19517 #undef ROUND
19518 #undef ROUND1
19519 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19520 index f2145cf..cea889d 100644
19521 --- a/arch/x86/lib/clear_page_64.S
19522 +++ b/arch/x86/lib/clear_page_64.S
19523 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19524 movl $4096/8,%ecx
19525 xorl %eax,%eax
19526 rep stosq
19527 + pax_force_retaddr
19528 ret
19529 CFI_ENDPROC
19530 ENDPROC(clear_page_c)
19531 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19532 movl $4096,%ecx
19533 xorl %eax,%eax
19534 rep stosb
19535 + pax_force_retaddr
19536 ret
19537 CFI_ENDPROC
19538 ENDPROC(clear_page_c_e)
19539 @@ -43,6 +45,7 @@ ENTRY(clear_page)
19540 leaq 64(%rdi),%rdi
19541 jnz .Lloop
19542 nop
19543 + pax_force_retaddr
19544 ret
19545 CFI_ENDPROC
19546 .Lclear_page_end:
19547 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
19548
19549 #include <asm/cpufeature.h>
19550
19551 - .section .altinstr_replacement,"ax"
19552 + .section .altinstr_replacement,"a"
19553 1: .byte 0xeb /* jmp <disp8> */
19554 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19555 2: .byte 0xeb /* jmp <disp8> */
19556 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19557 index 1e572c5..2a162cd 100644
19558 --- a/arch/x86/lib/cmpxchg16b_emu.S
19559 +++ b/arch/x86/lib/cmpxchg16b_emu.S
19560 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19561
19562 popf
19563 mov $1, %al
19564 + pax_force_retaddr
19565 ret
19566
19567 not_same:
19568 popf
19569 xor %al,%al
19570 + pax_force_retaddr
19571 ret
19572
19573 CFI_ENDPROC
19574 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19575 index 01c805b..dccb07f 100644
19576 --- a/arch/x86/lib/copy_page_64.S
19577 +++ b/arch/x86/lib/copy_page_64.S
19578 @@ -9,6 +9,7 @@ copy_page_c:
19579 CFI_STARTPROC
19580 movl $4096/8,%ecx
19581 rep movsq
19582 + pax_force_retaddr
19583 ret
19584 CFI_ENDPROC
19585 ENDPROC(copy_page_c)
19586 @@ -39,7 +40,7 @@ ENTRY(copy_page)
19587 movq 16 (%rsi), %rdx
19588 movq 24 (%rsi), %r8
19589 movq 32 (%rsi), %r9
19590 - movq 40 (%rsi), %r10
19591 + movq 40 (%rsi), %r13
19592 movq 48 (%rsi), %r11
19593 movq 56 (%rsi), %r12
19594
19595 @@ -50,7 +51,7 @@ ENTRY(copy_page)
19596 movq %rdx, 16 (%rdi)
19597 movq %r8, 24 (%rdi)
19598 movq %r9, 32 (%rdi)
19599 - movq %r10, 40 (%rdi)
19600 + movq %r13, 40 (%rdi)
19601 movq %r11, 48 (%rdi)
19602 movq %r12, 56 (%rdi)
19603
19604 @@ -69,7 +70,7 @@ ENTRY(copy_page)
19605 movq 16 (%rsi), %rdx
19606 movq 24 (%rsi), %r8
19607 movq 32 (%rsi), %r9
19608 - movq 40 (%rsi), %r10
19609 + movq 40 (%rsi), %r13
19610 movq 48 (%rsi), %r11
19611 movq 56 (%rsi), %r12
19612
19613 @@ -78,7 +79,7 @@ ENTRY(copy_page)
19614 movq %rdx, 16 (%rdi)
19615 movq %r8, 24 (%rdi)
19616 movq %r9, 32 (%rdi)
19617 - movq %r10, 40 (%rdi)
19618 + movq %r13, 40 (%rdi)
19619 movq %r11, 48 (%rdi)
19620 movq %r12, 56 (%rdi)
19621
19622 @@ -95,6 +96,7 @@ ENTRY(copy_page)
19623 CFI_RESTORE r13
19624 addq $3*8,%rsp
19625 CFI_ADJUST_CFA_OFFSET -3*8
19626 + pax_force_retaddr
19627 ret
19628 .Lcopy_page_end:
19629 CFI_ENDPROC
19630 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
19631
19632 #include <asm/cpufeature.h>
19633
19634 - .section .altinstr_replacement,"ax"
19635 + .section .altinstr_replacement,"a"
19636 1: .byte 0xeb /* jmp <disp8> */
19637 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19638 2:
19639 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19640 index 0248402..821c786 100644
19641 --- a/arch/x86/lib/copy_user_64.S
19642 +++ b/arch/x86/lib/copy_user_64.S
19643 @@ -16,6 +16,7 @@
19644 #include <asm/thread_info.h>
19645 #include <asm/cpufeature.h>
19646 #include <asm/alternative-asm.h>
19647 +#include <asm/pgtable.h>
19648
19649 /*
19650 * By placing feature2 after feature1 in altinstructions section, we logically
19651 @@ -29,7 +30,7 @@
19652 .byte 0xe9 /* 32bit jump */
19653 .long \orig-1f /* by default jump to orig */
19654 1:
19655 - .section .altinstr_replacement,"ax"
19656 + .section .altinstr_replacement,"a"
19657 2: .byte 0xe9 /* near jump with 32bit immediate */
19658 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19659 3: .byte 0xe9 /* near jump with 32bit immediate */
19660 @@ -71,47 +72,20 @@
19661 #endif
19662 .endm
19663
19664 -/* Standard copy_to_user with segment limit checking */
19665 -ENTRY(_copy_to_user)
19666 - CFI_STARTPROC
19667 - GET_THREAD_INFO(%rax)
19668 - movq %rdi,%rcx
19669 - addq %rdx,%rcx
19670 - jc bad_to_user
19671 - cmpq TI_addr_limit(%rax),%rcx
19672 - ja bad_to_user
19673 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19674 - copy_user_generic_unrolled,copy_user_generic_string, \
19675 - copy_user_enhanced_fast_string
19676 - CFI_ENDPROC
19677 -ENDPROC(_copy_to_user)
19678 -
19679 -/* Standard copy_from_user with segment limit checking */
19680 -ENTRY(_copy_from_user)
19681 - CFI_STARTPROC
19682 - GET_THREAD_INFO(%rax)
19683 - movq %rsi,%rcx
19684 - addq %rdx,%rcx
19685 - jc bad_from_user
19686 - cmpq TI_addr_limit(%rax),%rcx
19687 - ja bad_from_user
19688 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19689 - copy_user_generic_unrolled,copy_user_generic_string, \
19690 - copy_user_enhanced_fast_string
19691 - CFI_ENDPROC
19692 -ENDPROC(_copy_from_user)
19693 -
19694 .section .fixup,"ax"
19695 /* must zero dest */
19696 ENTRY(bad_from_user)
19697 bad_from_user:
19698 CFI_STARTPROC
19699 + testl %edx,%edx
19700 + js bad_to_user
19701 movl %edx,%ecx
19702 xorl %eax,%eax
19703 rep
19704 stosb
19705 bad_to_user:
19706 movl %edx,%eax
19707 + pax_force_retaddr
19708 ret
19709 CFI_ENDPROC
19710 ENDPROC(bad_from_user)
19711 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19712 jz 17f
19713 1: movq (%rsi),%r8
19714 2: movq 1*8(%rsi),%r9
19715 -3: movq 2*8(%rsi),%r10
19716 +3: movq 2*8(%rsi),%rax
19717 4: movq 3*8(%rsi),%r11
19718 5: movq %r8,(%rdi)
19719 6: movq %r9,1*8(%rdi)
19720 -7: movq %r10,2*8(%rdi)
19721 +7: movq %rax,2*8(%rdi)
19722 8: movq %r11,3*8(%rdi)
19723 9: movq 4*8(%rsi),%r8
19724 10: movq 5*8(%rsi),%r9
19725 -11: movq 6*8(%rsi),%r10
19726 +11: movq 6*8(%rsi),%rax
19727 12: movq 7*8(%rsi),%r11
19728 13: movq %r8,4*8(%rdi)
19729 14: movq %r9,5*8(%rdi)
19730 -15: movq %r10,6*8(%rdi)
19731 +15: movq %rax,6*8(%rdi)
19732 16: movq %r11,7*8(%rdi)
19733 leaq 64(%rsi),%rsi
19734 leaq 64(%rdi),%rdi
19735 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19736 decl %ecx
19737 jnz 21b
19738 23: xor %eax,%eax
19739 + pax_force_retaddr
19740 ret
19741
19742 .section .fixup,"ax"
19743 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19744 3: rep
19745 movsb
19746 4: xorl %eax,%eax
19747 + pax_force_retaddr
19748 ret
19749
19750 .section .fixup,"ax"
19751 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19752 1: rep
19753 movsb
19754 2: xorl %eax,%eax
19755 + pax_force_retaddr
19756 ret
19757
19758 .section .fixup,"ax"
19759 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19760 index cb0c112..e3a6895 100644
19761 --- a/arch/x86/lib/copy_user_nocache_64.S
19762 +++ b/arch/x86/lib/copy_user_nocache_64.S
19763 @@ -8,12 +8,14 @@
19764
19765 #include <linux/linkage.h>
19766 #include <asm/dwarf2.h>
19767 +#include <asm/alternative-asm.h>
19768
19769 #define FIX_ALIGNMENT 1
19770
19771 #include <asm/current.h>
19772 #include <asm/asm-offsets.h>
19773 #include <asm/thread_info.h>
19774 +#include <asm/pgtable.h>
19775
19776 .macro ALIGN_DESTINATION
19777 #ifdef FIX_ALIGNMENT
19778 @@ -50,6 +52,15 @@
19779 */
19780 ENTRY(__copy_user_nocache)
19781 CFI_STARTPROC
19782 +
19783 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19784 + mov $PAX_USER_SHADOW_BASE,%rcx
19785 + cmp %rcx,%rsi
19786 + jae 1f
19787 + add %rcx,%rsi
19788 +1:
19789 +#endif
19790 +
19791 cmpl $8,%edx
19792 jb 20f /* less then 8 bytes, go to byte copy loop */
19793 ALIGN_DESTINATION
19794 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19795 jz 17f
19796 1: movq (%rsi),%r8
19797 2: movq 1*8(%rsi),%r9
19798 -3: movq 2*8(%rsi),%r10
19799 +3: movq 2*8(%rsi),%rax
19800 4: movq 3*8(%rsi),%r11
19801 5: movnti %r8,(%rdi)
19802 6: movnti %r9,1*8(%rdi)
19803 -7: movnti %r10,2*8(%rdi)
19804 +7: movnti %rax,2*8(%rdi)
19805 8: movnti %r11,3*8(%rdi)
19806 9: movq 4*8(%rsi),%r8
19807 10: movq 5*8(%rsi),%r9
19808 -11: movq 6*8(%rsi),%r10
19809 +11: movq 6*8(%rsi),%rax
19810 12: movq 7*8(%rsi),%r11
19811 13: movnti %r8,4*8(%rdi)
19812 14: movnti %r9,5*8(%rdi)
19813 -15: movnti %r10,6*8(%rdi)
19814 +15: movnti %rax,6*8(%rdi)
19815 16: movnti %r11,7*8(%rdi)
19816 leaq 64(%rsi),%rsi
19817 leaq 64(%rdi),%rdi
19818 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19819 jnz 21b
19820 23: xorl %eax,%eax
19821 sfence
19822 + pax_force_retaddr
19823 ret
19824
19825 .section .fixup,"ax"
19826 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19827 index fb903b7..c92b7f7 100644
19828 --- a/arch/x86/lib/csum-copy_64.S
19829 +++ b/arch/x86/lib/csum-copy_64.S
19830 @@ -8,6 +8,7 @@
19831 #include <linux/linkage.h>
19832 #include <asm/dwarf2.h>
19833 #include <asm/errno.h>
19834 +#include <asm/alternative-asm.h>
19835
19836 /*
19837 * Checksum copy with exception handling.
19838 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19839 CFI_RESTORE rbp
19840 addq $7*8, %rsp
19841 CFI_ADJUST_CFA_OFFSET -7*8
19842 + pax_force_retaddr 0, 1
19843 ret
19844 CFI_RESTORE_STATE
19845
19846 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19847 index 459b58a..9570bc7 100644
19848 --- a/arch/x86/lib/csum-wrappers_64.c
19849 +++ b/arch/x86/lib/csum-wrappers_64.c
19850 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19851 len -= 2;
19852 }
19853 }
19854 - isum = csum_partial_copy_generic((__force const void *)src,
19855 +
19856 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19857 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19858 + src += PAX_USER_SHADOW_BASE;
19859 +#endif
19860 +
19861 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
19862 dst, len, isum, errp, NULL);
19863 if (unlikely(*errp))
19864 goto out_err;
19865 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19866 }
19867
19868 *errp = 0;
19869 - return csum_partial_copy_generic(src, (void __force *)dst,
19870 +
19871 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19872 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19873 + dst += PAX_USER_SHADOW_BASE;
19874 +#endif
19875 +
19876 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19877 len, isum, NULL, errp);
19878 }
19879 EXPORT_SYMBOL(csum_partial_copy_to_user);
19880 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19881 index 51f1504..ddac4c1 100644
19882 --- a/arch/x86/lib/getuser.S
19883 +++ b/arch/x86/lib/getuser.S
19884 @@ -33,15 +33,38 @@
19885 #include <asm/asm-offsets.h>
19886 #include <asm/thread_info.h>
19887 #include <asm/asm.h>
19888 +#include <asm/segment.h>
19889 +#include <asm/pgtable.h>
19890 +#include <asm/alternative-asm.h>
19891 +
19892 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19893 +#define __copyuser_seg gs;
19894 +#else
19895 +#define __copyuser_seg
19896 +#endif
19897
19898 .text
19899 ENTRY(__get_user_1)
19900 CFI_STARTPROC
19901 +
19902 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19903 GET_THREAD_INFO(%_ASM_DX)
19904 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19905 jae bad_get_user
19906 -1: movzb (%_ASM_AX),%edx
19907 +
19908 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19909 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19910 + cmp %_ASM_DX,%_ASM_AX
19911 + jae 1234f
19912 + add %_ASM_DX,%_ASM_AX
19913 +1234:
19914 +#endif
19915 +
19916 +#endif
19917 +
19918 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19919 xor %eax,%eax
19920 + pax_force_retaddr
19921 ret
19922 CFI_ENDPROC
19923 ENDPROC(__get_user_1)
19924 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19925 ENTRY(__get_user_2)
19926 CFI_STARTPROC
19927 add $1,%_ASM_AX
19928 +
19929 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19930 jc bad_get_user
19931 GET_THREAD_INFO(%_ASM_DX)
19932 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19933 jae bad_get_user
19934 -2: movzwl -1(%_ASM_AX),%edx
19935 +
19936 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19937 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19938 + cmp %_ASM_DX,%_ASM_AX
19939 + jae 1234f
19940 + add %_ASM_DX,%_ASM_AX
19941 +1234:
19942 +#endif
19943 +
19944 +#endif
19945 +
19946 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19947 xor %eax,%eax
19948 + pax_force_retaddr
19949 ret
19950 CFI_ENDPROC
19951 ENDPROC(__get_user_2)
19952 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19953 ENTRY(__get_user_4)
19954 CFI_STARTPROC
19955 add $3,%_ASM_AX
19956 +
19957 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19958 jc bad_get_user
19959 GET_THREAD_INFO(%_ASM_DX)
19960 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19961 jae bad_get_user
19962 -3: mov -3(%_ASM_AX),%edx
19963 +
19964 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19965 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19966 + cmp %_ASM_DX,%_ASM_AX
19967 + jae 1234f
19968 + add %_ASM_DX,%_ASM_AX
19969 +1234:
19970 +#endif
19971 +
19972 +#endif
19973 +
19974 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19975 xor %eax,%eax
19976 + pax_force_retaddr
19977 ret
19978 CFI_ENDPROC
19979 ENDPROC(__get_user_4)
19980 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19981 GET_THREAD_INFO(%_ASM_DX)
19982 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19983 jae bad_get_user
19984 +
19985 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19986 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19987 + cmp %_ASM_DX,%_ASM_AX
19988 + jae 1234f
19989 + add %_ASM_DX,%_ASM_AX
19990 +1234:
19991 +#endif
19992 +
19993 4: movq -7(%_ASM_AX),%_ASM_DX
19994 xor %eax,%eax
19995 + pax_force_retaddr
19996 ret
19997 CFI_ENDPROC
19998 ENDPROC(__get_user_8)
19999 @@ -91,6 +152,7 @@ bad_get_user:
20000 CFI_STARTPROC
20001 xor %edx,%edx
20002 mov $(-EFAULT),%_ASM_AX
20003 + pax_force_retaddr
20004 ret
20005 CFI_ENDPROC
20006 END(bad_get_user)
20007 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
20008 index 374562e..a75830b 100644
20009 --- a/arch/x86/lib/insn.c
20010 +++ b/arch/x86/lib/insn.c
20011 @@ -21,6 +21,11 @@
20012 #include <linux/string.h>
20013 #include <asm/inat.h>
20014 #include <asm/insn.h>
20015 +#ifdef __KERNEL__
20016 +#include <asm/pgtable_types.h>
20017 +#else
20018 +#define ktla_ktva(addr) addr
20019 +#endif
20020
20021 /* Verify next sizeof(t) bytes can be on the same instruction */
20022 #define validate_next(t, insn, n) \
20023 @@ -49,8 +54,8 @@
20024 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
20025 {
20026 memset(insn, 0, sizeof(*insn));
20027 - insn->kaddr = kaddr;
20028 - insn->next_byte = kaddr;
20029 + insn->kaddr = ktla_ktva(kaddr);
20030 + insn->next_byte = ktla_ktva(kaddr);
20031 insn->x86_64 = x86_64 ? 1 : 0;
20032 insn->opnd_bytes = 4;
20033 if (x86_64)
20034 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
20035 index 05a95e7..326f2fa 100644
20036 --- a/arch/x86/lib/iomap_copy_64.S
20037 +++ b/arch/x86/lib/iomap_copy_64.S
20038 @@ -17,6 +17,7 @@
20039
20040 #include <linux/linkage.h>
20041 #include <asm/dwarf2.h>
20042 +#include <asm/alternative-asm.h>
20043
20044 /*
20045 * override generic version in lib/iomap_copy.c
20046 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
20047 CFI_STARTPROC
20048 movl %edx,%ecx
20049 rep movsd
20050 + pax_force_retaddr
20051 ret
20052 CFI_ENDPROC
20053 ENDPROC(__iowrite32_copy)
20054 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
20055 index efbf2a0..8893637 100644
20056 --- a/arch/x86/lib/memcpy_64.S
20057 +++ b/arch/x86/lib/memcpy_64.S
20058 @@ -34,6 +34,7 @@
20059 rep movsq
20060 movl %edx, %ecx
20061 rep movsb
20062 + pax_force_retaddr
20063 ret
20064 .Lmemcpy_e:
20065 .previous
20066 @@ -51,6 +52,7 @@
20067
20068 movl %edx, %ecx
20069 rep movsb
20070 + pax_force_retaddr
20071 ret
20072 .Lmemcpy_e_e:
20073 .previous
20074 @@ -81,13 +83,13 @@ ENTRY(memcpy)
20075 */
20076 movq 0*8(%rsi), %r8
20077 movq 1*8(%rsi), %r9
20078 - movq 2*8(%rsi), %r10
20079 + movq 2*8(%rsi), %rcx
20080 movq 3*8(%rsi), %r11
20081 leaq 4*8(%rsi), %rsi
20082
20083 movq %r8, 0*8(%rdi)
20084 movq %r9, 1*8(%rdi)
20085 - movq %r10, 2*8(%rdi)
20086 + movq %rcx, 2*8(%rdi)
20087 movq %r11, 3*8(%rdi)
20088 leaq 4*8(%rdi), %rdi
20089 jae .Lcopy_forward_loop
20090 @@ -110,12 +112,12 @@ ENTRY(memcpy)
20091 subq $0x20, %rdx
20092 movq -1*8(%rsi), %r8
20093 movq -2*8(%rsi), %r9
20094 - movq -3*8(%rsi), %r10
20095 + movq -3*8(%rsi), %rcx
20096 movq -4*8(%rsi), %r11
20097 leaq -4*8(%rsi), %rsi
20098 movq %r8, -1*8(%rdi)
20099 movq %r9, -2*8(%rdi)
20100 - movq %r10, -3*8(%rdi)
20101 + movq %rcx, -3*8(%rdi)
20102 movq %r11, -4*8(%rdi)
20103 leaq -4*8(%rdi), %rdi
20104 jae .Lcopy_backward_loop
20105 @@ -135,12 +137,13 @@ ENTRY(memcpy)
20106 */
20107 movq 0*8(%rsi), %r8
20108 movq 1*8(%rsi), %r9
20109 - movq -2*8(%rsi, %rdx), %r10
20110 + movq -2*8(%rsi, %rdx), %rcx
20111 movq -1*8(%rsi, %rdx), %r11
20112 movq %r8, 0*8(%rdi)
20113 movq %r9, 1*8(%rdi)
20114 - movq %r10, -2*8(%rdi, %rdx)
20115 + movq %rcx, -2*8(%rdi, %rdx)
20116 movq %r11, -1*8(%rdi, %rdx)
20117 + pax_force_retaddr
20118 retq
20119 .p2align 4
20120 .Lless_16bytes:
20121 @@ -153,6 +156,7 @@ ENTRY(memcpy)
20122 movq -1*8(%rsi, %rdx), %r9
20123 movq %r8, 0*8(%rdi)
20124 movq %r9, -1*8(%rdi, %rdx)
20125 + pax_force_retaddr
20126 retq
20127 .p2align 4
20128 .Lless_8bytes:
20129 @@ -166,6 +170,7 @@ ENTRY(memcpy)
20130 movl -4(%rsi, %rdx), %r8d
20131 movl %ecx, (%rdi)
20132 movl %r8d, -4(%rdi, %rdx)
20133 + pax_force_retaddr
20134 retq
20135 .p2align 4
20136 .Lless_3bytes:
20137 @@ -183,6 +188,7 @@ ENTRY(memcpy)
20138 jnz .Lloop_1
20139
20140 .Lend:
20141 + pax_force_retaddr
20142 retq
20143 CFI_ENDPROC
20144 ENDPROC(memcpy)
20145 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
20146 index ee16461..c39c199 100644
20147 --- a/arch/x86/lib/memmove_64.S
20148 +++ b/arch/x86/lib/memmove_64.S
20149 @@ -61,13 +61,13 @@ ENTRY(memmove)
20150 5:
20151 sub $0x20, %rdx
20152 movq 0*8(%rsi), %r11
20153 - movq 1*8(%rsi), %r10
20154 + movq 1*8(%rsi), %rcx
20155 movq 2*8(%rsi), %r9
20156 movq 3*8(%rsi), %r8
20157 leaq 4*8(%rsi), %rsi
20158
20159 movq %r11, 0*8(%rdi)
20160 - movq %r10, 1*8(%rdi)
20161 + movq %rcx, 1*8(%rdi)
20162 movq %r9, 2*8(%rdi)
20163 movq %r8, 3*8(%rdi)
20164 leaq 4*8(%rdi), %rdi
20165 @@ -81,10 +81,10 @@ ENTRY(memmove)
20166 4:
20167 movq %rdx, %rcx
20168 movq -8(%rsi, %rdx), %r11
20169 - lea -8(%rdi, %rdx), %r10
20170 + lea -8(%rdi, %rdx), %r9
20171 shrq $3, %rcx
20172 rep movsq
20173 - movq %r11, (%r10)
20174 + movq %r11, (%r9)
20175 jmp 13f
20176 .Lmemmove_end_forward:
20177
20178 @@ -95,14 +95,14 @@ ENTRY(memmove)
20179 7:
20180 movq %rdx, %rcx
20181 movq (%rsi), %r11
20182 - movq %rdi, %r10
20183 + movq %rdi, %r9
20184 leaq -8(%rsi, %rdx), %rsi
20185 leaq -8(%rdi, %rdx), %rdi
20186 shrq $3, %rcx
20187 std
20188 rep movsq
20189 cld
20190 - movq %r11, (%r10)
20191 + movq %r11, (%r9)
20192 jmp 13f
20193
20194 /*
20195 @@ -127,13 +127,13 @@ ENTRY(memmove)
20196 8:
20197 subq $0x20, %rdx
20198 movq -1*8(%rsi), %r11
20199 - movq -2*8(%rsi), %r10
20200 + movq -2*8(%rsi), %rcx
20201 movq -3*8(%rsi), %r9
20202 movq -4*8(%rsi), %r8
20203 leaq -4*8(%rsi), %rsi
20204
20205 movq %r11, -1*8(%rdi)
20206 - movq %r10, -2*8(%rdi)
20207 + movq %rcx, -2*8(%rdi)
20208 movq %r9, -3*8(%rdi)
20209 movq %r8, -4*8(%rdi)
20210 leaq -4*8(%rdi), %rdi
20211 @@ -151,11 +151,11 @@ ENTRY(memmove)
20212 * Move data from 16 bytes to 31 bytes.
20213 */
20214 movq 0*8(%rsi), %r11
20215 - movq 1*8(%rsi), %r10
20216 + movq 1*8(%rsi), %rcx
20217 movq -2*8(%rsi, %rdx), %r9
20218 movq -1*8(%rsi, %rdx), %r8
20219 movq %r11, 0*8(%rdi)
20220 - movq %r10, 1*8(%rdi)
20221 + movq %rcx, 1*8(%rdi)
20222 movq %r9, -2*8(%rdi, %rdx)
20223 movq %r8, -1*8(%rdi, %rdx)
20224 jmp 13f
20225 @@ -167,9 +167,9 @@ ENTRY(memmove)
20226 * Move data from 8 bytes to 15 bytes.
20227 */
20228 movq 0*8(%rsi), %r11
20229 - movq -1*8(%rsi, %rdx), %r10
20230 + movq -1*8(%rsi, %rdx), %r9
20231 movq %r11, 0*8(%rdi)
20232 - movq %r10, -1*8(%rdi, %rdx)
20233 + movq %r9, -1*8(%rdi, %rdx)
20234 jmp 13f
20235 10:
20236 cmpq $4, %rdx
20237 @@ -178,9 +178,9 @@ ENTRY(memmove)
20238 * Move data from 4 bytes to 7 bytes.
20239 */
20240 movl (%rsi), %r11d
20241 - movl -4(%rsi, %rdx), %r10d
20242 + movl -4(%rsi, %rdx), %r9d
20243 movl %r11d, (%rdi)
20244 - movl %r10d, -4(%rdi, %rdx)
20245 + movl %r9d, -4(%rdi, %rdx)
20246 jmp 13f
20247 11:
20248 cmp $2, %rdx
20249 @@ -189,9 +189,9 @@ ENTRY(memmove)
20250 * Move data from 2 bytes to 3 bytes.
20251 */
20252 movw (%rsi), %r11w
20253 - movw -2(%rsi, %rdx), %r10w
20254 + movw -2(%rsi, %rdx), %r9w
20255 movw %r11w, (%rdi)
20256 - movw %r10w, -2(%rdi, %rdx)
20257 + movw %r9w, -2(%rdi, %rdx)
20258 jmp 13f
20259 12:
20260 cmp $1, %rdx
20261 @@ -202,6 +202,7 @@ ENTRY(memmove)
20262 movb (%rsi), %r11b
20263 movb %r11b, (%rdi)
20264 13:
20265 + pax_force_retaddr
20266 retq
20267 CFI_ENDPROC
20268
20269 @@ -210,6 +211,7 @@ ENTRY(memmove)
20270 /* Forward moving data. */
20271 movq %rdx, %rcx
20272 rep movsb
20273 + pax_force_retaddr
20274 retq
20275 .Lmemmove_end_forward_efs:
20276 .previous
20277 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20278 index 79bd454..dff325a 100644
20279 --- a/arch/x86/lib/memset_64.S
20280 +++ b/arch/x86/lib/memset_64.S
20281 @@ -31,6 +31,7 @@
20282 movl %r8d,%ecx
20283 rep stosb
20284 movq %r9,%rax
20285 + pax_force_retaddr
20286 ret
20287 .Lmemset_e:
20288 .previous
20289 @@ -53,6 +54,7 @@
20290 movl %edx,%ecx
20291 rep stosb
20292 movq %r9,%rax
20293 + pax_force_retaddr
20294 ret
20295 .Lmemset_e_e:
20296 .previous
20297 @@ -60,13 +62,13 @@
20298 ENTRY(memset)
20299 ENTRY(__memset)
20300 CFI_STARTPROC
20301 - movq %rdi,%r10
20302 movq %rdx,%r11
20303
20304 /* expand byte value */
20305 movzbl %sil,%ecx
20306 movabs $0x0101010101010101,%rax
20307 mul %rcx /* with rax, clobbers rdx */
20308 + movq %rdi,%rdx
20309
20310 /* align dst */
20311 movl %edi,%r9d
20312 @@ -120,7 +122,8 @@ ENTRY(__memset)
20313 jnz .Lloop_1
20314
20315 .Lende:
20316 - movq %r10,%rax
20317 + movq %rdx,%rax
20318 + pax_force_retaddr
20319 ret
20320
20321 CFI_RESTORE_STATE
20322 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20323 index c9f2d9b..e7fd2c0 100644
20324 --- a/arch/x86/lib/mmx_32.c
20325 +++ b/arch/x86/lib/mmx_32.c
20326 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20327 {
20328 void *p;
20329 int i;
20330 + unsigned long cr0;
20331
20332 if (unlikely(in_interrupt()))
20333 return __memcpy(to, from, len);
20334 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20335 kernel_fpu_begin();
20336
20337 __asm__ __volatile__ (
20338 - "1: prefetch (%0)\n" /* This set is 28 bytes */
20339 - " prefetch 64(%0)\n"
20340 - " prefetch 128(%0)\n"
20341 - " prefetch 192(%0)\n"
20342 - " prefetch 256(%0)\n"
20343 + "1: prefetch (%1)\n" /* This set is 28 bytes */
20344 + " prefetch 64(%1)\n"
20345 + " prefetch 128(%1)\n"
20346 + " prefetch 192(%1)\n"
20347 + " prefetch 256(%1)\n"
20348 "2: \n"
20349 ".section .fixup, \"ax\"\n"
20350 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20351 + "3: \n"
20352 +
20353 +#ifdef CONFIG_PAX_KERNEXEC
20354 + " movl %%cr0, %0\n"
20355 + " movl %0, %%eax\n"
20356 + " andl $0xFFFEFFFF, %%eax\n"
20357 + " movl %%eax, %%cr0\n"
20358 +#endif
20359 +
20360 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20361 +
20362 +#ifdef CONFIG_PAX_KERNEXEC
20363 + " movl %0, %%cr0\n"
20364 +#endif
20365 +
20366 " jmp 2b\n"
20367 ".previous\n"
20368 _ASM_EXTABLE(1b, 3b)
20369 - : : "r" (from));
20370 + : "=&r" (cr0) : "r" (from) : "ax");
20371
20372 for ( ; i > 5; i--) {
20373 __asm__ __volatile__ (
20374 - "1: prefetch 320(%0)\n"
20375 - "2: movq (%0), %%mm0\n"
20376 - " movq 8(%0), %%mm1\n"
20377 - " movq 16(%0), %%mm2\n"
20378 - " movq 24(%0), %%mm3\n"
20379 - " movq %%mm0, (%1)\n"
20380 - " movq %%mm1, 8(%1)\n"
20381 - " movq %%mm2, 16(%1)\n"
20382 - " movq %%mm3, 24(%1)\n"
20383 - " movq 32(%0), %%mm0\n"
20384 - " movq 40(%0), %%mm1\n"
20385 - " movq 48(%0), %%mm2\n"
20386 - " movq 56(%0), %%mm3\n"
20387 - " movq %%mm0, 32(%1)\n"
20388 - " movq %%mm1, 40(%1)\n"
20389 - " movq %%mm2, 48(%1)\n"
20390 - " movq %%mm3, 56(%1)\n"
20391 + "1: prefetch 320(%1)\n"
20392 + "2: movq (%1), %%mm0\n"
20393 + " movq 8(%1), %%mm1\n"
20394 + " movq 16(%1), %%mm2\n"
20395 + " movq 24(%1), %%mm3\n"
20396 + " movq %%mm0, (%2)\n"
20397 + " movq %%mm1, 8(%2)\n"
20398 + " movq %%mm2, 16(%2)\n"
20399 + " movq %%mm3, 24(%2)\n"
20400 + " movq 32(%1), %%mm0\n"
20401 + " movq 40(%1), %%mm1\n"
20402 + " movq 48(%1), %%mm2\n"
20403 + " movq 56(%1), %%mm3\n"
20404 + " movq %%mm0, 32(%2)\n"
20405 + " movq %%mm1, 40(%2)\n"
20406 + " movq %%mm2, 48(%2)\n"
20407 + " movq %%mm3, 56(%2)\n"
20408 ".section .fixup, \"ax\"\n"
20409 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20410 + "3:\n"
20411 +
20412 +#ifdef CONFIG_PAX_KERNEXEC
20413 + " movl %%cr0, %0\n"
20414 + " movl %0, %%eax\n"
20415 + " andl $0xFFFEFFFF, %%eax\n"
20416 + " movl %%eax, %%cr0\n"
20417 +#endif
20418 +
20419 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20420 +
20421 +#ifdef CONFIG_PAX_KERNEXEC
20422 + " movl %0, %%cr0\n"
20423 +#endif
20424 +
20425 " jmp 2b\n"
20426 ".previous\n"
20427 _ASM_EXTABLE(1b, 3b)
20428 - : : "r" (from), "r" (to) : "memory");
20429 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20430
20431 from += 64;
20432 to += 64;
20433 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20434 static void fast_copy_page(void *to, void *from)
20435 {
20436 int i;
20437 + unsigned long cr0;
20438
20439 kernel_fpu_begin();
20440
20441 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20442 * but that is for later. -AV
20443 */
20444 __asm__ __volatile__(
20445 - "1: prefetch (%0)\n"
20446 - " prefetch 64(%0)\n"
20447 - " prefetch 128(%0)\n"
20448 - " prefetch 192(%0)\n"
20449 - " prefetch 256(%0)\n"
20450 + "1: prefetch (%1)\n"
20451 + " prefetch 64(%1)\n"
20452 + " prefetch 128(%1)\n"
20453 + " prefetch 192(%1)\n"
20454 + " prefetch 256(%1)\n"
20455 "2: \n"
20456 ".section .fixup, \"ax\"\n"
20457 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20458 + "3: \n"
20459 +
20460 +#ifdef CONFIG_PAX_KERNEXEC
20461 + " movl %%cr0, %0\n"
20462 + " movl %0, %%eax\n"
20463 + " andl $0xFFFEFFFF, %%eax\n"
20464 + " movl %%eax, %%cr0\n"
20465 +#endif
20466 +
20467 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20468 +
20469 +#ifdef CONFIG_PAX_KERNEXEC
20470 + " movl %0, %%cr0\n"
20471 +#endif
20472 +
20473 " jmp 2b\n"
20474 ".previous\n"
20475 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20476 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20477
20478 for (i = 0; i < (4096-320)/64; i++) {
20479 __asm__ __volatile__ (
20480 - "1: prefetch 320(%0)\n"
20481 - "2: movq (%0), %%mm0\n"
20482 - " movntq %%mm0, (%1)\n"
20483 - " movq 8(%0), %%mm1\n"
20484 - " movntq %%mm1, 8(%1)\n"
20485 - " movq 16(%0), %%mm2\n"
20486 - " movntq %%mm2, 16(%1)\n"
20487 - " movq 24(%0), %%mm3\n"
20488 - " movntq %%mm3, 24(%1)\n"
20489 - " movq 32(%0), %%mm4\n"
20490 - " movntq %%mm4, 32(%1)\n"
20491 - " movq 40(%0), %%mm5\n"
20492 - " movntq %%mm5, 40(%1)\n"
20493 - " movq 48(%0), %%mm6\n"
20494 - " movntq %%mm6, 48(%1)\n"
20495 - " movq 56(%0), %%mm7\n"
20496 - " movntq %%mm7, 56(%1)\n"
20497 + "1: prefetch 320(%1)\n"
20498 + "2: movq (%1), %%mm0\n"
20499 + " movntq %%mm0, (%2)\n"
20500 + " movq 8(%1), %%mm1\n"
20501 + " movntq %%mm1, 8(%2)\n"
20502 + " movq 16(%1), %%mm2\n"
20503 + " movntq %%mm2, 16(%2)\n"
20504 + " movq 24(%1), %%mm3\n"
20505 + " movntq %%mm3, 24(%2)\n"
20506 + " movq 32(%1), %%mm4\n"
20507 + " movntq %%mm4, 32(%2)\n"
20508 + " movq 40(%1), %%mm5\n"
20509 + " movntq %%mm5, 40(%2)\n"
20510 + " movq 48(%1), %%mm6\n"
20511 + " movntq %%mm6, 48(%2)\n"
20512 + " movq 56(%1), %%mm7\n"
20513 + " movntq %%mm7, 56(%2)\n"
20514 ".section .fixup, \"ax\"\n"
20515 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20516 + "3:\n"
20517 +
20518 +#ifdef CONFIG_PAX_KERNEXEC
20519 + " movl %%cr0, %0\n"
20520 + " movl %0, %%eax\n"
20521 + " andl $0xFFFEFFFF, %%eax\n"
20522 + " movl %%eax, %%cr0\n"
20523 +#endif
20524 +
20525 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20526 +
20527 +#ifdef CONFIG_PAX_KERNEXEC
20528 + " movl %0, %%cr0\n"
20529 +#endif
20530 +
20531 " jmp 2b\n"
20532 ".previous\n"
20533 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20534 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20535
20536 from += 64;
20537 to += 64;
20538 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20539 static void fast_copy_page(void *to, void *from)
20540 {
20541 int i;
20542 + unsigned long cr0;
20543
20544 kernel_fpu_begin();
20545
20546 __asm__ __volatile__ (
20547 - "1: prefetch (%0)\n"
20548 - " prefetch 64(%0)\n"
20549 - " prefetch 128(%0)\n"
20550 - " prefetch 192(%0)\n"
20551 - " prefetch 256(%0)\n"
20552 + "1: prefetch (%1)\n"
20553 + " prefetch 64(%1)\n"
20554 + " prefetch 128(%1)\n"
20555 + " prefetch 192(%1)\n"
20556 + " prefetch 256(%1)\n"
20557 "2: \n"
20558 ".section .fixup, \"ax\"\n"
20559 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20560 + "3: \n"
20561 +
20562 +#ifdef CONFIG_PAX_KERNEXEC
20563 + " movl %%cr0, %0\n"
20564 + " movl %0, %%eax\n"
20565 + " andl $0xFFFEFFFF, %%eax\n"
20566 + " movl %%eax, %%cr0\n"
20567 +#endif
20568 +
20569 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20570 +
20571 +#ifdef CONFIG_PAX_KERNEXEC
20572 + " movl %0, %%cr0\n"
20573 +#endif
20574 +
20575 " jmp 2b\n"
20576 ".previous\n"
20577 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20578 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20579
20580 for (i = 0; i < 4096/64; i++) {
20581 __asm__ __volatile__ (
20582 - "1: prefetch 320(%0)\n"
20583 - "2: movq (%0), %%mm0\n"
20584 - " movq 8(%0), %%mm1\n"
20585 - " movq 16(%0), %%mm2\n"
20586 - " movq 24(%0), %%mm3\n"
20587 - " movq %%mm0, (%1)\n"
20588 - " movq %%mm1, 8(%1)\n"
20589 - " movq %%mm2, 16(%1)\n"
20590 - " movq %%mm3, 24(%1)\n"
20591 - " movq 32(%0), %%mm0\n"
20592 - " movq 40(%0), %%mm1\n"
20593 - " movq 48(%0), %%mm2\n"
20594 - " movq 56(%0), %%mm3\n"
20595 - " movq %%mm0, 32(%1)\n"
20596 - " movq %%mm1, 40(%1)\n"
20597 - " movq %%mm2, 48(%1)\n"
20598 - " movq %%mm3, 56(%1)\n"
20599 + "1: prefetch 320(%1)\n"
20600 + "2: movq (%1), %%mm0\n"
20601 + " movq 8(%1), %%mm1\n"
20602 + " movq 16(%1), %%mm2\n"
20603 + " movq 24(%1), %%mm3\n"
20604 + " movq %%mm0, (%2)\n"
20605 + " movq %%mm1, 8(%2)\n"
20606 + " movq %%mm2, 16(%2)\n"
20607 + " movq %%mm3, 24(%2)\n"
20608 + " movq 32(%1), %%mm0\n"
20609 + " movq 40(%1), %%mm1\n"
20610 + " movq 48(%1), %%mm2\n"
20611 + " movq 56(%1), %%mm3\n"
20612 + " movq %%mm0, 32(%2)\n"
20613 + " movq %%mm1, 40(%2)\n"
20614 + " movq %%mm2, 48(%2)\n"
20615 + " movq %%mm3, 56(%2)\n"
20616 ".section .fixup, \"ax\"\n"
20617 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20618 + "3:\n"
20619 +
20620 +#ifdef CONFIG_PAX_KERNEXEC
20621 + " movl %%cr0, %0\n"
20622 + " movl %0, %%eax\n"
20623 + " andl $0xFFFEFFFF, %%eax\n"
20624 + " movl %%eax, %%cr0\n"
20625 +#endif
20626 +
20627 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20628 +
20629 +#ifdef CONFIG_PAX_KERNEXEC
20630 + " movl %0, %%cr0\n"
20631 +#endif
20632 +
20633 " jmp 2b\n"
20634 ".previous\n"
20635 _ASM_EXTABLE(1b, 3b)
20636 - : : "r" (from), "r" (to) : "memory");
20637 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20638
20639 from += 64;
20640 to += 64;
20641 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20642 index 69fa106..adda88b 100644
20643 --- a/arch/x86/lib/msr-reg.S
20644 +++ b/arch/x86/lib/msr-reg.S
20645 @@ -3,6 +3,7 @@
20646 #include <asm/dwarf2.h>
20647 #include <asm/asm.h>
20648 #include <asm/msr.h>
20649 +#include <asm/alternative-asm.h>
20650
20651 #ifdef CONFIG_X86_64
20652 /*
20653 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20654 CFI_STARTPROC
20655 pushq_cfi %rbx
20656 pushq_cfi %rbp
20657 - movq %rdi, %r10 /* Save pointer */
20658 + movq %rdi, %r9 /* Save pointer */
20659 xorl %r11d, %r11d /* Return value */
20660 movl (%rdi), %eax
20661 movl 4(%rdi), %ecx
20662 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20663 movl 28(%rdi), %edi
20664 CFI_REMEMBER_STATE
20665 1: \op
20666 -2: movl %eax, (%r10)
20667 +2: movl %eax, (%r9)
20668 movl %r11d, %eax /* Return value */
20669 - movl %ecx, 4(%r10)
20670 - movl %edx, 8(%r10)
20671 - movl %ebx, 12(%r10)
20672 - movl %ebp, 20(%r10)
20673 - movl %esi, 24(%r10)
20674 - movl %edi, 28(%r10)
20675 + movl %ecx, 4(%r9)
20676 + movl %edx, 8(%r9)
20677 + movl %ebx, 12(%r9)
20678 + movl %ebp, 20(%r9)
20679 + movl %esi, 24(%r9)
20680 + movl %edi, 28(%r9)
20681 popq_cfi %rbp
20682 popq_cfi %rbx
20683 + pax_force_retaddr
20684 ret
20685 3:
20686 CFI_RESTORE_STATE
20687 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20688 index 36b0d15..d381858 100644
20689 --- a/arch/x86/lib/putuser.S
20690 +++ b/arch/x86/lib/putuser.S
20691 @@ -15,7 +15,9 @@
20692 #include <asm/thread_info.h>
20693 #include <asm/errno.h>
20694 #include <asm/asm.h>
20695 -
20696 +#include <asm/segment.h>
20697 +#include <asm/pgtable.h>
20698 +#include <asm/alternative-asm.h>
20699
20700 /*
20701 * __put_user_X
20702 @@ -29,52 +31,119 @@
20703 * as they get called from within inline assembly.
20704 */
20705
20706 -#define ENTER CFI_STARTPROC ; \
20707 - GET_THREAD_INFO(%_ASM_BX)
20708 -#define EXIT ret ; \
20709 +#define ENTER CFI_STARTPROC
20710 +#define EXIT pax_force_retaddr; ret ; \
20711 CFI_ENDPROC
20712
20713 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20714 +#define _DEST %_ASM_CX,%_ASM_BX
20715 +#else
20716 +#define _DEST %_ASM_CX
20717 +#endif
20718 +
20719 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20720 +#define __copyuser_seg gs;
20721 +#else
20722 +#define __copyuser_seg
20723 +#endif
20724 +
20725 .text
20726 ENTRY(__put_user_1)
20727 ENTER
20728 +
20729 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20730 + GET_THREAD_INFO(%_ASM_BX)
20731 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20732 jae bad_put_user
20733 -1: movb %al,(%_ASM_CX)
20734 +
20735 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20736 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20737 + cmp %_ASM_BX,%_ASM_CX
20738 + jb 1234f
20739 + xor %ebx,%ebx
20740 +1234:
20741 +#endif
20742 +
20743 +#endif
20744 +
20745 +1: __copyuser_seg movb %al,(_DEST)
20746 xor %eax,%eax
20747 EXIT
20748 ENDPROC(__put_user_1)
20749
20750 ENTRY(__put_user_2)
20751 ENTER
20752 +
20753 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20754 + GET_THREAD_INFO(%_ASM_BX)
20755 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20756 sub $1,%_ASM_BX
20757 cmp %_ASM_BX,%_ASM_CX
20758 jae bad_put_user
20759 -2: movw %ax,(%_ASM_CX)
20760 +
20761 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20762 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20763 + cmp %_ASM_BX,%_ASM_CX
20764 + jb 1234f
20765 + xor %ebx,%ebx
20766 +1234:
20767 +#endif
20768 +
20769 +#endif
20770 +
20771 +2: __copyuser_seg movw %ax,(_DEST)
20772 xor %eax,%eax
20773 EXIT
20774 ENDPROC(__put_user_2)
20775
20776 ENTRY(__put_user_4)
20777 ENTER
20778 +
20779 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20780 + GET_THREAD_INFO(%_ASM_BX)
20781 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20782 sub $3,%_ASM_BX
20783 cmp %_ASM_BX,%_ASM_CX
20784 jae bad_put_user
20785 -3: movl %eax,(%_ASM_CX)
20786 +
20787 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20788 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20789 + cmp %_ASM_BX,%_ASM_CX
20790 + jb 1234f
20791 + xor %ebx,%ebx
20792 +1234:
20793 +#endif
20794 +
20795 +#endif
20796 +
20797 +3: __copyuser_seg movl %eax,(_DEST)
20798 xor %eax,%eax
20799 EXIT
20800 ENDPROC(__put_user_4)
20801
20802 ENTRY(__put_user_8)
20803 ENTER
20804 +
20805 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20806 + GET_THREAD_INFO(%_ASM_BX)
20807 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20808 sub $7,%_ASM_BX
20809 cmp %_ASM_BX,%_ASM_CX
20810 jae bad_put_user
20811 -4: mov %_ASM_AX,(%_ASM_CX)
20812 +
20813 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20814 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20815 + cmp %_ASM_BX,%_ASM_CX
20816 + jb 1234f
20817 + xor %ebx,%ebx
20818 +1234:
20819 +#endif
20820 +
20821 +#endif
20822 +
20823 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
20824 #ifdef CONFIG_X86_32
20825 -5: movl %edx,4(%_ASM_CX)
20826 +5: __copyuser_seg movl %edx,4(_DEST)
20827 #endif
20828 xor %eax,%eax
20829 EXIT
20830 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20831 index 1cad221..de671ee 100644
20832 --- a/arch/x86/lib/rwlock.S
20833 +++ b/arch/x86/lib/rwlock.S
20834 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20835 FRAME
20836 0: LOCK_PREFIX
20837 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20838 +
20839 +#ifdef CONFIG_PAX_REFCOUNT
20840 + jno 1234f
20841 + LOCK_PREFIX
20842 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20843 + int $4
20844 +1234:
20845 + _ASM_EXTABLE(1234b, 1234b)
20846 +#endif
20847 +
20848 1: rep; nop
20849 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20850 jne 1b
20851 LOCK_PREFIX
20852 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20853 +
20854 +#ifdef CONFIG_PAX_REFCOUNT
20855 + jno 1234f
20856 + LOCK_PREFIX
20857 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20858 + int $4
20859 +1234:
20860 + _ASM_EXTABLE(1234b, 1234b)
20861 +#endif
20862 +
20863 jnz 0b
20864 ENDFRAME
20865 + pax_force_retaddr
20866 ret
20867 CFI_ENDPROC
20868 END(__write_lock_failed)
20869 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20870 FRAME
20871 0: LOCK_PREFIX
20872 READ_LOCK_SIZE(inc) (%__lock_ptr)
20873 +
20874 +#ifdef CONFIG_PAX_REFCOUNT
20875 + jno 1234f
20876 + LOCK_PREFIX
20877 + READ_LOCK_SIZE(dec) (%__lock_ptr)
20878 + int $4
20879 +1234:
20880 + _ASM_EXTABLE(1234b, 1234b)
20881 +#endif
20882 +
20883 1: rep; nop
20884 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20885 js 1b
20886 LOCK_PREFIX
20887 READ_LOCK_SIZE(dec) (%__lock_ptr)
20888 +
20889 +#ifdef CONFIG_PAX_REFCOUNT
20890 + jno 1234f
20891 + LOCK_PREFIX
20892 + READ_LOCK_SIZE(inc) (%__lock_ptr)
20893 + int $4
20894 +1234:
20895 + _ASM_EXTABLE(1234b, 1234b)
20896 +#endif
20897 +
20898 js 0b
20899 ENDFRAME
20900 + pax_force_retaddr
20901 ret
20902 CFI_ENDPROC
20903 END(__read_lock_failed)
20904 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20905 index 5dff5f0..cadebf4 100644
20906 --- a/arch/x86/lib/rwsem.S
20907 +++ b/arch/x86/lib/rwsem.S
20908 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20909 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20910 CFI_RESTORE __ASM_REG(dx)
20911 restore_common_regs
20912 + pax_force_retaddr
20913 ret
20914 CFI_ENDPROC
20915 ENDPROC(call_rwsem_down_read_failed)
20916 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20917 movq %rax,%rdi
20918 call rwsem_down_write_failed
20919 restore_common_regs
20920 + pax_force_retaddr
20921 ret
20922 CFI_ENDPROC
20923 ENDPROC(call_rwsem_down_write_failed)
20924 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20925 movq %rax,%rdi
20926 call rwsem_wake
20927 restore_common_regs
20928 -1: ret
20929 +1: pax_force_retaddr
20930 + ret
20931 CFI_ENDPROC
20932 ENDPROC(call_rwsem_wake)
20933
20934 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20935 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20936 CFI_RESTORE __ASM_REG(dx)
20937 restore_common_regs
20938 + pax_force_retaddr
20939 ret
20940 CFI_ENDPROC
20941 ENDPROC(call_rwsem_downgrade_wake)
20942 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20943 index a63efd6..ccecad8 100644
20944 --- a/arch/x86/lib/thunk_64.S
20945 +++ b/arch/x86/lib/thunk_64.S
20946 @@ -8,6 +8,7 @@
20947 #include <linux/linkage.h>
20948 #include <asm/dwarf2.h>
20949 #include <asm/calling.h>
20950 +#include <asm/alternative-asm.h>
20951
20952 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20953 .macro THUNK name, func, put_ret_addr_in_rdi=0
20954 @@ -41,5 +42,6 @@
20955 SAVE_ARGS
20956 restore:
20957 RESTORE_ARGS
20958 + pax_force_retaddr
20959 ret
20960 CFI_ENDPROC
20961 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20962 index e218d5d..35679b4 100644
20963 --- a/arch/x86/lib/usercopy_32.c
20964 +++ b/arch/x86/lib/usercopy_32.c
20965 @@ -43,7 +43,7 @@ do { \
20966 __asm__ __volatile__( \
20967 " testl %1,%1\n" \
20968 " jz 2f\n" \
20969 - "0: lodsb\n" \
20970 + "0: "__copyuser_seg"lodsb\n" \
20971 " stosb\n" \
20972 " testb %%al,%%al\n" \
20973 " jz 1f\n" \
20974 @@ -128,10 +128,12 @@ do { \
20975 int __d0; \
20976 might_fault(); \
20977 __asm__ __volatile__( \
20978 + __COPYUSER_SET_ES \
20979 "0: rep; stosl\n" \
20980 " movl %2,%0\n" \
20981 "1: rep; stosb\n" \
20982 "2:\n" \
20983 + __COPYUSER_RESTORE_ES \
20984 ".section .fixup,\"ax\"\n" \
20985 "3: lea 0(%2,%0,4),%0\n" \
20986 " jmp 2b\n" \
20987 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20988 might_fault();
20989
20990 __asm__ __volatile__(
20991 + __COPYUSER_SET_ES
20992 " testl %0, %0\n"
20993 " jz 3f\n"
20994 " andl %0,%%ecx\n"
20995 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20996 " subl %%ecx,%0\n"
20997 " addl %0,%%eax\n"
20998 "1:\n"
20999 + __COPYUSER_RESTORE_ES
21000 ".section .fixup,\"ax\"\n"
21001 "2: xorl %%eax,%%eax\n"
21002 " jmp 1b\n"
21003 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
21004
21005 #ifdef CONFIG_X86_INTEL_USERCOPY
21006 static unsigned long
21007 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
21008 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
21009 {
21010 int d0, d1;
21011 __asm__ __volatile__(
21012 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
21013 " .align 2,0x90\n"
21014 "3: movl 0(%4), %%eax\n"
21015 "4: movl 4(%4), %%edx\n"
21016 - "5: movl %%eax, 0(%3)\n"
21017 - "6: movl %%edx, 4(%3)\n"
21018 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
21019 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
21020 "7: movl 8(%4), %%eax\n"
21021 "8: movl 12(%4),%%edx\n"
21022 - "9: movl %%eax, 8(%3)\n"
21023 - "10: movl %%edx, 12(%3)\n"
21024 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
21025 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
21026 "11: movl 16(%4), %%eax\n"
21027 "12: movl 20(%4), %%edx\n"
21028 - "13: movl %%eax, 16(%3)\n"
21029 - "14: movl %%edx, 20(%3)\n"
21030 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
21031 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
21032 "15: movl 24(%4), %%eax\n"
21033 "16: movl 28(%4), %%edx\n"
21034 - "17: movl %%eax, 24(%3)\n"
21035 - "18: movl %%edx, 28(%3)\n"
21036 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
21037 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
21038 "19: movl 32(%4), %%eax\n"
21039 "20: movl 36(%4), %%edx\n"
21040 - "21: movl %%eax, 32(%3)\n"
21041 - "22: movl %%edx, 36(%3)\n"
21042 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
21043 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
21044 "23: movl 40(%4), %%eax\n"
21045 "24: movl 44(%4), %%edx\n"
21046 - "25: movl %%eax, 40(%3)\n"
21047 - "26: movl %%edx, 44(%3)\n"
21048 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
21049 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
21050 "27: movl 48(%4), %%eax\n"
21051 "28: movl 52(%4), %%edx\n"
21052 - "29: movl %%eax, 48(%3)\n"
21053 - "30: movl %%edx, 52(%3)\n"
21054 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
21055 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
21056 "31: movl 56(%4), %%eax\n"
21057 "32: movl 60(%4), %%edx\n"
21058 - "33: movl %%eax, 56(%3)\n"
21059 - "34: movl %%edx, 60(%3)\n"
21060 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
21061 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
21062 " addl $-64, %0\n"
21063 " addl $64, %4\n"
21064 " addl $64, %3\n"
21065 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
21066 " shrl $2, %0\n"
21067 " andl $3, %%eax\n"
21068 " cld\n"
21069 + __COPYUSER_SET_ES
21070 "99: rep; movsl\n"
21071 "36: movl %%eax, %0\n"
21072 "37: rep; movsb\n"
21073 "100:\n"
21074 + __COPYUSER_RESTORE_ES
21075 + ".section .fixup,\"ax\"\n"
21076 + "101: lea 0(%%eax,%0,4),%0\n"
21077 + " jmp 100b\n"
21078 + ".previous\n"
21079 + ".section __ex_table,\"a\"\n"
21080 + " .align 4\n"
21081 + " .long 1b,100b\n"
21082 + " .long 2b,100b\n"
21083 + " .long 3b,100b\n"
21084 + " .long 4b,100b\n"
21085 + " .long 5b,100b\n"
21086 + " .long 6b,100b\n"
21087 + " .long 7b,100b\n"
21088 + " .long 8b,100b\n"
21089 + " .long 9b,100b\n"
21090 + " .long 10b,100b\n"
21091 + " .long 11b,100b\n"
21092 + " .long 12b,100b\n"
21093 + " .long 13b,100b\n"
21094 + " .long 14b,100b\n"
21095 + " .long 15b,100b\n"
21096 + " .long 16b,100b\n"
21097 + " .long 17b,100b\n"
21098 + " .long 18b,100b\n"
21099 + " .long 19b,100b\n"
21100 + " .long 20b,100b\n"
21101 + " .long 21b,100b\n"
21102 + " .long 22b,100b\n"
21103 + " .long 23b,100b\n"
21104 + " .long 24b,100b\n"
21105 + " .long 25b,100b\n"
21106 + " .long 26b,100b\n"
21107 + " .long 27b,100b\n"
21108 + " .long 28b,100b\n"
21109 + " .long 29b,100b\n"
21110 + " .long 30b,100b\n"
21111 + " .long 31b,100b\n"
21112 + " .long 32b,100b\n"
21113 + " .long 33b,100b\n"
21114 + " .long 34b,100b\n"
21115 + " .long 35b,100b\n"
21116 + " .long 36b,100b\n"
21117 + " .long 37b,100b\n"
21118 + " .long 99b,101b\n"
21119 + ".previous"
21120 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
21121 + : "1"(to), "2"(from), "0"(size)
21122 + : "eax", "edx", "memory");
21123 + return size;
21124 +}
21125 +
21126 +static unsigned long
21127 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
21128 +{
21129 + int d0, d1;
21130 + __asm__ __volatile__(
21131 + " .align 2,0x90\n"
21132 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
21133 + " cmpl $67, %0\n"
21134 + " jbe 3f\n"
21135 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
21136 + " .align 2,0x90\n"
21137 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
21138 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
21139 + "5: movl %%eax, 0(%3)\n"
21140 + "6: movl %%edx, 4(%3)\n"
21141 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
21142 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
21143 + "9: movl %%eax, 8(%3)\n"
21144 + "10: movl %%edx, 12(%3)\n"
21145 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
21146 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
21147 + "13: movl %%eax, 16(%3)\n"
21148 + "14: movl %%edx, 20(%3)\n"
21149 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
21150 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
21151 + "17: movl %%eax, 24(%3)\n"
21152 + "18: movl %%edx, 28(%3)\n"
21153 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
21154 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
21155 + "21: movl %%eax, 32(%3)\n"
21156 + "22: movl %%edx, 36(%3)\n"
21157 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
21158 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
21159 + "25: movl %%eax, 40(%3)\n"
21160 + "26: movl %%edx, 44(%3)\n"
21161 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
21162 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
21163 + "29: movl %%eax, 48(%3)\n"
21164 + "30: movl %%edx, 52(%3)\n"
21165 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
21166 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
21167 + "33: movl %%eax, 56(%3)\n"
21168 + "34: movl %%edx, 60(%3)\n"
21169 + " addl $-64, %0\n"
21170 + " addl $64, %4\n"
21171 + " addl $64, %3\n"
21172 + " cmpl $63, %0\n"
21173 + " ja 1b\n"
21174 + "35: movl %0, %%eax\n"
21175 + " shrl $2, %0\n"
21176 + " andl $3, %%eax\n"
21177 + " cld\n"
21178 + "99: rep; "__copyuser_seg" movsl\n"
21179 + "36: movl %%eax, %0\n"
21180 + "37: rep; "__copyuser_seg" movsb\n"
21181 + "100:\n"
21182 ".section .fixup,\"ax\"\n"
21183 "101: lea 0(%%eax,%0,4),%0\n"
21184 " jmp 100b\n"
21185 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21186 int d0, d1;
21187 __asm__ __volatile__(
21188 " .align 2,0x90\n"
21189 - "0: movl 32(%4), %%eax\n"
21190 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21191 " cmpl $67, %0\n"
21192 " jbe 2f\n"
21193 - "1: movl 64(%4), %%eax\n"
21194 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21195 " .align 2,0x90\n"
21196 - "2: movl 0(%4), %%eax\n"
21197 - "21: movl 4(%4), %%edx\n"
21198 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21199 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21200 " movl %%eax, 0(%3)\n"
21201 " movl %%edx, 4(%3)\n"
21202 - "3: movl 8(%4), %%eax\n"
21203 - "31: movl 12(%4),%%edx\n"
21204 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21205 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21206 " movl %%eax, 8(%3)\n"
21207 " movl %%edx, 12(%3)\n"
21208 - "4: movl 16(%4), %%eax\n"
21209 - "41: movl 20(%4), %%edx\n"
21210 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21211 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21212 " movl %%eax, 16(%3)\n"
21213 " movl %%edx, 20(%3)\n"
21214 - "10: movl 24(%4), %%eax\n"
21215 - "51: movl 28(%4), %%edx\n"
21216 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21217 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21218 " movl %%eax, 24(%3)\n"
21219 " movl %%edx, 28(%3)\n"
21220 - "11: movl 32(%4), %%eax\n"
21221 - "61: movl 36(%4), %%edx\n"
21222 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21223 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21224 " movl %%eax, 32(%3)\n"
21225 " movl %%edx, 36(%3)\n"
21226 - "12: movl 40(%4), %%eax\n"
21227 - "71: movl 44(%4), %%edx\n"
21228 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21229 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21230 " movl %%eax, 40(%3)\n"
21231 " movl %%edx, 44(%3)\n"
21232 - "13: movl 48(%4), %%eax\n"
21233 - "81: movl 52(%4), %%edx\n"
21234 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21235 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21236 " movl %%eax, 48(%3)\n"
21237 " movl %%edx, 52(%3)\n"
21238 - "14: movl 56(%4), %%eax\n"
21239 - "91: movl 60(%4), %%edx\n"
21240 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21241 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21242 " movl %%eax, 56(%3)\n"
21243 " movl %%edx, 60(%3)\n"
21244 " addl $-64, %0\n"
21245 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21246 " shrl $2, %0\n"
21247 " andl $3, %%eax\n"
21248 " cld\n"
21249 - "6: rep; movsl\n"
21250 + "6: rep; "__copyuser_seg" movsl\n"
21251 " movl %%eax,%0\n"
21252 - "7: rep; movsb\n"
21253 + "7: rep; "__copyuser_seg" movsb\n"
21254 "8:\n"
21255 ".section .fixup,\"ax\"\n"
21256 "9: lea 0(%%eax,%0,4),%0\n"
21257 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21258
21259 __asm__ __volatile__(
21260 " .align 2,0x90\n"
21261 - "0: movl 32(%4), %%eax\n"
21262 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21263 " cmpl $67, %0\n"
21264 " jbe 2f\n"
21265 - "1: movl 64(%4), %%eax\n"
21266 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21267 " .align 2,0x90\n"
21268 - "2: movl 0(%4), %%eax\n"
21269 - "21: movl 4(%4), %%edx\n"
21270 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21271 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21272 " movnti %%eax, 0(%3)\n"
21273 " movnti %%edx, 4(%3)\n"
21274 - "3: movl 8(%4), %%eax\n"
21275 - "31: movl 12(%4),%%edx\n"
21276 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21277 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21278 " movnti %%eax, 8(%3)\n"
21279 " movnti %%edx, 12(%3)\n"
21280 - "4: movl 16(%4), %%eax\n"
21281 - "41: movl 20(%4), %%edx\n"
21282 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21283 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21284 " movnti %%eax, 16(%3)\n"
21285 " movnti %%edx, 20(%3)\n"
21286 - "10: movl 24(%4), %%eax\n"
21287 - "51: movl 28(%4), %%edx\n"
21288 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21289 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21290 " movnti %%eax, 24(%3)\n"
21291 " movnti %%edx, 28(%3)\n"
21292 - "11: movl 32(%4), %%eax\n"
21293 - "61: movl 36(%4), %%edx\n"
21294 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21295 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21296 " movnti %%eax, 32(%3)\n"
21297 " movnti %%edx, 36(%3)\n"
21298 - "12: movl 40(%4), %%eax\n"
21299 - "71: movl 44(%4), %%edx\n"
21300 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21301 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21302 " movnti %%eax, 40(%3)\n"
21303 " movnti %%edx, 44(%3)\n"
21304 - "13: movl 48(%4), %%eax\n"
21305 - "81: movl 52(%4), %%edx\n"
21306 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21307 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21308 " movnti %%eax, 48(%3)\n"
21309 " movnti %%edx, 52(%3)\n"
21310 - "14: movl 56(%4), %%eax\n"
21311 - "91: movl 60(%4), %%edx\n"
21312 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21313 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21314 " movnti %%eax, 56(%3)\n"
21315 " movnti %%edx, 60(%3)\n"
21316 " addl $-64, %0\n"
21317 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21318 " shrl $2, %0\n"
21319 " andl $3, %%eax\n"
21320 " cld\n"
21321 - "6: rep; movsl\n"
21322 + "6: rep; "__copyuser_seg" movsl\n"
21323 " movl %%eax,%0\n"
21324 - "7: rep; movsb\n"
21325 + "7: rep; "__copyuser_seg" movsb\n"
21326 "8:\n"
21327 ".section .fixup,\"ax\"\n"
21328 "9: lea 0(%%eax,%0,4),%0\n"
21329 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21330
21331 __asm__ __volatile__(
21332 " .align 2,0x90\n"
21333 - "0: movl 32(%4), %%eax\n"
21334 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21335 " cmpl $67, %0\n"
21336 " jbe 2f\n"
21337 - "1: movl 64(%4), %%eax\n"
21338 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21339 " .align 2,0x90\n"
21340 - "2: movl 0(%4), %%eax\n"
21341 - "21: movl 4(%4), %%edx\n"
21342 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21343 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21344 " movnti %%eax, 0(%3)\n"
21345 " movnti %%edx, 4(%3)\n"
21346 - "3: movl 8(%4), %%eax\n"
21347 - "31: movl 12(%4),%%edx\n"
21348 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21349 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21350 " movnti %%eax, 8(%3)\n"
21351 " movnti %%edx, 12(%3)\n"
21352 - "4: movl 16(%4), %%eax\n"
21353 - "41: movl 20(%4), %%edx\n"
21354 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21355 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21356 " movnti %%eax, 16(%3)\n"
21357 " movnti %%edx, 20(%3)\n"
21358 - "10: movl 24(%4), %%eax\n"
21359 - "51: movl 28(%4), %%edx\n"
21360 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21361 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21362 " movnti %%eax, 24(%3)\n"
21363 " movnti %%edx, 28(%3)\n"
21364 - "11: movl 32(%4), %%eax\n"
21365 - "61: movl 36(%4), %%edx\n"
21366 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21367 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21368 " movnti %%eax, 32(%3)\n"
21369 " movnti %%edx, 36(%3)\n"
21370 - "12: movl 40(%4), %%eax\n"
21371 - "71: movl 44(%4), %%edx\n"
21372 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21373 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21374 " movnti %%eax, 40(%3)\n"
21375 " movnti %%edx, 44(%3)\n"
21376 - "13: movl 48(%4), %%eax\n"
21377 - "81: movl 52(%4), %%edx\n"
21378 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21379 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21380 " movnti %%eax, 48(%3)\n"
21381 " movnti %%edx, 52(%3)\n"
21382 - "14: movl 56(%4), %%eax\n"
21383 - "91: movl 60(%4), %%edx\n"
21384 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21385 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21386 " movnti %%eax, 56(%3)\n"
21387 " movnti %%edx, 60(%3)\n"
21388 " addl $-64, %0\n"
21389 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21390 " shrl $2, %0\n"
21391 " andl $3, %%eax\n"
21392 " cld\n"
21393 - "6: rep; movsl\n"
21394 + "6: rep; "__copyuser_seg" movsl\n"
21395 " movl %%eax,%0\n"
21396 - "7: rep; movsb\n"
21397 + "7: rep; "__copyuser_seg" movsb\n"
21398 "8:\n"
21399 ".section .fixup,\"ax\"\n"
21400 "9: lea 0(%%eax,%0,4),%0\n"
21401 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21402 */
21403 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21404 unsigned long size);
21405 -unsigned long __copy_user_intel(void __user *to, const void *from,
21406 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21407 + unsigned long size);
21408 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21409 unsigned long size);
21410 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21411 const void __user *from, unsigned long size);
21412 #endif /* CONFIG_X86_INTEL_USERCOPY */
21413
21414 /* Generic arbitrary sized copy. */
21415 -#define __copy_user(to, from, size) \
21416 +#define __copy_user(to, from, size, prefix, set, restore) \
21417 do { \
21418 int __d0, __d1, __d2; \
21419 __asm__ __volatile__( \
21420 + set \
21421 " cmp $7,%0\n" \
21422 " jbe 1f\n" \
21423 " movl %1,%0\n" \
21424 " negl %0\n" \
21425 " andl $7,%0\n" \
21426 " subl %0,%3\n" \
21427 - "4: rep; movsb\n" \
21428 + "4: rep; "prefix"movsb\n" \
21429 " movl %3,%0\n" \
21430 " shrl $2,%0\n" \
21431 " andl $3,%3\n" \
21432 " .align 2,0x90\n" \
21433 - "0: rep; movsl\n" \
21434 + "0: rep; "prefix"movsl\n" \
21435 " movl %3,%0\n" \
21436 - "1: rep; movsb\n" \
21437 + "1: rep; "prefix"movsb\n" \
21438 "2:\n" \
21439 + restore \
21440 ".section .fixup,\"ax\"\n" \
21441 "5: addl %3,%0\n" \
21442 " jmp 2b\n" \
21443 @@ -682,14 +799,14 @@ do { \
21444 " negl %0\n" \
21445 " andl $7,%0\n" \
21446 " subl %0,%3\n" \
21447 - "4: rep; movsb\n" \
21448 + "4: rep; "__copyuser_seg"movsb\n" \
21449 " movl %3,%0\n" \
21450 " shrl $2,%0\n" \
21451 " andl $3,%3\n" \
21452 " .align 2,0x90\n" \
21453 - "0: rep; movsl\n" \
21454 + "0: rep; "__copyuser_seg"movsl\n" \
21455 " movl %3,%0\n" \
21456 - "1: rep; movsb\n" \
21457 + "1: rep; "__copyuser_seg"movsb\n" \
21458 "2:\n" \
21459 ".section .fixup,\"ax\"\n" \
21460 "5: addl %3,%0\n" \
21461 @@ -775,9 +892,9 @@ survive:
21462 }
21463 #endif
21464 if (movsl_is_ok(to, from, n))
21465 - __copy_user(to, from, n);
21466 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21467 else
21468 - n = __copy_user_intel(to, from, n);
21469 + n = __generic_copy_to_user_intel(to, from, n);
21470 return n;
21471 }
21472 EXPORT_SYMBOL(__copy_to_user_ll);
21473 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21474 unsigned long n)
21475 {
21476 if (movsl_is_ok(to, from, n))
21477 - __copy_user(to, from, n);
21478 + __copy_user(to, from, n, __copyuser_seg, "", "");
21479 else
21480 - n = __copy_user_intel((void __user *)to,
21481 - (const void *)from, n);
21482 + n = __generic_copy_from_user_intel(to, from, n);
21483 return n;
21484 }
21485 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21486 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21487 if (n > 64 && cpu_has_xmm2)
21488 n = __copy_user_intel_nocache(to, from, n);
21489 else
21490 - __copy_user(to, from, n);
21491 + __copy_user(to, from, n, __copyuser_seg, "", "");
21492 #else
21493 - __copy_user(to, from, n);
21494 + __copy_user(to, from, n, __copyuser_seg, "", "");
21495 #endif
21496 return n;
21497 }
21498 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21499
21500 -/**
21501 - * copy_to_user: - Copy a block of data into user space.
21502 - * @to: Destination address, in user space.
21503 - * @from: Source address, in kernel space.
21504 - * @n: Number of bytes to copy.
21505 - *
21506 - * Context: User context only. This function may sleep.
21507 - *
21508 - * Copy data from kernel space to user space.
21509 - *
21510 - * Returns number of bytes that could not be copied.
21511 - * On success, this will be zero.
21512 - */
21513 -unsigned long
21514 -copy_to_user(void __user *to, const void *from, unsigned long n)
21515 -{
21516 - if (access_ok(VERIFY_WRITE, to, n))
21517 - n = __copy_to_user(to, from, n);
21518 - return n;
21519 -}
21520 -EXPORT_SYMBOL(copy_to_user);
21521 -
21522 -/**
21523 - * copy_from_user: - Copy a block of data from user space.
21524 - * @to: Destination address, in kernel space.
21525 - * @from: Source address, in user space.
21526 - * @n: Number of bytes to copy.
21527 - *
21528 - * Context: User context only. This function may sleep.
21529 - *
21530 - * Copy data from user space to kernel space.
21531 - *
21532 - * Returns number of bytes that could not be copied.
21533 - * On success, this will be zero.
21534 - *
21535 - * If some data could not be copied, this function will pad the copied
21536 - * data to the requested size using zero bytes.
21537 - */
21538 -unsigned long
21539 -_copy_from_user(void *to, const void __user *from, unsigned long n)
21540 -{
21541 - if (access_ok(VERIFY_READ, from, n))
21542 - n = __copy_from_user(to, from, n);
21543 - else
21544 - memset(to, 0, n);
21545 - return n;
21546 -}
21547 -EXPORT_SYMBOL(_copy_from_user);
21548 -
21549 void copy_from_user_overflow(void)
21550 {
21551 WARN(1, "Buffer overflow detected!\n");
21552 }
21553 EXPORT_SYMBOL(copy_from_user_overflow);
21554 +
21555 +void copy_to_user_overflow(void)
21556 +{
21557 + WARN(1, "Buffer overflow detected!\n");
21558 +}
21559 +EXPORT_SYMBOL(copy_to_user_overflow);
21560 +
21561 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21562 +void __set_fs(mm_segment_t x)
21563 +{
21564 + switch (x.seg) {
21565 + case 0:
21566 + loadsegment(gs, 0);
21567 + break;
21568 + case TASK_SIZE_MAX:
21569 + loadsegment(gs, __USER_DS);
21570 + break;
21571 + case -1UL:
21572 + loadsegment(gs, __KERNEL_DS);
21573 + break;
21574 + default:
21575 + BUG();
21576 + }
21577 + return;
21578 +}
21579 +EXPORT_SYMBOL(__set_fs);
21580 +
21581 +void set_fs(mm_segment_t x)
21582 +{
21583 + current_thread_info()->addr_limit = x;
21584 + __set_fs(x);
21585 +}
21586 +EXPORT_SYMBOL(set_fs);
21587 +#endif
21588 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21589 index b7c2849..8633ad8 100644
21590 --- a/arch/x86/lib/usercopy_64.c
21591 +++ b/arch/x86/lib/usercopy_64.c
21592 @@ -42,6 +42,12 @@ long
21593 __strncpy_from_user(char *dst, const char __user *src, long count)
21594 {
21595 long res;
21596 +
21597 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21598 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21599 + src += PAX_USER_SHADOW_BASE;
21600 +#endif
21601 +
21602 __do_strncpy_from_user(dst, src, count, res);
21603 return res;
21604 }
21605 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21606 {
21607 long __d0;
21608 might_fault();
21609 +
21610 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21611 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21612 + addr += PAX_USER_SHADOW_BASE;
21613 +#endif
21614 +
21615 /* no memory constraint because it doesn't change any memory gcc knows
21616 about */
21617 asm volatile(
21618 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21619 }
21620 EXPORT_SYMBOL(strlen_user);
21621
21622 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21623 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21624 {
21625 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21626 - return copy_user_generic((__force void *)to, (__force void *)from, len);
21627 - }
21628 - return len;
21629 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21630 +
21631 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21632 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21633 + to += PAX_USER_SHADOW_BASE;
21634 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21635 + from += PAX_USER_SHADOW_BASE;
21636 +#endif
21637 +
21638 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21639 + }
21640 + return len;
21641 }
21642 EXPORT_SYMBOL(copy_in_user);
21643
21644 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21645 * it is not necessary to optimize tail handling.
21646 */
21647 unsigned long
21648 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21649 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21650 {
21651 char c;
21652 unsigned zero_len;
21653 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21654 index d0474ad..36e9257 100644
21655 --- a/arch/x86/mm/extable.c
21656 +++ b/arch/x86/mm/extable.c
21657 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21658 const struct exception_table_entry *fixup;
21659
21660 #ifdef CONFIG_PNPBIOS
21661 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21662 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21663 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21664 extern u32 pnp_bios_is_utter_crap;
21665 pnp_bios_is_utter_crap = 1;
21666 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21667 index 5db0490..2ddce45 100644
21668 --- a/arch/x86/mm/fault.c
21669 +++ b/arch/x86/mm/fault.c
21670 @@ -13,11 +13,18 @@
21671 #include <linux/perf_event.h> /* perf_sw_event */
21672 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21673 #include <linux/prefetch.h> /* prefetchw */
21674 +#include <linux/unistd.h>
21675 +#include <linux/compiler.h>
21676
21677 #include <asm/traps.h> /* dotraplinkage, ... */
21678 #include <asm/pgalloc.h> /* pgd_*(), ... */
21679 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21680 #include <asm/fixmap.h> /* VSYSCALL_START */
21681 +#include <asm/tlbflush.h>
21682 +
21683 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21684 +#include <asm/stacktrace.h>
21685 +#endif
21686
21687 /*
21688 * Page fault error code bits:
21689 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21690 int ret = 0;
21691
21692 /* kprobe_running() needs smp_processor_id() */
21693 - if (kprobes_built_in() && !user_mode_vm(regs)) {
21694 + if (kprobes_built_in() && !user_mode(regs)) {
21695 preempt_disable();
21696 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21697 ret = 1;
21698 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21699 return !instr_lo || (instr_lo>>1) == 1;
21700 case 0x00:
21701 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21702 - if (probe_kernel_address(instr, opcode))
21703 + if (user_mode(regs)) {
21704 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21705 + return 0;
21706 + } else if (probe_kernel_address(instr, opcode))
21707 return 0;
21708
21709 *prefetch = (instr_lo == 0xF) &&
21710 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21711 while (instr < max_instr) {
21712 unsigned char opcode;
21713
21714 - if (probe_kernel_address(instr, opcode))
21715 + if (user_mode(regs)) {
21716 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21717 + break;
21718 + } else if (probe_kernel_address(instr, opcode))
21719 break;
21720
21721 instr++;
21722 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21723 force_sig_info(si_signo, &info, tsk);
21724 }
21725
21726 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21727 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21728 +#endif
21729 +
21730 +#ifdef CONFIG_PAX_EMUTRAMP
21731 +static int pax_handle_fetch_fault(struct pt_regs *regs);
21732 +#endif
21733 +
21734 +#ifdef CONFIG_PAX_PAGEEXEC
21735 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21736 +{
21737 + pgd_t *pgd;
21738 + pud_t *pud;
21739 + pmd_t *pmd;
21740 +
21741 + pgd = pgd_offset(mm, address);
21742 + if (!pgd_present(*pgd))
21743 + return NULL;
21744 + pud = pud_offset(pgd, address);
21745 + if (!pud_present(*pud))
21746 + return NULL;
21747 + pmd = pmd_offset(pud, address);
21748 + if (!pmd_present(*pmd))
21749 + return NULL;
21750 + return pmd;
21751 +}
21752 +#endif
21753 +
21754 DEFINE_SPINLOCK(pgd_lock);
21755 LIST_HEAD(pgd_list);
21756
21757 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21758 for (address = VMALLOC_START & PMD_MASK;
21759 address >= TASK_SIZE && address < FIXADDR_TOP;
21760 address += PMD_SIZE) {
21761 +
21762 +#ifdef CONFIG_PAX_PER_CPU_PGD
21763 + unsigned long cpu;
21764 +#else
21765 struct page *page;
21766 +#endif
21767
21768 spin_lock(&pgd_lock);
21769 +
21770 +#ifdef CONFIG_PAX_PER_CPU_PGD
21771 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
21772 + pgd_t *pgd = get_cpu_pgd(cpu);
21773 + pmd_t *ret;
21774 +#else
21775 list_for_each_entry(page, &pgd_list, lru) {
21776 + pgd_t *pgd = page_address(page);
21777 spinlock_t *pgt_lock;
21778 pmd_t *ret;
21779
21780 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21781 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21782
21783 spin_lock(pgt_lock);
21784 - ret = vmalloc_sync_one(page_address(page), address);
21785 +#endif
21786 +
21787 + ret = vmalloc_sync_one(pgd, address);
21788 +
21789 +#ifndef CONFIG_PAX_PER_CPU_PGD
21790 spin_unlock(pgt_lock);
21791 +#endif
21792
21793 if (!ret)
21794 break;
21795 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21796 * an interrupt in the middle of a task switch..
21797 */
21798 pgd_paddr = read_cr3();
21799 +
21800 +#ifdef CONFIG_PAX_PER_CPU_PGD
21801 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21802 +#endif
21803 +
21804 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21805 if (!pmd_k)
21806 return -1;
21807 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21808 * happen within a race in page table update. In the later
21809 * case just flush:
21810 */
21811 +
21812 +#ifdef CONFIG_PAX_PER_CPU_PGD
21813 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21814 + pgd = pgd_offset_cpu(smp_processor_id(), address);
21815 +#else
21816 pgd = pgd_offset(current->active_mm, address);
21817 +#endif
21818 +
21819 pgd_ref = pgd_offset_k(address);
21820 if (pgd_none(*pgd_ref))
21821 return -1;
21822 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21823 static int is_errata100(struct pt_regs *regs, unsigned long address)
21824 {
21825 #ifdef CONFIG_X86_64
21826 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21827 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21828 return 1;
21829 #endif
21830 return 0;
21831 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21832 }
21833
21834 static const char nx_warning[] = KERN_CRIT
21835 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21836 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21837
21838 static void
21839 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21840 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21841 if (!oops_may_print())
21842 return;
21843
21844 - if (error_code & PF_INSTR) {
21845 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21846 unsigned int level;
21847
21848 pte_t *pte = lookup_address(address, &level);
21849
21850 if (pte && pte_present(*pte) && !pte_exec(*pte))
21851 - printk(nx_warning, current_uid());
21852 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21853 }
21854
21855 +#ifdef CONFIG_PAX_KERNEXEC
21856 + if (init_mm.start_code <= address && address < init_mm.end_code) {
21857 + if (current->signal->curr_ip)
21858 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21859 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21860 + else
21861 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21862 + current->comm, task_pid_nr(current), current_uid(), current_euid());
21863 + }
21864 +#endif
21865 +
21866 printk(KERN_ALERT "BUG: unable to handle kernel ");
21867 if (address < PAGE_SIZE)
21868 printk(KERN_CONT "NULL pointer dereference");
21869 @@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21870 }
21871 #endif
21872
21873 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21874 + if (pax_is_fetch_fault(regs, error_code, address)) {
21875 +
21876 +#ifdef CONFIG_PAX_EMUTRAMP
21877 + switch (pax_handle_fetch_fault(regs)) {
21878 + case 2:
21879 + return;
21880 + }
21881 +#endif
21882 +
21883 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21884 + do_group_exit(SIGKILL);
21885 + }
21886 +#endif
21887 +
21888 if (unlikely(show_unhandled_signals))
21889 show_signal_msg(regs, error_code, address, tsk);
21890
21891 @@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21892 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21893 printk(KERN_ERR
21894 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21895 - tsk->comm, tsk->pid, address);
21896 + tsk->comm, task_pid_nr(tsk), address);
21897 code = BUS_MCEERR_AR;
21898 }
21899 #endif
21900 @@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21901 return 1;
21902 }
21903
21904 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21905 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21906 +{
21907 + pte_t *pte;
21908 + pmd_t *pmd;
21909 + spinlock_t *ptl;
21910 + unsigned char pte_mask;
21911 +
21912 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21913 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
21914 + return 0;
21915 +
21916 + /* PaX: it's our fault, let's handle it if we can */
21917 +
21918 + /* PaX: take a look at read faults before acquiring any locks */
21919 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21920 + /* instruction fetch attempt from a protected page in user mode */
21921 + up_read(&mm->mmap_sem);
21922 +
21923 +#ifdef CONFIG_PAX_EMUTRAMP
21924 + switch (pax_handle_fetch_fault(regs)) {
21925 + case 2:
21926 + return 1;
21927 + }
21928 +#endif
21929 +
21930 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21931 + do_group_exit(SIGKILL);
21932 + }
21933 +
21934 + pmd = pax_get_pmd(mm, address);
21935 + if (unlikely(!pmd))
21936 + return 0;
21937 +
21938 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21939 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21940 + pte_unmap_unlock(pte, ptl);
21941 + return 0;
21942 + }
21943 +
21944 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21945 + /* write attempt to a protected page in user mode */
21946 + pte_unmap_unlock(pte, ptl);
21947 + return 0;
21948 + }
21949 +
21950 +#ifdef CONFIG_SMP
21951 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21952 +#else
21953 + if (likely(address > get_limit(regs->cs)))
21954 +#endif
21955 + {
21956 + set_pte(pte, pte_mkread(*pte));
21957 + __flush_tlb_one(address);
21958 + pte_unmap_unlock(pte, ptl);
21959 + up_read(&mm->mmap_sem);
21960 + return 1;
21961 + }
21962 +
21963 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21964 +
21965 + /*
21966 + * PaX: fill DTLB with user rights and retry
21967 + */
21968 + __asm__ __volatile__ (
21969 + "orb %2,(%1)\n"
21970 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21971 +/*
21972 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21973 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21974 + * page fault when examined during a TLB load attempt. this is true not only
21975 + * for PTEs holding a non-present entry but also present entries that will
21976 + * raise a page fault (such as those set up by PaX, or the copy-on-write
21977 + * mechanism). in effect it means that we do *not* need to flush the TLBs
21978 + * for our target pages since their PTEs are simply not in the TLBs at all.
21979 +
21980 + * the best thing in omitting it is that we gain around 15-20% speed in the
21981 + * fast path of the page fault handler and can get rid of tracing since we
21982 + * can no longer flush unintended entries.
21983 + */
21984 + "invlpg (%0)\n"
21985 +#endif
21986 + __copyuser_seg"testb $0,(%0)\n"
21987 + "xorb %3,(%1)\n"
21988 + :
21989 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21990 + : "memory", "cc");
21991 + pte_unmap_unlock(pte, ptl);
21992 + up_read(&mm->mmap_sem);
21993 + return 1;
21994 +}
21995 +#endif
21996 +
21997 /*
21998 * Handle a spurious fault caused by a stale TLB entry.
21999 *
22000 @@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
22001 static inline int
22002 access_error(unsigned long error_code, struct vm_area_struct *vma)
22003 {
22004 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
22005 + return 1;
22006 +
22007 if (error_code & PF_WRITE) {
22008 /* write, present and write, not present: */
22009 if (unlikely(!(vma->vm_flags & VM_WRITE)))
22010 @@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
22011 {
22012 struct vm_area_struct *vma;
22013 struct task_struct *tsk;
22014 - unsigned long address;
22015 struct mm_struct *mm;
22016 int fault;
22017 int write = error_code & PF_WRITE;
22018 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
22019 (write ? FAULT_FLAG_WRITE : 0);
22020
22021 - tsk = current;
22022 - mm = tsk->mm;
22023 -
22024 /* Get the faulting address: */
22025 - address = read_cr2();
22026 + unsigned long address = read_cr2();
22027 +
22028 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22029 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
22030 + if (!search_exception_tables(regs->ip)) {
22031 + bad_area_nosemaphore(regs, error_code, address);
22032 + return;
22033 + }
22034 + if (address < PAX_USER_SHADOW_BASE) {
22035 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
22036 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
22037 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
22038 + } else
22039 + address -= PAX_USER_SHADOW_BASE;
22040 + }
22041 +#endif
22042 +
22043 + tsk = current;
22044 + mm = tsk->mm;
22045
22046 /*
22047 * Detect and handle instructions that would cause a page fault for
22048 @@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
22049 * User-mode registers count as a user access even for any
22050 * potential system fault or CPU buglet:
22051 */
22052 - if (user_mode_vm(regs)) {
22053 + if (user_mode(regs)) {
22054 local_irq_enable();
22055 error_code |= PF_USER;
22056 } else {
22057 @@ -1122,6 +1328,11 @@ retry:
22058 might_sleep();
22059 }
22060
22061 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
22062 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
22063 + return;
22064 +#endif
22065 +
22066 vma = find_vma(mm, address);
22067 if (unlikely(!vma)) {
22068 bad_area(regs, error_code, address);
22069 @@ -1133,18 +1344,24 @@ retry:
22070 bad_area(regs, error_code, address);
22071 return;
22072 }
22073 - if (error_code & PF_USER) {
22074 - /*
22075 - * Accessing the stack below %sp is always a bug.
22076 - * The large cushion allows instructions like enter
22077 - * and pusha to work. ("enter $65535, $31" pushes
22078 - * 32 pointers and then decrements %sp by 65535.)
22079 - */
22080 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
22081 - bad_area(regs, error_code, address);
22082 - return;
22083 - }
22084 + /*
22085 + * Accessing the stack below %sp is always a bug.
22086 + * The large cushion allows instructions like enter
22087 + * and pusha to work. ("enter $65535, $31" pushes
22088 + * 32 pointers and then decrements %sp by 65535.)
22089 + */
22090 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
22091 + bad_area(regs, error_code, address);
22092 + return;
22093 }
22094 +
22095 +#ifdef CONFIG_PAX_SEGMEXEC
22096 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
22097 + bad_area(regs, error_code, address);
22098 + return;
22099 + }
22100 +#endif
22101 +
22102 if (unlikely(expand_stack(vma, address))) {
22103 bad_area(regs, error_code, address);
22104 return;
22105 @@ -1199,3 +1416,292 @@ good_area:
22106
22107 up_read(&mm->mmap_sem);
22108 }
22109 +
22110 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22111 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
22112 +{
22113 + struct mm_struct *mm = current->mm;
22114 + unsigned long ip = regs->ip;
22115 +
22116 + if (v8086_mode(regs))
22117 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
22118 +
22119 +#ifdef CONFIG_PAX_PAGEEXEC
22120 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
22121 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
22122 + return true;
22123 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
22124 + return true;
22125 + return false;
22126 + }
22127 +#endif
22128 +
22129 +#ifdef CONFIG_PAX_SEGMEXEC
22130 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
22131 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
22132 + return true;
22133 + return false;
22134 + }
22135 +#endif
22136 +
22137 + return false;
22138 +}
22139 +#endif
22140 +
22141 +#ifdef CONFIG_PAX_EMUTRAMP
22142 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
22143 +{
22144 + int err;
22145 +
22146 + do { /* PaX: libffi trampoline emulation */
22147 + unsigned char mov, jmp;
22148 + unsigned int addr1, addr2;
22149 +
22150 +#ifdef CONFIG_X86_64
22151 + if ((regs->ip + 9) >> 32)
22152 + break;
22153 +#endif
22154 +
22155 + err = get_user(mov, (unsigned char __user *)regs->ip);
22156 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22157 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
22158 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22159 +
22160 + if (err)
22161 + break;
22162 +
22163 + if (mov == 0xB8 && jmp == 0xE9) {
22164 + regs->ax = addr1;
22165 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
22166 + return 2;
22167 + }
22168 + } while (0);
22169 +
22170 + do { /* PaX: gcc trampoline emulation #1 */
22171 + unsigned char mov1, mov2;
22172 + unsigned short jmp;
22173 + unsigned int addr1, addr2;
22174 +
22175 +#ifdef CONFIG_X86_64
22176 + if ((regs->ip + 11) >> 32)
22177 + break;
22178 +#endif
22179 +
22180 + err = get_user(mov1, (unsigned char __user *)regs->ip);
22181 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22182 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
22183 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22184 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
22185 +
22186 + if (err)
22187 + break;
22188 +
22189 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
22190 + regs->cx = addr1;
22191 + regs->ax = addr2;
22192 + regs->ip = addr2;
22193 + return 2;
22194 + }
22195 + } while (0);
22196 +
22197 + do { /* PaX: gcc trampoline emulation #2 */
22198 + unsigned char mov, jmp;
22199 + unsigned int addr1, addr2;
22200 +
22201 +#ifdef CONFIG_X86_64
22202 + if ((regs->ip + 9) >> 32)
22203 + break;
22204 +#endif
22205 +
22206 + err = get_user(mov, (unsigned char __user *)regs->ip);
22207 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
22208 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
22209 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
22210 +
22211 + if (err)
22212 + break;
22213 +
22214 + if (mov == 0xB9 && jmp == 0xE9) {
22215 + regs->cx = addr1;
22216 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
22217 + return 2;
22218 + }
22219 + } while (0);
22220 +
22221 + return 1; /* PaX in action */
22222 +}
22223 +
22224 +#ifdef CONFIG_X86_64
22225 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
22226 +{
22227 + int err;
22228 +
22229 + do { /* PaX: libffi trampoline emulation */
22230 + unsigned short mov1, mov2, jmp1;
22231 + unsigned char stcclc, jmp2;
22232 + unsigned long addr1, addr2;
22233 +
22234 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22235 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22236 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22237 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22238 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
22239 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
22240 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
22241 +
22242 + if (err)
22243 + break;
22244 +
22245 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22246 + regs->r11 = addr1;
22247 + regs->r10 = addr2;
22248 + if (stcclc == 0xF8)
22249 + regs->flags &= ~X86_EFLAGS_CF;
22250 + else
22251 + regs->flags |= X86_EFLAGS_CF;
22252 + regs->ip = addr1;
22253 + return 2;
22254 + }
22255 + } while (0);
22256 +
22257 + do { /* PaX: gcc trampoline emulation #1 */
22258 + unsigned short mov1, mov2, jmp1;
22259 + unsigned char jmp2;
22260 + unsigned int addr1;
22261 + unsigned long addr2;
22262 +
22263 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22264 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22265 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22266 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22267 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22268 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22269 +
22270 + if (err)
22271 + break;
22272 +
22273 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22274 + regs->r11 = addr1;
22275 + regs->r10 = addr2;
22276 + regs->ip = addr1;
22277 + return 2;
22278 + }
22279 + } while (0);
22280 +
22281 + do { /* PaX: gcc trampoline emulation #2 */
22282 + unsigned short mov1, mov2, jmp1;
22283 + unsigned char jmp2;
22284 + unsigned long addr1, addr2;
22285 +
22286 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22287 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22288 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22289 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22290 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22291 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22292 +
22293 + if (err)
22294 + break;
22295 +
22296 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22297 + regs->r11 = addr1;
22298 + regs->r10 = addr2;
22299 + regs->ip = addr1;
22300 + return 2;
22301 + }
22302 + } while (0);
22303 +
22304 + return 1; /* PaX in action */
22305 +}
22306 +#endif
22307 +
22308 +/*
22309 + * PaX: decide what to do with offenders (regs->ip = fault address)
22310 + *
22311 + * returns 1 when task should be killed
22312 + * 2 when gcc trampoline was detected
22313 + */
22314 +static int pax_handle_fetch_fault(struct pt_regs *regs)
22315 +{
22316 + if (v8086_mode(regs))
22317 + return 1;
22318 +
22319 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22320 + return 1;
22321 +
22322 +#ifdef CONFIG_X86_32
22323 + return pax_handle_fetch_fault_32(regs);
22324 +#else
22325 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22326 + return pax_handle_fetch_fault_32(regs);
22327 + else
22328 + return pax_handle_fetch_fault_64(regs);
22329 +#endif
22330 +}
22331 +#endif
22332 +
22333 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22334 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22335 +{
22336 + long i;
22337 +
22338 + printk(KERN_ERR "PAX: bytes at PC: ");
22339 + for (i = 0; i < 20; i++) {
22340 + unsigned char c;
22341 + if (get_user(c, (unsigned char __force_user *)pc+i))
22342 + printk(KERN_CONT "?? ");
22343 + else
22344 + printk(KERN_CONT "%02x ", c);
22345 + }
22346 + printk("\n");
22347 +
22348 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22349 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
22350 + unsigned long c;
22351 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
22352 +#ifdef CONFIG_X86_32
22353 + printk(KERN_CONT "???????? ");
22354 +#else
22355 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22356 + printk(KERN_CONT "???????? ???????? ");
22357 + else
22358 + printk(KERN_CONT "???????????????? ");
22359 +#endif
22360 + } else {
22361 +#ifdef CONFIG_X86_64
22362 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22363 + printk(KERN_CONT "%08x ", (unsigned int)c);
22364 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22365 + } else
22366 +#endif
22367 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22368 + }
22369 + }
22370 + printk("\n");
22371 +}
22372 +#endif
22373 +
22374 +/**
22375 + * probe_kernel_write(): safely attempt to write to a location
22376 + * @dst: address to write to
22377 + * @src: pointer to the data that shall be written
22378 + * @size: size of the data chunk
22379 + *
22380 + * Safely write to address @dst from the buffer at @src. If a kernel fault
22381 + * happens, handle that and return -EFAULT.
22382 + */
22383 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22384 +{
22385 + long ret;
22386 + mm_segment_t old_fs = get_fs();
22387 +
22388 + set_fs(KERNEL_DS);
22389 + pagefault_disable();
22390 + pax_open_kernel();
22391 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22392 + pax_close_kernel();
22393 + pagefault_enable();
22394 + set_fs(old_fs);
22395 +
22396 + return ret ? -EFAULT : 0;
22397 +}
22398 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22399 index dd74e46..7d26398 100644
22400 --- a/arch/x86/mm/gup.c
22401 +++ b/arch/x86/mm/gup.c
22402 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22403 addr = start;
22404 len = (unsigned long) nr_pages << PAGE_SHIFT;
22405 end = start + len;
22406 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22407 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22408 (void __user *)start, len)))
22409 return 0;
22410
22411 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22412 index f4f29b1..5cac4fb 100644
22413 --- a/arch/x86/mm/highmem_32.c
22414 +++ b/arch/x86/mm/highmem_32.c
22415 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22416 idx = type + KM_TYPE_NR*smp_processor_id();
22417 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22418 BUG_ON(!pte_none(*(kmap_pte-idx)));
22419 +
22420 + pax_open_kernel();
22421 set_pte(kmap_pte-idx, mk_pte(page, prot));
22422 + pax_close_kernel();
22423 +
22424 arch_flush_lazy_mmu_mode();
22425
22426 return (void *)vaddr;
22427 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22428 index f581a18..29efd37 100644
22429 --- a/arch/x86/mm/hugetlbpage.c
22430 +++ b/arch/x86/mm/hugetlbpage.c
22431 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22432 struct hstate *h = hstate_file(file);
22433 struct mm_struct *mm = current->mm;
22434 struct vm_area_struct *vma;
22435 - unsigned long start_addr;
22436 + unsigned long start_addr, pax_task_size = TASK_SIZE;
22437 +
22438 +#ifdef CONFIG_PAX_SEGMEXEC
22439 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22440 + pax_task_size = SEGMEXEC_TASK_SIZE;
22441 +#endif
22442 +
22443 + pax_task_size -= PAGE_SIZE;
22444
22445 if (len > mm->cached_hole_size) {
22446 - start_addr = mm->free_area_cache;
22447 + start_addr = mm->free_area_cache;
22448 } else {
22449 - start_addr = TASK_UNMAPPED_BASE;
22450 - mm->cached_hole_size = 0;
22451 + start_addr = mm->mmap_base;
22452 + mm->cached_hole_size = 0;
22453 }
22454
22455 full_search:
22456 @@ -280,26 +287,27 @@ full_search:
22457
22458 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22459 /* At this point: (!vma || addr < vma->vm_end). */
22460 - if (TASK_SIZE - len < addr) {
22461 + if (pax_task_size - len < addr) {
22462 /*
22463 * Start a new search - just in case we missed
22464 * some holes.
22465 */
22466 - if (start_addr != TASK_UNMAPPED_BASE) {
22467 - start_addr = TASK_UNMAPPED_BASE;
22468 + if (start_addr != mm->mmap_base) {
22469 + start_addr = mm->mmap_base;
22470 mm->cached_hole_size = 0;
22471 goto full_search;
22472 }
22473 return -ENOMEM;
22474 }
22475 - if (!vma || addr + len <= vma->vm_start) {
22476 - mm->free_area_cache = addr + len;
22477 - return addr;
22478 - }
22479 + if (check_heap_stack_gap(vma, addr, len))
22480 + break;
22481 if (addr + mm->cached_hole_size < vma->vm_start)
22482 mm->cached_hole_size = vma->vm_start - addr;
22483 addr = ALIGN(vma->vm_end, huge_page_size(h));
22484 }
22485 +
22486 + mm->free_area_cache = addr + len;
22487 + return addr;
22488 }
22489
22490 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22491 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22492 {
22493 struct hstate *h = hstate_file(file);
22494 struct mm_struct *mm = current->mm;
22495 - struct vm_area_struct *vma, *prev_vma;
22496 - unsigned long base = mm->mmap_base, addr = addr0;
22497 + struct vm_area_struct *vma;
22498 + unsigned long base = mm->mmap_base, addr;
22499 unsigned long largest_hole = mm->cached_hole_size;
22500 - int first_time = 1;
22501
22502 /* don't allow allocations above current base */
22503 if (mm->free_area_cache > base)
22504 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22505 largest_hole = 0;
22506 mm->free_area_cache = base;
22507 }
22508 -try_again:
22509 +
22510 /* make sure it can fit in the remaining address space */
22511 if (mm->free_area_cache < len)
22512 goto fail;
22513
22514 /* either no address requested or can't fit in requested address hole */
22515 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
22516 + addr = (mm->free_area_cache - len);
22517 do {
22518 + addr &= huge_page_mask(h);
22519 + vma = find_vma(mm, addr);
22520 /*
22521 * Lookup failure means no vma is above this address,
22522 * i.e. return with success:
22523 - */
22524 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22525 - return addr;
22526 -
22527 - /*
22528 * new region fits between prev_vma->vm_end and
22529 * vma->vm_start, use it:
22530 */
22531 - if (addr + len <= vma->vm_start &&
22532 - (!prev_vma || (addr >= prev_vma->vm_end))) {
22533 + if (check_heap_stack_gap(vma, addr, len)) {
22534 /* remember the address as a hint for next time */
22535 - mm->cached_hole_size = largest_hole;
22536 - return (mm->free_area_cache = addr);
22537 - } else {
22538 - /* pull free_area_cache down to the first hole */
22539 - if (mm->free_area_cache == vma->vm_end) {
22540 - mm->free_area_cache = vma->vm_start;
22541 - mm->cached_hole_size = largest_hole;
22542 - }
22543 + mm->cached_hole_size = largest_hole;
22544 + return (mm->free_area_cache = addr);
22545 + }
22546 + /* pull free_area_cache down to the first hole */
22547 + if (mm->free_area_cache == vma->vm_end) {
22548 + mm->free_area_cache = vma->vm_start;
22549 + mm->cached_hole_size = largest_hole;
22550 }
22551
22552 /* remember the largest hole we saw so far */
22553 if (addr + largest_hole < vma->vm_start)
22554 - largest_hole = vma->vm_start - addr;
22555 + largest_hole = vma->vm_start - addr;
22556
22557 /* try just below the current vma->vm_start */
22558 - addr = (vma->vm_start - len) & huge_page_mask(h);
22559 - } while (len <= vma->vm_start);
22560 + addr = skip_heap_stack_gap(vma, len);
22561 + } while (!IS_ERR_VALUE(addr));
22562
22563 fail:
22564 /*
22565 - * if hint left us with no space for the requested
22566 - * mapping then try again:
22567 - */
22568 - if (first_time) {
22569 - mm->free_area_cache = base;
22570 - largest_hole = 0;
22571 - first_time = 0;
22572 - goto try_again;
22573 - }
22574 - /*
22575 * A failed mmap() very likely causes application failure,
22576 * so fall back to the bottom-up function here. This scenario
22577 * can happen with large stack limits and large mmap()
22578 * allocations.
22579 */
22580 - mm->free_area_cache = TASK_UNMAPPED_BASE;
22581 +
22582 +#ifdef CONFIG_PAX_SEGMEXEC
22583 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22584 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22585 + else
22586 +#endif
22587 +
22588 + mm->mmap_base = TASK_UNMAPPED_BASE;
22589 +
22590 +#ifdef CONFIG_PAX_RANDMMAP
22591 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22592 + mm->mmap_base += mm->delta_mmap;
22593 +#endif
22594 +
22595 + mm->free_area_cache = mm->mmap_base;
22596 mm->cached_hole_size = ~0UL;
22597 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22598 len, pgoff, flags);
22599 @@ -386,6 +392,7 @@ fail:
22600 /*
22601 * Restore the topdown base:
22602 */
22603 + mm->mmap_base = base;
22604 mm->free_area_cache = base;
22605 mm->cached_hole_size = ~0UL;
22606
22607 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22608 struct hstate *h = hstate_file(file);
22609 struct mm_struct *mm = current->mm;
22610 struct vm_area_struct *vma;
22611 + unsigned long pax_task_size = TASK_SIZE;
22612
22613 if (len & ~huge_page_mask(h))
22614 return -EINVAL;
22615 - if (len > TASK_SIZE)
22616 +
22617 +#ifdef CONFIG_PAX_SEGMEXEC
22618 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22619 + pax_task_size = SEGMEXEC_TASK_SIZE;
22620 +#endif
22621 +
22622 + pax_task_size -= PAGE_SIZE;
22623 +
22624 + if (len > pax_task_size)
22625 return -ENOMEM;
22626
22627 if (flags & MAP_FIXED) {
22628 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22629 if (addr) {
22630 addr = ALIGN(addr, huge_page_size(h));
22631 vma = find_vma(mm, addr);
22632 - if (TASK_SIZE - len >= addr &&
22633 - (!vma || addr + len <= vma->vm_start))
22634 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22635 return addr;
22636 }
22637 if (mm->get_unmapped_area == arch_get_unmapped_area)
22638 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22639 index 87488b9..399f416 100644
22640 --- a/arch/x86/mm/init.c
22641 +++ b/arch/x86/mm/init.c
22642 @@ -15,6 +15,7 @@
22643 #include <asm/tlbflush.h>
22644 #include <asm/tlb.h>
22645 #include <asm/proto.h>
22646 +#include <asm/desc.h>
22647
22648 unsigned long __initdata pgt_buf_start;
22649 unsigned long __meminitdata pgt_buf_end;
22650 @@ -31,7 +32,7 @@ int direct_gbpages
22651 static void __init find_early_table_space(unsigned long end, int use_pse,
22652 int use_gbpages)
22653 {
22654 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22655 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22656 phys_addr_t base;
22657
22658 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22659 @@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22660 */
22661 int devmem_is_allowed(unsigned long pagenr)
22662 {
22663 +#ifdef CONFIG_GRKERNSEC_KMEM
22664 + /* allow BDA */
22665 + if (!pagenr)
22666 + return 1;
22667 + /* allow EBDA */
22668 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22669 + return 1;
22670 +#else
22671 + if (!pagenr)
22672 + return 1;
22673 +#ifdef CONFIG_VM86
22674 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22675 + return 1;
22676 +#endif
22677 +#endif
22678 +
22679 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22680 + return 1;
22681 +#ifdef CONFIG_GRKERNSEC_KMEM
22682 + /* throw out everything else below 1MB */
22683 if (pagenr <= 256)
22684 - return 1;
22685 + return 0;
22686 +#endif
22687 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22688 return 0;
22689 if (!page_is_ram(pagenr))
22690 @@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22691
22692 void free_initmem(void)
22693 {
22694 +
22695 +#ifdef CONFIG_PAX_KERNEXEC
22696 +#ifdef CONFIG_X86_32
22697 + /* PaX: limit KERNEL_CS to actual size */
22698 + unsigned long addr, limit;
22699 + struct desc_struct d;
22700 + int cpu;
22701 +
22702 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22703 + limit = (limit - 1UL) >> PAGE_SHIFT;
22704 +
22705 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22706 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
22707 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22708 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22709 + }
22710 +
22711 + /* PaX: make KERNEL_CS read-only */
22712 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22713 + if (!paravirt_enabled())
22714 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22715 +/*
22716 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22717 + pgd = pgd_offset_k(addr);
22718 + pud = pud_offset(pgd, addr);
22719 + pmd = pmd_offset(pud, addr);
22720 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22721 + }
22722 +*/
22723 +#ifdef CONFIG_X86_PAE
22724 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22725 +/*
22726 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22727 + pgd = pgd_offset_k(addr);
22728 + pud = pud_offset(pgd, addr);
22729 + pmd = pmd_offset(pud, addr);
22730 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22731 + }
22732 +*/
22733 +#endif
22734 +
22735 +#ifdef CONFIG_MODULES
22736 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22737 +#endif
22738 +
22739 +#else
22740 + pgd_t *pgd;
22741 + pud_t *pud;
22742 + pmd_t *pmd;
22743 + unsigned long addr, end;
22744 +
22745 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22746 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22747 + pgd = pgd_offset_k(addr);
22748 + pud = pud_offset(pgd, addr);
22749 + pmd = pmd_offset(pud, addr);
22750 + if (!pmd_present(*pmd))
22751 + continue;
22752 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22753 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22754 + else
22755 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22756 + }
22757 +
22758 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22759 + end = addr + KERNEL_IMAGE_SIZE;
22760 + for (; addr < end; addr += PMD_SIZE) {
22761 + pgd = pgd_offset_k(addr);
22762 + pud = pud_offset(pgd, addr);
22763 + pmd = pmd_offset(pud, addr);
22764 + if (!pmd_present(*pmd))
22765 + continue;
22766 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22767 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22768 + }
22769 +#endif
22770 +
22771 + flush_tlb_all();
22772 +#endif
22773 +
22774 free_init_pages("unused kernel memory",
22775 (unsigned long)(&__init_begin),
22776 (unsigned long)(&__init_end));
22777 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22778 index 29f7c6d..b46b35b 100644
22779 --- a/arch/x86/mm/init_32.c
22780 +++ b/arch/x86/mm/init_32.c
22781 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22782 }
22783
22784 /*
22785 - * Creates a middle page table and puts a pointer to it in the
22786 - * given global directory entry. This only returns the gd entry
22787 - * in non-PAE compilation mode, since the middle layer is folded.
22788 - */
22789 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
22790 -{
22791 - pud_t *pud;
22792 - pmd_t *pmd_table;
22793 -
22794 -#ifdef CONFIG_X86_PAE
22795 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22796 - if (after_bootmem)
22797 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22798 - else
22799 - pmd_table = (pmd_t *)alloc_low_page();
22800 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22801 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22802 - pud = pud_offset(pgd, 0);
22803 - BUG_ON(pmd_table != pmd_offset(pud, 0));
22804 -
22805 - return pmd_table;
22806 - }
22807 -#endif
22808 - pud = pud_offset(pgd, 0);
22809 - pmd_table = pmd_offset(pud, 0);
22810 -
22811 - return pmd_table;
22812 -}
22813 -
22814 -/*
22815 * Create a page table and place a pointer to it in a middle page
22816 * directory entry:
22817 */
22818 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22819 page_table = (pte_t *)alloc_low_page();
22820
22821 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22822 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22823 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22824 +#else
22825 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22826 +#endif
22827 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22828 }
22829
22830 return pte_offset_kernel(pmd, 0);
22831 }
22832
22833 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
22834 +{
22835 + pud_t *pud;
22836 + pmd_t *pmd_table;
22837 +
22838 + pud = pud_offset(pgd, 0);
22839 + pmd_table = pmd_offset(pud, 0);
22840 +
22841 + return pmd_table;
22842 +}
22843 +
22844 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22845 {
22846 int pgd_idx = pgd_index(vaddr);
22847 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22848 int pgd_idx, pmd_idx;
22849 unsigned long vaddr;
22850 pgd_t *pgd;
22851 + pud_t *pud;
22852 pmd_t *pmd;
22853 pte_t *pte = NULL;
22854
22855 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22856 pgd = pgd_base + pgd_idx;
22857
22858 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22859 - pmd = one_md_table_init(pgd);
22860 - pmd = pmd + pmd_index(vaddr);
22861 + pud = pud_offset(pgd, vaddr);
22862 + pmd = pmd_offset(pud, vaddr);
22863 +
22864 +#ifdef CONFIG_X86_PAE
22865 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22866 +#endif
22867 +
22868 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22869 pmd++, pmd_idx++) {
22870 pte = page_table_kmap_check(one_page_table_init(pmd),
22871 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22872 }
22873 }
22874
22875 -static inline int is_kernel_text(unsigned long addr)
22876 +static inline int is_kernel_text(unsigned long start, unsigned long end)
22877 {
22878 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22879 - return 1;
22880 - return 0;
22881 + if ((start > ktla_ktva((unsigned long)_etext) ||
22882 + end <= ktla_ktva((unsigned long)_stext)) &&
22883 + (start > ktla_ktva((unsigned long)_einittext) ||
22884 + end <= ktla_ktva((unsigned long)_sinittext)) &&
22885 +
22886 +#ifdef CONFIG_ACPI_SLEEP
22887 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22888 +#endif
22889 +
22890 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22891 + return 0;
22892 + return 1;
22893 }
22894
22895 /*
22896 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22897 unsigned long last_map_addr = end;
22898 unsigned long start_pfn, end_pfn;
22899 pgd_t *pgd_base = swapper_pg_dir;
22900 - int pgd_idx, pmd_idx, pte_ofs;
22901 + unsigned int pgd_idx, pmd_idx, pte_ofs;
22902 unsigned long pfn;
22903 pgd_t *pgd;
22904 + pud_t *pud;
22905 pmd_t *pmd;
22906 pte_t *pte;
22907 unsigned pages_2m, pages_4k;
22908 @@ -281,8 +282,13 @@ repeat:
22909 pfn = start_pfn;
22910 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22911 pgd = pgd_base + pgd_idx;
22912 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22913 - pmd = one_md_table_init(pgd);
22914 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22915 + pud = pud_offset(pgd, 0);
22916 + pmd = pmd_offset(pud, 0);
22917 +
22918 +#ifdef CONFIG_X86_PAE
22919 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22920 +#endif
22921
22922 if (pfn >= end_pfn)
22923 continue;
22924 @@ -294,14 +300,13 @@ repeat:
22925 #endif
22926 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22927 pmd++, pmd_idx++) {
22928 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22929 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22930
22931 /*
22932 * Map with big pages if possible, otherwise
22933 * create normal page tables:
22934 */
22935 if (use_pse) {
22936 - unsigned int addr2;
22937 pgprot_t prot = PAGE_KERNEL_LARGE;
22938 /*
22939 * first pass will use the same initial
22940 @@ -311,11 +316,7 @@ repeat:
22941 __pgprot(PTE_IDENT_ATTR |
22942 _PAGE_PSE);
22943
22944 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22945 - PAGE_OFFSET + PAGE_SIZE-1;
22946 -
22947 - if (is_kernel_text(addr) ||
22948 - is_kernel_text(addr2))
22949 + if (is_kernel_text(address, address + PMD_SIZE))
22950 prot = PAGE_KERNEL_LARGE_EXEC;
22951
22952 pages_2m++;
22953 @@ -332,7 +333,7 @@ repeat:
22954 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22955 pte += pte_ofs;
22956 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22957 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22958 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22959 pgprot_t prot = PAGE_KERNEL;
22960 /*
22961 * first pass will use the same initial
22962 @@ -340,7 +341,7 @@ repeat:
22963 */
22964 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22965
22966 - if (is_kernel_text(addr))
22967 + if (is_kernel_text(address, address + PAGE_SIZE))
22968 prot = PAGE_KERNEL_EXEC;
22969
22970 pages_4k++;
22971 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22972
22973 pud = pud_offset(pgd, va);
22974 pmd = pmd_offset(pud, va);
22975 - if (!pmd_present(*pmd))
22976 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
22977 break;
22978
22979 pte = pte_offset_kernel(pmd, va);
22980 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22981
22982 static void __init pagetable_init(void)
22983 {
22984 - pgd_t *pgd_base = swapper_pg_dir;
22985 -
22986 - permanent_kmaps_init(pgd_base);
22987 + permanent_kmaps_init(swapper_pg_dir);
22988 }
22989
22990 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22991 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22992 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22993
22994 /* user-defined highmem size */
22995 @@ -757,6 +756,12 @@ void __init mem_init(void)
22996
22997 pci_iommu_alloc();
22998
22999 +#ifdef CONFIG_PAX_PER_CPU_PGD
23000 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
23001 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23002 + KERNEL_PGD_PTRS);
23003 +#endif
23004 +
23005 #ifdef CONFIG_FLATMEM
23006 BUG_ON(!mem_map);
23007 #endif
23008 @@ -774,7 +779,7 @@ void __init mem_init(void)
23009 set_highmem_pages_init();
23010
23011 codesize = (unsigned long) &_etext - (unsigned long) &_text;
23012 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
23013 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
23014 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
23015
23016 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
23017 @@ -815,10 +820,10 @@ void __init mem_init(void)
23018 ((unsigned long)&__init_end -
23019 (unsigned long)&__init_begin) >> 10,
23020
23021 - (unsigned long)&_etext, (unsigned long)&_edata,
23022 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
23023 + (unsigned long)&_sdata, (unsigned long)&_edata,
23024 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
23025
23026 - (unsigned long)&_text, (unsigned long)&_etext,
23027 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
23028 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
23029
23030 /*
23031 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
23032 if (!kernel_set_to_readonly)
23033 return;
23034
23035 + start = ktla_ktva(start);
23036 pr_debug("Set kernel text: %lx - %lx for read write\n",
23037 start, start+size);
23038
23039 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
23040 if (!kernel_set_to_readonly)
23041 return;
23042
23043 + start = ktla_ktva(start);
23044 pr_debug("Set kernel text: %lx - %lx for read only\n",
23045 start, start+size);
23046
23047 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
23048 unsigned long start = PFN_ALIGN(_text);
23049 unsigned long size = PFN_ALIGN(_etext) - start;
23050
23051 + start = ktla_ktva(start);
23052 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
23053 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
23054 size >> 10);
23055 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
23056 index bbaaa00..796fa65 100644
23057 --- a/arch/x86/mm/init_64.c
23058 +++ b/arch/x86/mm/init_64.c
23059 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
23060 * around without checking the pgd every time.
23061 */
23062
23063 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
23064 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
23065 EXPORT_SYMBOL_GPL(__supported_pte_mask);
23066
23067 int force_personality32;
23068 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23069
23070 for (address = start; address <= end; address += PGDIR_SIZE) {
23071 const pgd_t *pgd_ref = pgd_offset_k(address);
23072 +
23073 +#ifdef CONFIG_PAX_PER_CPU_PGD
23074 + unsigned long cpu;
23075 +#else
23076 struct page *page;
23077 +#endif
23078
23079 if (pgd_none(*pgd_ref))
23080 continue;
23081
23082 spin_lock(&pgd_lock);
23083 +
23084 +#ifdef CONFIG_PAX_PER_CPU_PGD
23085 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23086 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
23087 +#else
23088 list_for_each_entry(page, &pgd_list, lru) {
23089 pgd_t *pgd;
23090 spinlock_t *pgt_lock;
23091 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23092 /* the pgt_lock only for Xen */
23093 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23094 spin_lock(pgt_lock);
23095 +#endif
23096
23097 if (pgd_none(*pgd))
23098 set_pgd(pgd, *pgd_ref);
23099 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
23100 BUG_ON(pgd_page_vaddr(*pgd)
23101 != pgd_page_vaddr(*pgd_ref));
23102
23103 +#ifndef CONFIG_PAX_PER_CPU_PGD
23104 spin_unlock(pgt_lock);
23105 +#endif
23106 +
23107 }
23108 spin_unlock(&pgd_lock);
23109 }
23110 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
23111 pmd = fill_pmd(pud, vaddr);
23112 pte = fill_pte(pmd, vaddr);
23113
23114 + pax_open_kernel();
23115 set_pte(pte, new_pte);
23116 + pax_close_kernel();
23117
23118 /*
23119 * It's enough to flush this one mapping.
23120 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
23121 pgd = pgd_offset_k((unsigned long)__va(phys));
23122 if (pgd_none(*pgd)) {
23123 pud = (pud_t *) spp_getpage();
23124 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
23125 - _PAGE_USER));
23126 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
23127 }
23128 pud = pud_offset(pgd, (unsigned long)__va(phys));
23129 if (pud_none(*pud)) {
23130 pmd = (pmd_t *) spp_getpage();
23131 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
23132 - _PAGE_USER));
23133 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
23134 }
23135 pmd = pmd_offset(pud, phys);
23136 BUG_ON(!pmd_none(*pmd));
23137 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
23138 if (pfn >= pgt_buf_top)
23139 panic("alloc_low_page: ran out of memory");
23140
23141 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
23142 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
23143 clear_page(adr);
23144 *phys = pfn * PAGE_SIZE;
23145 return adr;
23146 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
23147
23148 phys = __pa(virt);
23149 left = phys & (PAGE_SIZE - 1);
23150 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
23151 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
23152 adr = (void *)(((unsigned long)adr) | left);
23153
23154 return adr;
23155 @@ -693,6 +707,12 @@ void __init mem_init(void)
23156
23157 pci_iommu_alloc();
23158
23159 +#ifdef CONFIG_PAX_PER_CPU_PGD
23160 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
23161 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23162 + KERNEL_PGD_PTRS);
23163 +#endif
23164 +
23165 /* clear_bss() already clear the empty_zero_page */
23166
23167 reservedpages = 0;
23168 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
23169 static struct vm_area_struct gate_vma = {
23170 .vm_start = VSYSCALL_START,
23171 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
23172 - .vm_page_prot = PAGE_READONLY_EXEC,
23173 - .vm_flags = VM_READ | VM_EXEC
23174 + .vm_page_prot = PAGE_READONLY,
23175 + .vm_flags = VM_READ
23176 };
23177
23178 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
23179 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
23180
23181 const char *arch_vma_name(struct vm_area_struct *vma)
23182 {
23183 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23184 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23185 return "[vdso]";
23186 if (vma == &gate_vma)
23187 return "[vsyscall]";
23188 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
23189 index 7b179b4..6bd1777 100644
23190 --- a/arch/x86/mm/iomap_32.c
23191 +++ b/arch/x86/mm/iomap_32.c
23192 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
23193 type = kmap_atomic_idx_push();
23194 idx = type + KM_TYPE_NR * smp_processor_id();
23195 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
23196 +
23197 + pax_open_kernel();
23198 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
23199 + pax_close_kernel();
23200 +
23201 arch_flush_lazy_mmu_mode();
23202
23203 return (void *)vaddr;
23204 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
23205 index be1ef57..55f0160 100644
23206 --- a/arch/x86/mm/ioremap.c
23207 +++ b/arch/x86/mm/ioremap.c
23208 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
23209 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
23210 int is_ram = page_is_ram(pfn);
23211
23212 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
23213 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
23214 return NULL;
23215 WARN_ON_ONCE(is_ram);
23216 }
23217 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
23218
23219 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
23220 if (page_is_ram(start >> PAGE_SHIFT))
23221 +#ifdef CONFIG_HIGHMEM
23222 + if ((start >> PAGE_SHIFT) < max_low_pfn)
23223 +#endif
23224 return __va(phys);
23225
23226 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
23227 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
23228 early_param("early_ioremap_debug", early_ioremap_debug_setup);
23229
23230 static __initdata int after_paging_init;
23231 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
23232 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
23233
23234 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
23235 {
23236 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
23237 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
23238
23239 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
23240 - memset(bm_pte, 0, sizeof(bm_pte));
23241 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
23242 + pmd_populate_user(&init_mm, pmd, bm_pte);
23243
23244 /*
23245 * The boot-ioremap range spans multiple pmds, for which
23246 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
23247 index d87dd6d..bf3fa66 100644
23248 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
23249 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
23250 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
23251 * memory (e.g. tracked pages)? For now, we need this to avoid
23252 * invoking kmemcheck for PnP BIOS calls.
23253 */
23254 - if (regs->flags & X86_VM_MASK)
23255 + if (v8086_mode(regs))
23256 return false;
23257 - if (regs->cs != __KERNEL_CS)
23258 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
23259 return false;
23260
23261 pte = kmemcheck_pte_lookup(address);
23262 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
23263 index 845df68..1d8d29f 100644
23264 --- a/arch/x86/mm/mmap.c
23265 +++ b/arch/x86/mm/mmap.c
23266 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
23267 * Leave an at least ~128 MB hole with possible stack randomization.
23268 */
23269 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
23270 -#define MAX_GAP (TASK_SIZE/6*5)
23271 +#define MAX_GAP (pax_task_size/6*5)
23272
23273 static int mmap_is_legacy(void)
23274 {
23275 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
23276 return rnd << PAGE_SHIFT;
23277 }
23278
23279 -static unsigned long mmap_base(void)
23280 +static unsigned long mmap_base(struct mm_struct *mm)
23281 {
23282 unsigned long gap = rlimit(RLIMIT_STACK);
23283 + unsigned long pax_task_size = TASK_SIZE;
23284 +
23285 +#ifdef CONFIG_PAX_SEGMEXEC
23286 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23287 + pax_task_size = SEGMEXEC_TASK_SIZE;
23288 +#endif
23289
23290 if (gap < MIN_GAP)
23291 gap = MIN_GAP;
23292 else if (gap > MAX_GAP)
23293 gap = MAX_GAP;
23294
23295 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23296 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23297 }
23298
23299 /*
23300 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23301 * does, but not when emulating X86_32
23302 */
23303 -static unsigned long mmap_legacy_base(void)
23304 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
23305 {
23306 - if (mmap_is_ia32())
23307 + if (mmap_is_ia32()) {
23308 +
23309 +#ifdef CONFIG_PAX_SEGMEXEC
23310 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23311 + return SEGMEXEC_TASK_UNMAPPED_BASE;
23312 + else
23313 +#endif
23314 +
23315 return TASK_UNMAPPED_BASE;
23316 - else
23317 + } else
23318 return TASK_UNMAPPED_BASE + mmap_rnd();
23319 }
23320
23321 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
23322 void arch_pick_mmap_layout(struct mm_struct *mm)
23323 {
23324 if (mmap_is_legacy()) {
23325 - mm->mmap_base = mmap_legacy_base();
23326 + mm->mmap_base = mmap_legacy_base(mm);
23327 +
23328 +#ifdef CONFIG_PAX_RANDMMAP
23329 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23330 + mm->mmap_base += mm->delta_mmap;
23331 +#endif
23332 +
23333 mm->get_unmapped_area = arch_get_unmapped_area;
23334 mm->unmap_area = arch_unmap_area;
23335 } else {
23336 - mm->mmap_base = mmap_base();
23337 + mm->mmap_base = mmap_base(mm);
23338 +
23339 +#ifdef CONFIG_PAX_RANDMMAP
23340 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23341 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23342 +#endif
23343 +
23344 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23345 mm->unmap_area = arch_unmap_area_topdown;
23346 }
23347 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23348 index de54b9b..799051e 100644
23349 --- a/arch/x86/mm/mmio-mod.c
23350 +++ b/arch/x86/mm/mmio-mod.c
23351 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23352 break;
23353 default:
23354 {
23355 - unsigned char *ip = (unsigned char *)instptr;
23356 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23357 my_trace->opcode = MMIO_UNKNOWN_OP;
23358 my_trace->width = 0;
23359 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23360 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23361 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23362 void __iomem *addr)
23363 {
23364 - static atomic_t next_id;
23365 + static atomic_unchecked_t next_id;
23366 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23367 /* These are page-unaligned. */
23368 struct mmiotrace_map map = {
23369 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23370 .private = trace
23371 },
23372 .phys = offset,
23373 - .id = atomic_inc_return(&next_id)
23374 + .id = atomic_inc_return_unchecked(&next_id)
23375 };
23376 map.map_id = trace->id;
23377
23378 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23379 index b008656..773eac2 100644
23380 --- a/arch/x86/mm/pageattr-test.c
23381 +++ b/arch/x86/mm/pageattr-test.c
23382 @@ -36,7 +36,7 @@ enum {
23383
23384 static int pte_testbit(pte_t pte)
23385 {
23386 - return pte_flags(pte) & _PAGE_UNUSED1;
23387 + return pte_flags(pte) & _PAGE_CPA_TEST;
23388 }
23389
23390 struct split_state {
23391 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23392 index f9e5267..77b1a40 100644
23393 --- a/arch/x86/mm/pageattr.c
23394 +++ b/arch/x86/mm/pageattr.c
23395 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23396 */
23397 #ifdef CONFIG_PCI_BIOS
23398 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23399 - pgprot_val(forbidden) |= _PAGE_NX;
23400 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23401 #endif
23402
23403 /*
23404 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23405 * Does not cover __inittext since that is gone later on. On
23406 * 64bit we do not enforce !NX on the low mapping
23407 */
23408 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
23409 - pgprot_val(forbidden) |= _PAGE_NX;
23410 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23411 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23412
23413 +#ifdef CONFIG_DEBUG_RODATA
23414 /*
23415 * The .rodata section needs to be read-only. Using the pfn
23416 * catches all aliases.
23417 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23418 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23419 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23420 pgprot_val(forbidden) |= _PAGE_RW;
23421 +#endif
23422
23423 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23424 /*
23425 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23426 }
23427 #endif
23428
23429 +#ifdef CONFIG_PAX_KERNEXEC
23430 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23431 + pgprot_val(forbidden) |= _PAGE_RW;
23432 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23433 + }
23434 +#endif
23435 +
23436 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23437
23438 return prot;
23439 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23440 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23441 {
23442 /* change init_mm */
23443 + pax_open_kernel();
23444 set_pte_atomic(kpte, pte);
23445 +
23446 #ifdef CONFIG_X86_32
23447 if (!SHARED_KERNEL_PMD) {
23448 +
23449 +#ifdef CONFIG_PAX_PER_CPU_PGD
23450 + unsigned long cpu;
23451 +#else
23452 struct page *page;
23453 +#endif
23454
23455 +#ifdef CONFIG_PAX_PER_CPU_PGD
23456 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23457 + pgd_t *pgd = get_cpu_pgd(cpu);
23458 +#else
23459 list_for_each_entry(page, &pgd_list, lru) {
23460 - pgd_t *pgd;
23461 + pgd_t *pgd = (pgd_t *)page_address(page);
23462 +#endif
23463 +
23464 pud_t *pud;
23465 pmd_t *pmd;
23466
23467 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
23468 + pgd += pgd_index(address);
23469 pud = pud_offset(pgd, address);
23470 pmd = pmd_offset(pud, address);
23471 set_pte_atomic((pte_t *)pmd, pte);
23472 }
23473 }
23474 #endif
23475 + pax_close_kernel();
23476 }
23477
23478 static int
23479 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23480 index f6ff57b..481690f 100644
23481 --- a/arch/x86/mm/pat.c
23482 +++ b/arch/x86/mm/pat.c
23483 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23484
23485 if (!entry) {
23486 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23487 - current->comm, current->pid, start, end);
23488 + current->comm, task_pid_nr(current), start, end);
23489 return -EINVAL;
23490 }
23491
23492 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23493 while (cursor < to) {
23494 if (!devmem_is_allowed(pfn)) {
23495 printk(KERN_INFO
23496 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23497 - current->comm, from, to);
23498 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23499 + current->comm, from, to, cursor);
23500 return 0;
23501 }
23502 cursor += PAGE_SIZE;
23503 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23504 printk(KERN_INFO
23505 "%s:%d ioremap_change_attr failed %s "
23506 "for %Lx-%Lx\n",
23507 - current->comm, current->pid,
23508 + current->comm, task_pid_nr(current),
23509 cattr_name(flags),
23510 base, (unsigned long long)(base + size));
23511 return -EINVAL;
23512 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23513 if (want_flags != flags) {
23514 printk(KERN_WARNING
23515 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23516 - current->comm, current->pid,
23517 + current->comm, task_pid_nr(current),
23518 cattr_name(want_flags),
23519 (unsigned long long)paddr,
23520 (unsigned long long)(paddr + size),
23521 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23522 free_memtype(paddr, paddr + size);
23523 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23524 " for %Lx-%Lx, got %s\n",
23525 - current->comm, current->pid,
23526 + current->comm, task_pid_nr(current),
23527 cattr_name(want_flags),
23528 (unsigned long long)paddr,
23529 (unsigned long long)(paddr + size),
23530 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23531 index 9f0614d..92ae64a 100644
23532 --- a/arch/x86/mm/pf_in.c
23533 +++ b/arch/x86/mm/pf_in.c
23534 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23535 int i;
23536 enum reason_type rv = OTHERS;
23537
23538 - p = (unsigned char *)ins_addr;
23539 + p = (unsigned char *)ktla_ktva(ins_addr);
23540 p += skip_prefix(p, &prf);
23541 p += get_opcode(p, &opcode);
23542
23543 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23544 struct prefix_bits prf;
23545 int i;
23546
23547 - p = (unsigned char *)ins_addr;
23548 + p = (unsigned char *)ktla_ktva(ins_addr);
23549 p += skip_prefix(p, &prf);
23550 p += get_opcode(p, &opcode);
23551
23552 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23553 struct prefix_bits prf;
23554 int i;
23555
23556 - p = (unsigned char *)ins_addr;
23557 + p = (unsigned char *)ktla_ktva(ins_addr);
23558 p += skip_prefix(p, &prf);
23559 p += get_opcode(p, &opcode);
23560
23561 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23562 struct prefix_bits prf;
23563 int i;
23564
23565 - p = (unsigned char *)ins_addr;
23566 + p = (unsigned char *)ktla_ktva(ins_addr);
23567 p += skip_prefix(p, &prf);
23568 p += get_opcode(p, &opcode);
23569 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23570 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23571 struct prefix_bits prf;
23572 int i;
23573
23574 - p = (unsigned char *)ins_addr;
23575 + p = (unsigned char *)ktla_ktva(ins_addr);
23576 p += skip_prefix(p, &prf);
23577 p += get_opcode(p, &opcode);
23578 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23579 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23580 index 8573b83..c3b1a30 100644
23581 --- a/arch/x86/mm/pgtable.c
23582 +++ b/arch/x86/mm/pgtable.c
23583 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23584 list_del(&page->lru);
23585 }
23586
23587 -#define UNSHARED_PTRS_PER_PGD \
23588 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23589 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23590 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23591
23592 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23593 +{
23594 + while (count--)
23595 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23596 +}
23597 +#endif
23598
23599 +#ifdef CONFIG_PAX_PER_CPU_PGD
23600 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23601 +{
23602 + while (count--)
23603 +
23604 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23605 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23606 +#else
23607 + *dst++ = *src++;
23608 +#endif
23609 +
23610 +}
23611 +#endif
23612 +
23613 +#ifdef CONFIG_X86_64
23614 +#define pxd_t pud_t
23615 +#define pyd_t pgd_t
23616 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23617 +#define pxd_free(mm, pud) pud_free((mm), (pud))
23618 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23619 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
23620 +#define PYD_SIZE PGDIR_SIZE
23621 +#else
23622 +#define pxd_t pmd_t
23623 +#define pyd_t pud_t
23624 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23625 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
23626 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23627 +#define pyd_offset(mm, address) pud_offset((mm), (address))
23628 +#define PYD_SIZE PUD_SIZE
23629 +#endif
23630 +
23631 +#ifdef CONFIG_PAX_PER_CPU_PGD
23632 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23633 +static inline void pgd_dtor(pgd_t *pgd) {}
23634 +#else
23635 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23636 {
23637 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23638 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23639 pgd_list_del(pgd);
23640 spin_unlock(&pgd_lock);
23641 }
23642 +#endif
23643
23644 /*
23645 * List of all pgd's needed for non-PAE so it can invalidate entries
23646 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23647 * -- wli
23648 */
23649
23650 -#ifdef CONFIG_X86_PAE
23651 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23652 /*
23653 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23654 * updating the top-level pagetable entries to guarantee the
23655 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23656 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23657 * and initialize the kernel pmds here.
23658 */
23659 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23660 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23661
23662 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23663 {
23664 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23665 */
23666 flush_tlb_mm(mm);
23667 }
23668 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23669 +#define PREALLOCATED_PXDS USER_PGD_PTRS
23670 #else /* !CONFIG_X86_PAE */
23671
23672 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23673 -#define PREALLOCATED_PMDS 0
23674 +#define PREALLOCATED_PXDS 0
23675
23676 #endif /* CONFIG_X86_PAE */
23677
23678 -static void free_pmds(pmd_t *pmds[])
23679 +static void free_pxds(pxd_t *pxds[])
23680 {
23681 int i;
23682
23683 - for(i = 0; i < PREALLOCATED_PMDS; i++)
23684 - if (pmds[i])
23685 - free_page((unsigned long)pmds[i]);
23686 + for(i = 0; i < PREALLOCATED_PXDS; i++)
23687 + if (pxds[i])
23688 + free_page((unsigned long)pxds[i]);
23689 }
23690
23691 -static int preallocate_pmds(pmd_t *pmds[])
23692 +static int preallocate_pxds(pxd_t *pxds[])
23693 {
23694 int i;
23695 bool failed = false;
23696
23697 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23698 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23699 - if (pmd == NULL)
23700 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23701 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23702 + if (pxd == NULL)
23703 failed = true;
23704 - pmds[i] = pmd;
23705 + pxds[i] = pxd;
23706 }
23707
23708 if (failed) {
23709 - free_pmds(pmds);
23710 + free_pxds(pxds);
23711 return -ENOMEM;
23712 }
23713
23714 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23715 * preallocate which never got a corresponding vma will need to be
23716 * freed manually.
23717 */
23718 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23719 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23720 {
23721 int i;
23722
23723 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23724 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23725 pgd_t pgd = pgdp[i];
23726
23727 if (pgd_val(pgd) != 0) {
23728 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23729 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23730
23731 - pgdp[i] = native_make_pgd(0);
23732 + set_pgd(pgdp + i, native_make_pgd(0));
23733
23734 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23735 - pmd_free(mm, pmd);
23736 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23737 + pxd_free(mm, pxd);
23738 }
23739 }
23740 }
23741
23742 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23743 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23744 {
23745 - pud_t *pud;
23746 + pyd_t *pyd;
23747 unsigned long addr;
23748 int i;
23749
23750 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23751 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23752 return;
23753
23754 - pud = pud_offset(pgd, 0);
23755 +#ifdef CONFIG_X86_64
23756 + pyd = pyd_offset(mm, 0L);
23757 +#else
23758 + pyd = pyd_offset(pgd, 0L);
23759 +#endif
23760
23761 - for (addr = i = 0; i < PREALLOCATED_PMDS;
23762 - i++, pud++, addr += PUD_SIZE) {
23763 - pmd_t *pmd = pmds[i];
23764 + for (addr = i = 0; i < PREALLOCATED_PXDS;
23765 + i++, pyd++, addr += PYD_SIZE) {
23766 + pxd_t *pxd = pxds[i];
23767
23768 if (i >= KERNEL_PGD_BOUNDARY)
23769 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23770 - sizeof(pmd_t) * PTRS_PER_PMD);
23771 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23772 + sizeof(pxd_t) * PTRS_PER_PMD);
23773
23774 - pud_populate(mm, pud, pmd);
23775 + pyd_populate(mm, pyd, pxd);
23776 }
23777 }
23778
23779 pgd_t *pgd_alloc(struct mm_struct *mm)
23780 {
23781 pgd_t *pgd;
23782 - pmd_t *pmds[PREALLOCATED_PMDS];
23783 + pxd_t *pxds[PREALLOCATED_PXDS];
23784
23785 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23786
23787 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23788
23789 mm->pgd = pgd;
23790
23791 - if (preallocate_pmds(pmds) != 0)
23792 + if (preallocate_pxds(pxds) != 0)
23793 goto out_free_pgd;
23794
23795 if (paravirt_pgd_alloc(mm) != 0)
23796 - goto out_free_pmds;
23797 + goto out_free_pxds;
23798
23799 /*
23800 * Make sure that pre-populating the pmds is atomic with
23801 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23802 spin_lock(&pgd_lock);
23803
23804 pgd_ctor(mm, pgd);
23805 - pgd_prepopulate_pmd(mm, pgd, pmds);
23806 + pgd_prepopulate_pxd(mm, pgd, pxds);
23807
23808 spin_unlock(&pgd_lock);
23809
23810 return pgd;
23811
23812 -out_free_pmds:
23813 - free_pmds(pmds);
23814 +out_free_pxds:
23815 + free_pxds(pxds);
23816 out_free_pgd:
23817 free_page((unsigned long)pgd);
23818 out:
23819 @@ -295,7 +344,7 @@ out:
23820
23821 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23822 {
23823 - pgd_mop_up_pmds(mm, pgd);
23824 + pgd_mop_up_pxds(mm, pgd);
23825 pgd_dtor(pgd);
23826 paravirt_pgd_free(mm, pgd);
23827 free_page((unsigned long)pgd);
23828 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23829 index cac7184..09a39fa 100644
23830 --- a/arch/x86/mm/pgtable_32.c
23831 +++ b/arch/x86/mm/pgtable_32.c
23832 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23833 return;
23834 }
23835 pte = pte_offset_kernel(pmd, vaddr);
23836 +
23837 + pax_open_kernel();
23838 if (pte_val(pteval))
23839 set_pte_at(&init_mm, vaddr, pte, pteval);
23840 else
23841 pte_clear(&init_mm, vaddr, pte);
23842 + pax_close_kernel();
23843
23844 /*
23845 * It's enough to flush this one mapping.
23846 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23847 index 410531d..0f16030 100644
23848 --- a/arch/x86/mm/setup_nx.c
23849 +++ b/arch/x86/mm/setup_nx.c
23850 @@ -5,8 +5,10 @@
23851 #include <asm/pgtable.h>
23852 #include <asm/proto.h>
23853
23854 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23855 static int disable_nx __cpuinitdata;
23856
23857 +#ifndef CONFIG_PAX_PAGEEXEC
23858 /*
23859 * noexec = on|off
23860 *
23861 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23862 return 0;
23863 }
23864 early_param("noexec", noexec_setup);
23865 +#endif
23866 +
23867 +#endif
23868
23869 void __cpuinit x86_configure_nx(void)
23870 {
23871 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23872 if (cpu_has_nx && !disable_nx)
23873 __supported_pte_mask |= _PAGE_NX;
23874 else
23875 +#endif
23876 __supported_pte_mask &= ~_PAGE_NX;
23877 }
23878
23879 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23880 index d6c0418..06a0ad5 100644
23881 --- a/arch/x86/mm/tlb.c
23882 +++ b/arch/x86/mm/tlb.c
23883 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
23884 BUG();
23885 cpumask_clear_cpu(cpu,
23886 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23887 +
23888 +#ifndef CONFIG_PAX_PER_CPU_PGD
23889 load_cr3(swapper_pg_dir);
23890 +#endif
23891 +
23892 }
23893 EXPORT_SYMBOL_GPL(leave_mm);
23894
23895 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23896 index 6687022..ceabcfa 100644
23897 --- a/arch/x86/net/bpf_jit.S
23898 +++ b/arch/x86/net/bpf_jit.S
23899 @@ -9,6 +9,7 @@
23900 */
23901 #include <linux/linkage.h>
23902 #include <asm/dwarf2.h>
23903 +#include <asm/alternative-asm.h>
23904
23905 /*
23906 * Calling convention :
23907 @@ -35,6 +36,7 @@ sk_load_word:
23908 jle bpf_slow_path_word
23909 mov (SKBDATA,%rsi),%eax
23910 bswap %eax /* ntohl() */
23911 + pax_force_retaddr
23912 ret
23913
23914
23915 @@ -53,6 +55,7 @@ sk_load_half:
23916 jle bpf_slow_path_half
23917 movzwl (SKBDATA,%rsi),%eax
23918 rol $8,%ax # ntohs()
23919 + pax_force_retaddr
23920 ret
23921
23922 sk_load_byte_ind:
23923 @@ -66,6 +69,7 @@ sk_load_byte:
23924 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23925 jle bpf_slow_path_byte
23926 movzbl (SKBDATA,%rsi),%eax
23927 + pax_force_retaddr
23928 ret
23929
23930 /**
23931 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23932 movzbl (SKBDATA,%rsi),%ebx
23933 and $15,%bl
23934 shl $2,%bl
23935 + pax_force_retaddr
23936 ret
23937 CFI_ENDPROC
23938 ENDPROC(sk_load_byte_msh)
23939 @@ -91,6 +96,7 @@ bpf_error:
23940 xor %eax,%eax
23941 mov -8(%rbp),%rbx
23942 leaveq
23943 + pax_force_retaddr
23944 ret
23945
23946 /* rsi contains offset and can be scratched */
23947 @@ -113,6 +119,7 @@ bpf_slow_path_word:
23948 js bpf_error
23949 mov -12(%rbp),%eax
23950 bswap %eax
23951 + pax_force_retaddr
23952 ret
23953
23954 bpf_slow_path_half:
23955 @@ -121,12 +128,14 @@ bpf_slow_path_half:
23956 mov -12(%rbp),%ax
23957 rol $8,%ax
23958 movzwl %ax,%eax
23959 + pax_force_retaddr
23960 ret
23961
23962 bpf_slow_path_byte:
23963 bpf_slow_path_common(1)
23964 js bpf_error
23965 movzbl -12(%rbp),%eax
23966 + pax_force_retaddr
23967 ret
23968
23969 bpf_slow_path_byte_msh:
23970 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23971 and $15,%al
23972 shl $2,%al
23973 xchg %eax,%ebx
23974 + pax_force_retaddr
23975 ret
23976 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23977 index 7c1b765..8c072c6 100644
23978 --- a/arch/x86/net/bpf_jit_comp.c
23979 +++ b/arch/x86/net/bpf_jit_comp.c
23980 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23981 set_fs(old_fs);
23982 }
23983
23984 +struct bpf_jit_work {
23985 + struct work_struct work;
23986 + void *image;
23987 +};
23988
23989 void bpf_jit_compile(struct sk_filter *fp)
23990 {
23991 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23992 if (addrs == NULL)
23993 return;
23994
23995 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23996 + if (!fp->work)
23997 + goto out;
23998 +
23999 /* Before first pass, make a rough estimation of addrs[]
24000 * each bpf instruction is translated to less than 64 bytes
24001 */
24002 @@ -476,7 +484,7 @@ void bpf_jit_compile(struct sk_filter *fp)
24003 func = sk_load_word;
24004 common_load: seen |= SEEN_DATAREF;
24005 if ((int)K < 0)
24006 - goto out;
24007 + goto error;
24008 t_offset = func - (image + addrs[i]);
24009 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
24010 EMIT1_off32(0xe8, t_offset); /* call */
24011 @@ -586,17 +594,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24012 break;
24013 default:
24014 /* hmm, too complex filter, give up with jit compiler */
24015 - goto out;
24016 + goto error;
24017 }
24018 ilen = prog - temp;
24019 if (image) {
24020 if (unlikely(proglen + ilen > oldproglen)) {
24021 pr_err("bpb_jit_compile fatal error\n");
24022 - kfree(addrs);
24023 - module_free(NULL, image);
24024 - return;
24025 + module_free_exec(NULL, image);
24026 + goto error;
24027 }
24028 + pax_open_kernel();
24029 memcpy(image + proglen, temp, ilen);
24030 + pax_close_kernel();
24031 }
24032 proglen += ilen;
24033 addrs[i] = proglen;
24034 @@ -617,11 +626,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24035 break;
24036 }
24037 if (proglen == oldproglen) {
24038 - image = module_alloc(max_t(unsigned int,
24039 - proglen,
24040 - sizeof(struct work_struct)));
24041 + image = module_alloc_exec(proglen);
24042 if (!image)
24043 - goto out;
24044 + goto error;
24045 }
24046 oldproglen = proglen;
24047 }
24048 @@ -637,7 +644,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
24049 bpf_flush_icache(image, image + proglen);
24050
24051 fp->bpf_func = (void *)image;
24052 - }
24053 + } else
24054 +error:
24055 + kfree(fp->work);
24056 +
24057 out:
24058 kfree(addrs);
24059 return;
24060 @@ -645,18 +655,20 @@ out:
24061
24062 static void jit_free_defer(struct work_struct *arg)
24063 {
24064 - module_free(NULL, arg);
24065 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
24066 + kfree(arg);
24067 }
24068
24069 /* run from softirq, we must use a work_struct to call
24070 - * module_free() from process context
24071 + * module_free_exec() from process context
24072 */
24073 void bpf_jit_free(struct sk_filter *fp)
24074 {
24075 if (fp->bpf_func != sk_run_filter) {
24076 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
24077 + struct work_struct *work = &fp->work->work;
24078
24079 INIT_WORK(work, jit_free_defer);
24080 + fp->work->image = fp->bpf_func;
24081 schedule_work(work);
24082 }
24083 }
24084 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
24085 index bff89df..377758a 100644
24086 --- a/arch/x86/oprofile/backtrace.c
24087 +++ b/arch/x86/oprofile/backtrace.c
24088 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
24089 struct stack_frame_ia32 *fp;
24090 unsigned long bytes;
24091
24092 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
24093 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
24094 if (bytes != sizeof(bufhead))
24095 return NULL;
24096
24097 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
24098 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
24099
24100 oprofile_add_trace(bufhead[0].return_address);
24101
24102 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
24103 struct stack_frame bufhead[2];
24104 unsigned long bytes;
24105
24106 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
24107 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
24108 if (bytes != sizeof(bufhead))
24109 return NULL;
24110
24111 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
24112 {
24113 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
24114
24115 - if (!user_mode_vm(regs)) {
24116 + if (!user_mode(regs)) {
24117 unsigned long stack = kernel_stack_pointer(regs);
24118 if (depth)
24119 dump_trace(NULL, regs, (unsigned long *)stack, 0,
24120 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
24121 index cb29191..036766d 100644
24122 --- a/arch/x86/pci/mrst.c
24123 +++ b/arch/x86/pci/mrst.c
24124 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
24125 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
24126 pci_mmcfg_late_init();
24127 pcibios_enable_irq = mrst_pci_irq_enable;
24128 - pci_root_ops = pci_mrst_ops;
24129 + pax_open_kernel();
24130 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
24131 + pax_close_kernel();
24132 /* Continue with standard init */
24133 return 1;
24134 }
24135 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
24136 index db0e9a5..0372c14 100644
24137 --- a/arch/x86/pci/pcbios.c
24138 +++ b/arch/x86/pci/pcbios.c
24139 @@ -79,50 +79,93 @@ union bios32 {
24140 static struct {
24141 unsigned long address;
24142 unsigned short segment;
24143 -} bios32_indirect = { 0, __KERNEL_CS };
24144 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
24145
24146 /*
24147 * Returns the entry point for the given service, NULL on error
24148 */
24149
24150 -static unsigned long bios32_service(unsigned long service)
24151 +static unsigned long __devinit bios32_service(unsigned long service)
24152 {
24153 unsigned char return_code; /* %al */
24154 unsigned long address; /* %ebx */
24155 unsigned long length; /* %ecx */
24156 unsigned long entry; /* %edx */
24157 unsigned long flags;
24158 + struct desc_struct d, *gdt;
24159
24160 local_irq_save(flags);
24161 - __asm__("lcall *(%%edi); cld"
24162 +
24163 + gdt = get_cpu_gdt_table(smp_processor_id());
24164 +
24165 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
24166 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
24167 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
24168 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
24169 +
24170 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
24171 : "=a" (return_code),
24172 "=b" (address),
24173 "=c" (length),
24174 "=d" (entry)
24175 : "0" (service),
24176 "1" (0),
24177 - "D" (&bios32_indirect));
24178 + "D" (&bios32_indirect),
24179 + "r"(__PCIBIOS_DS)
24180 + : "memory");
24181 +
24182 + pax_open_kernel();
24183 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
24184 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
24185 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
24186 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
24187 + pax_close_kernel();
24188 +
24189 local_irq_restore(flags);
24190
24191 switch (return_code) {
24192 - case 0:
24193 - return address + entry;
24194 - case 0x80: /* Not present */
24195 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
24196 - return 0;
24197 - default: /* Shouldn't happen */
24198 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
24199 - service, return_code);
24200 + case 0: {
24201 + int cpu;
24202 + unsigned char flags;
24203 +
24204 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
24205 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
24206 + printk(KERN_WARNING "bios32_service: not valid\n");
24207 return 0;
24208 + }
24209 + address = address + PAGE_OFFSET;
24210 + length += 16UL; /* some BIOSs underreport this... */
24211 + flags = 4;
24212 + if (length >= 64*1024*1024) {
24213 + length >>= PAGE_SHIFT;
24214 + flags |= 8;
24215 + }
24216 +
24217 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24218 + gdt = get_cpu_gdt_table(cpu);
24219 + pack_descriptor(&d, address, length, 0x9b, flags);
24220 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
24221 + pack_descriptor(&d, address, length, 0x93, flags);
24222 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
24223 + }
24224 + return entry;
24225 + }
24226 + case 0x80: /* Not present */
24227 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
24228 + return 0;
24229 + default: /* Shouldn't happen */
24230 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
24231 + service, return_code);
24232 + return 0;
24233 }
24234 }
24235
24236 static struct {
24237 unsigned long address;
24238 unsigned short segment;
24239 -} pci_indirect = { 0, __KERNEL_CS };
24240 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
24241
24242 -static int pci_bios_present;
24243 +static int pci_bios_present __read_only;
24244
24245 static int __devinit check_pcibios(void)
24246 {
24247 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
24248 unsigned long flags, pcibios_entry;
24249
24250 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
24251 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
24252 + pci_indirect.address = pcibios_entry;
24253
24254 local_irq_save(flags);
24255 - __asm__(
24256 - "lcall *(%%edi); cld\n\t"
24257 + __asm__("movw %w6, %%ds\n\t"
24258 + "lcall *%%ss:(%%edi); cld\n\t"
24259 + "push %%ss\n\t"
24260 + "pop %%ds\n\t"
24261 "jc 1f\n\t"
24262 "xor %%ah, %%ah\n"
24263 "1:"
24264 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
24265 "=b" (ebx),
24266 "=c" (ecx)
24267 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
24268 - "D" (&pci_indirect)
24269 + "D" (&pci_indirect),
24270 + "r" (__PCIBIOS_DS)
24271 : "memory");
24272 local_irq_restore(flags);
24273
24274 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24275
24276 switch (len) {
24277 case 1:
24278 - __asm__("lcall *(%%esi); cld\n\t"
24279 + __asm__("movw %w6, %%ds\n\t"
24280 + "lcall *%%ss:(%%esi); cld\n\t"
24281 + "push %%ss\n\t"
24282 + "pop %%ds\n\t"
24283 "jc 1f\n\t"
24284 "xor %%ah, %%ah\n"
24285 "1:"
24286 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24287 : "1" (PCIBIOS_READ_CONFIG_BYTE),
24288 "b" (bx),
24289 "D" ((long)reg),
24290 - "S" (&pci_indirect));
24291 + "S" (&pci_indirect),
24292 + "r" (__PCIBIOS_DS));
24293 /*
24294 * Zero-extend the result beyond 8 bits, do not trust the
24295 * BIOS having done it:
24296 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24297 *value &= 0xff;
24298 break;
24299 case 2:
24300 - __asm__("lcall *(%%esi); cld\n\t"
24301 + __asm__("movw %w6, %%ds\n\t"
24302 + "lcall *%%ss:(%%esi); cld\n\t"
24303 + "push %%ss\n\t"
24304 + "pop %%ds\n\t"
24305 "jc 1f\n\t"
24306 "xor %%ah, %%ah\n"
24307 "1:"
24308 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24309 : "1" (PCIBIOS_READ_CONFIG_WORD),
24310 "b" (bx),
24311 "D" ((long)reg),
24312 - "S" (&pci_indirect));
24313 + "S" (&pci_indirect),
24314 + "r" (__PCIBIOS_DS));
24315 /*
24316 * Zero-extend the result beyond 16 bits, do not trust the
24317 * BIOS having done it:
24318 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24319 *value &= 0xffff;
24320 break;
24321 case 4:
24322 - __asm__("lcall *(%%esi); cld\n\t"
24323 + __asm__("movw %w6, %%ds\n\t"
24324 + "lcall *%%ss:(%%esi); cld\n\t"
24325 + "push %%ss\n\t"
24326 + "pop %%ds\n\t"
24327 "jc 1f\n\t"
24328 "xor %%ah, %%ah\n"
24329 "1:"
24330 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24331 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24332 "b" (bx),
24333 "D" ((long)reg),
24334 - "S" (&pci_indirect));
24335 + "S" (&pci_indirect),
24336 + "r" (__PCIBIOS_DS));
24337 break;
24338 }
24339
24340 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24341
24342 switch (len) {
24343 case 1:
24344 - __asm__("lcall *(%%esi); cld\n\t"
24345 + __asm__("movw %w6, %%ds\n\t"
24346 + "lcall *%%ss:(%%esi); cld\n\t"
24347 + "push %%ss\n\t"
24348 + "pop %%ds\n\t"
24349 "jc 1f\n\t"
24350 "xor %%ah, %%ah\n"
24351 "1:"
24352 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24353 "c" (value),
24354 "b" (bx),
24355 "D" ((long)reg),
24356 - "S" (&pci_indirect));
24357 + "S" (&pci_indirect),
24358 + "r" (__PCIBIOS_DS));
24359 break;
24360 case 2:
24361 - __asm__("lcall *(%%esi); cld\n\t"
24362 + __asm__("movw %w6, %%ds\n\t"
24363 + "lcall *%%ss:(%%esi); cld\n\t"
24364 + "push %%ss\n\t"
24365 + "pop %%ds\n\t"
24366 "jc 1f\n\t"
24367 "xor %%ah, %%ah\n"
24368 "1:"
24369 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24370 "c" (value),
24371 "b" (bx),
24372 "D" ((long)reg),
24373 - "S" (&pci_indirect));
24374 + "S" (&pci_indirect),
24375 + "r" (__PCIBIOS_DS));
24376 break;
24377 case 4:
24378 - __asm__("lcall *(%%esi); cld\n\t"
24379 + __asm__("movw %w6, %%ds\n\t"
24380 + "lcall *%%ss:(%%esi); cld\n\t"
24381 + "push %%ss\n\t"
24382 + "pop %%ds\n\t"
24383 "jc 1f\n\t"
24384 "xor %%ah, %%ah\n"
24385 "1:"
24386 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24387 "c" (value),
24388 "b" (bx),
24389 "D" ((long)reg),
24390 - "S" (&pci_indirect));
24391 + "S" (&pci_indirect),
24392 + "r" (__PCIBIOS_DS));
24393 break;
24394 }
24395
24396 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24397
24398 DBG("PCI: Fetching IRQ routing table... ");
24399 __asm__("push %%es\n\t"
24400 + "movw %w8, %%ds\n\t"
24401 "push %%ds\n\t"
24402 "pop %%es\n\t"
24403 - "lcall *(%%esi); cld\n\t"
24404 + "lcall *%%ss:(%%esi); cld\n\t"
24405 "pop %%es\n\t"
24406 + "push %%ss\n\t"
24407 + "pop %%ds\n"
24408 "jc 1f\n\t"
24409 "xor %%ah, %%ah\n"
24410 "1:"
24411 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24412 "1" (0),
24413 "D" ((long) &opt),
24414 "S" (&pci_indirect),
24415 - "m" (opt)
24416 + "m" (opt),
24417 + "r" (__PCIBIOS_DS)
24418 : "memory");
24419 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24420 if (ret & 0xff00)
24421 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24422 {
24423 int ret;
24424
24425 - __asm__("lcall *(%%esi); cld\n\t"
24426 + __asm__("movw %w5, %%ds\n\t"
24427 + "lcall *%%ss:(%%esi); cld\n\t"
24428 + "push %%ss\n\t"
24429 + "pop %%ds\n"
24430 "jc 1f\n\t"
24431 "xor %%ah, %%ah\n"
24432 "1:"
24433 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24434 : "0" (PCIBIOS_SET_PCI_HW_INT),
24435 "b" ((dev->bus->number << 8) | dev->devfn),
24436 "c" ((irq << 8) | (pin + 10)),
24437 - "S" (&pci_indirect));
24438 + "S" (&pci_indirect),
24439 + "r" (__PCIBIOS_DS));
24440 return !(ret & 0xff00);
24441 }
24442 EXPORT_SYMBOL(pcibios_set_irq_routing);
24443 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24444 index 40e4469..1ab536e 100644
24445 --- a/arch/x86/platform/efi/efi_32.c
24446 +++ b/arch/x86/platform/efi/efi_32.c
24447 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
24448 {
24449 struct desc_ptr gdt_descr;
24450
24451 +#ifdef CONFIG_PAX_KERNEXEC
24452 + struct desc_struct d;
24453 +#endif
24454 +
24455 local_irq_save(efi_rt_eflags);
24456
24457 load_cr3(initial_page_table);
24458 __flush_tlb_all();
24459
24460 +#ifdef CONFIG_PAX_KERNEXEC
24461 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24462 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24463 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24464 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24465 +#endif
24466 +
24467 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24468 gdt_descr.size = GDT_SIZE - 1;
24469 load_gdt(&gdt_descr);
24470 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
24471 {
24472 struct desc_ptr gdt_descr;
24473
24474 +#ifdef CONFIG_PAX_KERNEXEC
24475 + struct desc_struct d;
24476 +
24477 + memset(&d, 0, sizeof d);
24478 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24479 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24480 +#endif
24481 +
24482 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24483 gdt_descr.size = GDT_SIZE - 1;
24484 load_gdt(&gdt_descr);
24485 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24486 index fbe66e6..c5c0dd2 100644
24487 --- a/arch/x86/platform/efi/efi_stub_32.S
24488 +++ b/arch/x86/platform/efi/efi_stub_32.S
24489 @@ -6,7 +6,9 @@
24490 */
24491
24492 #include <linux/linkage.h>
24493 +#include <linux/init.h>
24494 #include <asm/page_types.h>
24495 +#include <asm/segment.h>
24496
24497 /*
24498 * efi_call_phys(void *, ...) is a function with variable parameters.
24499 @@ -20,7 +22,7 @@
24500 * service functions will comply with gcc calling convention, too.
24501 */
24502
24503 -.text
24504 +__INIT
24505 ENTRY(efi_call_phys)
24506 /*
24507 * 0. The function can only be called in Linux kernel. So CS has been
24508 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24509 * The mapping of lower virtual memory has been created in prelog and
24510 * epilog.
24511 */
24512 - movl $1f, %edx
24513 - subl $__PAGE_OFFSET, %edx
24514 - jmp *%edx
24515 + movl $(__KERNEXEC_EFI_DS), %edx
24516 + mov %edx, %ds
24517 + mov %edx, %es
24518 + mov %edx, %ss
24519 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24520 1:
24521
24522 /*
24523 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24524 * parameter 2, ..., param n. To make things easy, we save the return
24525 * address of efi_call_phys in a global variable.
24526 */
24527 - popl %edx
24528 - movl %edx, saved_return_addr
24529 - /* get the function pointer into ECX*/
24530 - popl %ecx
24531 - movl %ecx, efi_rt_function_ptr
24532 - movl $2f, %edx
24533 - subl $__PAGE_OFFSET, %edx
24534 - pushl %edx
24535 + popl (saved_return_addr)
24536 + popl (efi_rt_function_ptr)
24537
24538 /*
24539 * 3. Clear PG bit in %CR0.
24540 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24541 /*
24542 * 5. Call the physical function.
24543 */
24544 - jmp *%ecx
24545 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
24546
24547 -2:
24548 /*
24549 * 6. After EFI runtime service returns, control will return to
24550 * following instruction. We'd better readjust stack pointer first.
24551 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24552 movl %cr0, %edx
24553 orl $0x80000000, %edx
24554 movl %edx, %cr0
24555 - jmp 1f
24556 -1:
24557 +
24558 /*
24559 * 8. Now restore the virtual mode from flat mode by
24560 * adding EIP with PAGE_OFFSET.
24561 */
24562 - movl $1f, %edx
24563 - jmp *%edx
24564 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24565 1:
24566 + movl $(__KERNEL_DS), %edx
24567 + mov %edx, %ds
24568 + mov %edx, %es
24569 + mov %edx, %ss
24570
24571 /*
24572 * 9. Balance the stack. And because EAX contain the return value,
24573 * we'd better not clobber it.
24574 */
24575 - leal efi_rt_function_ptr, %edx
24576 - movl (%edx), %ecx
24577 - pushl %ecx
24578 + pushl (efi_rt_function_ptr)
24579
24580 /*
24581 - * 10. Push the saved return address onto the stack and return.
24582 + * 10. Return to the saved return address.
24583 */
24584 - leal saved_return_addr, %edx
24585 - movl (%edx), %ecx
24586 - pushl %ecx
24587 - ret
24588 + jmpl *(saved_return_addr)
24589 ENDPROC(efi_call_phys)
24590 .previous
24591
24592 -.data
24593 +__INITDATA
24594 saved_return_addr:
24595 .long 0
24596 efi_rt_function_ptr:
24597 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24598 index 4c07cca..2c8427d 100644
24599 --- a/arch/x86/platform/efi/efi_stub_64.S
24600 +++ b/arch/x86/platform/efi/efi_stub_64.S
24601 @@ -7,6 +7,7 @@
24602 */
24603
24604 #include <linux/linkage.h>
24605 +#include <asm/alternative-asm.h>
24606
24607 #define SAVE_XMM \
24608 mov %rsp, %rax; \
24609 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
24610 call *%rdi
24611 addq $32, %rsp
24612 RESTORE_XMM
24613 + pax_force_retaddr 0, 1
24614 ret
24615 ENDPROC(efi_call0)
24616
24617 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
24618 call *%rdi
24619 addq $32, %rsp
24620 RESTORE_XMM
24621 + pax_force_retaddr 0, 1
24622 ret
24623 ENDPROC(efi_call1)
24624
24625 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
24626 call *%rdi
24627 addq $32, %rsp
24628 RESTORE_XMM
24629 + pax_force_retaddr 0, 1
24630 ret
24631 ENDPROC(efi_call2)
24632
24633 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
24634 call *%rdi
24635 addq $32, %rsp
24636 RESTORE_XMM
24637 + pax_force_retaddr 0, 1
24638 ret
24639 ENDPROC(efi_call3)
24640
24641 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
24642 call *%rdi
24643 addq $32, %rsp
24644 RESTORE_XMM
24645 + pax_force_retaddr 0, 1
24646 ret
24647 ENDPROC(efi_call4)
24648
24649 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
24650 call *%rdi
24651 addq $48, %rsp
24652 RESTORE_XMM
24653 + pax_force_retaddr 0, 1
24654 ret
24655 ENDPROC(efi_call5)
24656
24657 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
24658 call *%rdi
24659 addq $48, %rsp
24660 RESTORE_XMM
24661 + pax_force_retaddr 0, 1
24662 ret
24663 ENDPROC(efi_call6)
24664 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24665 index ad4ec1c..686479e 100644
24666 --- a/arch/x86/platform/mrst/mrst.c
24667 +++ b/arch/x86/platform/mrst/mrst.c
24668 @@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
24669 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
24670 int sfi_mrtc_num;
24671
24672 -static void mrst_power_off(void)
24673 +static __noreturn void mrst_power_off(void)
24674 {
24675 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24676 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
24677 + BUG();
24678 }
24679
24680 -static void mrst_reboot(void)
24681 +static __noreturn void mrst_reboot(void)
24682 {
24683 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24684 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
24685 else
24686 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
24687 + BUG();
24688 }
24689
24690 /* parse all the mtimer info to a static mtimer array */
24691 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24692 index f10c0af..3ec1f95 100644
24693 --- a/arch/x86/power/cpu.c
24694 +++ b/arch/x86/power/cpu.c
24695 @@ -131,7 +131,7 @@ static void do_fpu_end(void)
24696 static void fix_processor_context(void)
24697 {
24698 int cpu = smp_processor_id();
24699 - struct tss_struct *t = &per_cpu(init_tss, cpu);
24700 + struct tss_struct *t = init_tss + cpu;
24701
24702 set_tss_desc(cpu, t); /*
24703 * This just modifies memory; should not be
24704 @@ -141,7 +141,9 @@ static void fix_processor_context(void)
24705 */
24706
24707 #ifdef CONFIG_X86_64
24708 + pax_open_kernel();
24709 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24710 + pax_close_kernel();
24711
24712 syscall_init(); /* This sets MSR_*STAR and related */
24713 #endif
24714 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24715 index 5d17950..2253fc9 100644
24716 --- a/arch/x86/vdso/Makefile
24717 +++ b/arch/x86/vdso/Makefile
24718 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24719 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24720 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24721
24722 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24723 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24724 GCOV_PROFILE := n
24725
24726 #
24727 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24728 index 468d591..8e80a0a 100644
24729 --- a/arch/x86/vdso/vdso32-setup.c
24730 +++ b/arch/x86/vdso/vdso32-setup.c
24731 @@ -25,6 +25,7 @@
24732 #include <asm/tlbflush.h>
24733 #include <asm/vdso.h>
24734 #include <asm/proto.h>
24735 +#include <asm/mman.h>
24736
24737 enum {
24738 VDSO_DISABLED = 0,
24739 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24740 void enable_sep_cpu(void)
24741 {
24742 int cpu = get_cpu();
24743 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
24744 + struct tss_struct *tss = init_tss + cpu;
24745
24746 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24747 put_cpu();
24748 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24749 gate_vma.vm_start = FIXADDR_USER_START;
24750 gate_vma.vm_end = FIXADDR_USER_END;
24751 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24752 - gate_vma.vm_page_prot = __P101;
24753 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24754 /*
24755 * Make sure the vDSO gets into every core dump.
24756 * Dumping its contents makes post-mortem fully interpretable later
24757 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24758 if (compat)
24759 addr = VDSO_HIGH_BASE;
24760 else {
24761 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24762 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24763 if (IS_ERR_VALUE(addr)) {
24764 ret = addr;
24765 goto up_fail;
24766 }
24767 }
24768
24769 - current->mm->context.vdso = (void *)addr;
24770 + current->mm->context.vdso = addr;
24771
24772 if (compat_uses_vma || !compat) {
24773 /*
24774 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24775 }
24776
24777 current_thread_info()->sysenter_return =
24778 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24779 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24780
24781 up_fail:
24782 if (ret)
24783 - current->mm->context.vdso = NULL;
24784 + current->mm->context.vdso = 0;
24785
24786 up_write(&mm->mmap_sem);
24787
24788 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24789
24790 const char *arch_vma_name(struct vm_area_struct *vma)
24791 {
24792 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24793 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24794 return "[vdso]";
24795 +
24796 +#ifdef CONFIG_PAX_SEGMEXEC
24797 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24798 + return "[vdso]";
24799 +#endif
24800 +
24801 return NULL;
24802 }
24803
24804 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24805 * Check to see if the corresponding task was created in compat vdso
24806 * mode.
24807 */
24808 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24809 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24810 return &gate_vma;
24811 return NULL;
24812 }
24813 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24814 index 153407c..611cba9 100644
24815 --- a/arch/x86/vdso/vma.c
24816 +++ b/arch/x86/vdso/vma.c
24817 @@ -16,8 +16,6 @@
24818 #include <asm/vdso.h>
24819 #include <asm/page.h>
24820
24821 -unsigned int __read_mostly vdso_enabled = 1;
24822 -
24823 extern char vdso_start[], vdso_end[];
24824 extern unsigned short vdso_sync_cpuid;
24825
24826 @@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24827 * unaligned here as a result of stack start randomization.
24828 */
24829 addr = PAGE_ALIGN(addr);
24830 - addr = align_addr(addr, NULL, ALIGN_VDSO);
24831
24832 return addr;
24833 }
24834 @@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24835 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24836 {
24837 struct mm_struct *mm = current->mm;
24838 - unsigned long addr;
24839 + unsigned long addr = 0;
24840 int ret;
24841
24842 - if (!vdso_enabled)
24843 - return 0;
24844 -
24845 down_write(&mm->mmap_sem);
24846 +
24847 +#ifdef CONFIG_PAX_RANDMMAP
24848 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24849 +#endif
24850 +
24851 addr = vdso_addr(mm->start_stack, vdso_size);
24852 + addr = align_addr(addr, NULL, ALIGN_VDSO);
24853 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24854 if (IS_ERR_VALUE(addr)) {
24855 ret = addr;
24856 goto up_fail;
24857 }
24858
24859 - current->mm->context.vdso = (void *)addr;
24860 + mm->context.vdso = addr;
24861
24862 ret = install_special_mapping(mm, addr, vdso_size,
24863 VM_READ|VM_EXEC|
24864 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24865 VM_ALWAYSDUMP,
24866 vdso_pages);
24867 - if (ret) {
24868 - current->mm->context.vdso = NULL;
24869 - goto up_fail;
24870 - }
24871 +
24872 + if (ret)
24873 + mm->context.vdso = 0;
24874
24875 up_fail:
24876 up_write(&mm->mmap_sem);
24877 return ret;
24878 }
24879 -
24880 -static __init int vdso_setup(char *s)
24881 -{
24882 - vdso_enabled = simple_strtoul(s, NULL, 0);
24883 - return 0;
24884 -}
24885 -__setup("vdso=", vdso_setup);
24886 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24887 index 1f92865..c843b20 100644
24888 --- a/arch/x86/xen/enlighten.c
24889 +++ b/arch/x86/xen/enlighten.c
24890 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24891
24892 struct shared_info xen_dummy_shared_info;
24893
24894 -void *xen_initial_gdt;
24895 -
24896 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24897 __read_mostly int xen_have_vector_callback;
24898 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24899 @@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24900 #endif
24901 };
24902
24903 -static void xen_reboot(int reason)
24904 +static __noreturn void xen_reboot(int reason)
24905 {
24906 struct sched_shutdown r = { .reason = reason };
24907
24908 @@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
24909 BUG();
24910 }
24911
24912 -static void xen_restart(char *msg)
24913 +static __noreturn void xen_restart(char *msg)
24914 {
24915 xen_reboot(SHUTDOWN_reboot);
24916 }
24917
24918 -static void xen_emergency_restart(void)
24919 +static __noreturn void xen_emergency_restart(void)
24920 {
24921 xen_reboot(SHUTDOWN_reboot);
24922 }
24923
24924 -static void xen_machine_halt(void)
24925 +static __noreturn void xen_machine_halt(void)
24926 {
24927 xen_reboot(SHUTDOWN_poweroff);
24928 }
24929 @@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
24930 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24931
24932 /* Work out if we support NX */
24933 - x86_configure_nx();
24934 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24935 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24936 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24937 + unsigned l, h;
24938 +
24939 + __supported_pte_mask |= _PAGE_NX;
24940 + rdmsr(MSR_EFER, l, h);
24941 + l |= EFER_NX;
24942 + wrmsr(MSR_EFER, l, h);
24943 + }
24944 +#endif
24945
24946 xen_setup_features();
24947
24948 @@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
24949
24950 machine_ops = xen_machine_ops;
24951
24952 - /*
24953 - * The only reliable way to retain the initial address of the
24954 - * percpu gdt_page is to remember it here, so we can go and
24955 - * mark it RW later, when the initial percpu area is freed.
24956 - */
24957 - xen_initial_gdt = &per_cpu(gdt_page, 0);
24958 -
24959 xen_smp_init();
24960
24961 #ifdef CONFIG_ACPI_NUMA
24962 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24963 index 87f6673..e2555a6 100644
24964 --- a/arch/x86/xen/mmu.c
24965 +++ b/arch/x86/xen/mmu.c
24966 @@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24967 convert_pfn_mfn(init_level4_pgt);
24968 convert_pfn_mfn(level3_ident_pgt);
24969 convert_pfn_mfn(level3_kernel_pgt);
24970 + convert_pfn_mfn(level3_vmalloc_start_pgt);
24971 + convert_pfn_mfn(level3_vmalloc_end_pgt);
24972 + convert_pfn_mfn(level3_vmemmap_pgt);
24973
24974 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24975 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24976 @@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24977 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24978 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24979 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24980 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24981 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24982 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24983 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24984 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24985 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24986 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24987
24988 @@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
24989 pv_mmu_ops.set_pud = xen_set_pud;
24990 #if PAGETABLE_LEVELS == 4
24991 pv_mmu_ops.set_pgd = xen_set_pgd;
24992 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24993 #endif
24994
24995 /* This will work as long as patching hasn't happened yet
24996 @@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24997 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24998 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24999 .set_pgd = xen_set_pgd_hyper,
25000 + .set_pgd_batched = xen_set_pgd_hyper,
25001
25002 .alloc_pud = xen_alloc_pmd_init,
25003 .release_pud = xen_release_pmd_init,
25004 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
25005 index 041d4fe..7666b7e 100644
25006 --- a/arch/x86/xen/smp.c
25007 +++ b/arch/x86/xen/smp.c
25008 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
25009 {
25010 BUG_ON(smp_processor_id() != 0);
25011 native_smp_prepare_boot_cpu();
25012 -
25013 - /* We've switched to the "real" per-cpu gdt, so make sure the
25014 - old memory can be recycled */
25015 - make_lowmem_page_readwrite(xen_initial_gdt);
25016 -
25017 xen_filter_cpu_maps();
25018 xen_setup_vcpu_info_placement();
25019 }
25020 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
25021 gdt = get_cpu_gdt_table(cpu);
25022
25023 ctxt->flags = VGCF_IN_KERNEL;
25024 - ctxt->user_regs.ds = __USER_DS;
25025 - ctxt->user_regs.es = __USER_DS;
25026 + ctxt->user_regs.ds = __KERNEL_DS;
25027 + ctxt->user_regs.es = __KERNEL_DS;
25028 ctxt->user_regs.ss = __KERNEL_DS;
25029 #ifdef CONFIG_X86_32
25030 ctxt->user_regs.fs = __KERNEL_PERCPU;
25031 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
25032 + savesegment(gs, ctxt->user_regs.gs);
25033 #else
25034 ctxt->gs_base_kernel = per_cpu_offset(cpu);
25035 #endif
25036 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
25037 int rc;
25038
25039 per_cpu(current_task, cpu) = idle;
25040 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
25041 #ifdef CONFIG_X86_32
25042 irq_ctx_init(cpu);
25043 #else
25044 clear_tsk_thread_flag(idle, TIF_FORK);
25045 - per_cpu(kernel_stack, cpu) =
25046 - (unsigned long)task_stack_page(idle) -
25047 - KERNEL_STACK_OFFSET + THREAD_SIZE;
25048 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
25049 #endif
25050 xen_setup_runstate_info(cpu);
25051 xen_setup_timer(cpu);
25052 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
25053 index b040b0e..8cc4fe0 100644
25054 --- a/arch/x86/xen/xen-asm_32.S
25055 +++ b/arch/x86/xen/xen-asm_32.S
25056 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
25057 ESP_OFFSET=4 # bytes pushed onto stack
25058
25059 /*
25060 - * Store vcpu_info pointer for easy access. Do it this way to
25061 - * avoid having to reload %fs
25062 + * Store vcpu_info pointer for easy access.
25063 */
25064 #ifdef CONFIG_SMP
25065 - GET_THREAD_INFO(%eax)
25066 - movl TI_cpu(%eax), %eax
25067 - movl __per_cpu_offset(,%eax,4), %eax
25068 - mov xen_vcpu(%eax), %eax
25069 + push %fs
25070 + mov $(__KERNEL_PERCPU), %eax
25071 + mov %eax, %fs
25072 + mov PER_CPU_VAR(xen_vcpu), %eax
25073 + pop %fs
25074 #else
25075 movl xen_vcpu, %eax
25076 #endif
25077 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
25078 index aaa7291..3f77960 100644
25079 --- a/arch/x86/xen/xen-head.S
25080 +++ b/arch/x86/xen/xen-head.S
25081 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
25082 #ifdef CONFIG_X86_32
25083 mov %esi,xen_start_info
25084 mov $init_thread_union+THREAD_SIZE,%esp
25085 +#ifdef CONFIG_SMP
25086 + movl $cpu_gdt_table,%edi
25087 + movl $__per_cpu_load,%eax
25088 + movw %ax,__KERNEL_PERCPU + 2(%edi)
25089 + rorl $16,%eax
25090 + movb %al,__KERNEL_PERCPU + 4(%edi)
25091 + movb %ah,__KERNEL_PERCPU + 7(%edi)
25092 + movl $__per_cpu_end - 1,%eax
25093 + subl $__per_cpu_start,%eax
25094 + movw %ax,__KERNEL_PERCPU + 0(%edi)
25095 +#endif
25096 #else
25097 mov %rsi,xen_start_info
25098 mov $init_thread_union+THREAD_SIZE,%rsp
25099 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
25100 index b095739..8c17bcd 100644
25101 --- a/arch/x86/xen/xen-ops.h
25102 +++ b/arch/x86/xen/xen-ops.h
25103 @@ -10,8 +10,6 @@
25104 extern const char xen_hypervisor_callback[];
25105 extern const char xen_failsafe_callback[];
25106
25107 -extern void *xen_initial_gdt;
25108 -
25109 struct trap_info;
25110 void xen_copy_trap_info(struct trap_info *traps);
25111
25112 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
25113 index 58916af..9cb880b 100644
25114 --- a/block/blk-iopoll.c
25115 +++ b/block/blk-iopoll.c
25116 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
25117 }
25118 EXPORT_SYMBOL(blk_iopoll_complete);
25119
25120 -static void blk_iopoll_softirq(struct softirq_action *h)
25121 +static void blk_iopoll_softirq(void)
25122 {
25123 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
25124 int rearm = 0, budget = blk_iopoll_budget;
25125 diff --git a/block/blk-map.c b/block/blk-map.c
25126 index 623e1cd..ca1e109 100644
25127 --- a/block/blk-map.c
25128 +++ b/block/blk-map.c
25129 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
25130 if (!len || !kbuf)
25131 return -EINVAL;
25132
25133 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
25134 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
25135 if (do_copy)
25136 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
25137 else
25138 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
25139 index 1366a89..e17f54b 100644
25140 --- a/block/blk-softirq.c
25141 +++ b/block/blk-softirq.c
25142 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
25143 * Softirq action handler - move entries to local list and loop over them
25144 * while passing them to the queue registered handler.
25145 */
25146 -static void blk_done_softirq(struct softirq_action *h)
25147 +static void blk_done_softirq(void)
25148 {
25149 struct list_head *cpu_list, local_list;
25150
25151 diff --git a/block/bsg.c b/block/bsg.c
25152 index 702f131..37808bf 100644
25153 --- a/block/bsg.c
25154 +++ b/block/bsg.c
25155 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
25156 struct sg_io_v4 *hdr, struct bsg_device *bd,
25157 fmode_t has_write_perm)
25158 {
25159 + unsigned char tmpcmd[sizeof(rq->__cmd)];
25160 + unsigned char *cmdptr;
25161 +
25162 if (hdr->request_len > BLK_MAX_CDB) {
25163 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
25164 if (!rq->cmd)
25165 return -ENOMEM;
25166 - }
25167 + cmdptr = rq->cmd;
25168 + } else
25169 + cmdptr = tmpcmd;
25170
25171 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
25172 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
25173 hdr->request_len))
25174 return -EFAULT;
25175
25176 + if (cmdptr != rq->cmd)
25177 + memcpy(rq->cmd, cmdptr, hdr->request_len);
25178 +
25179 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
25180 if (blk_verify_command(rq->cmd, has_write_perm))
25181 return -EPERM;
25182 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
25183 index 7b72502..646105c 100644
25184 --- a/block/compat_ioctl.c
25185 +++ b/block/compat_ioctl.c
25186 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
25187 err |= __get_user(f->spec1, &uf->spec1);
25188 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
25189 err |= __get_user(name, &uf->name);
25190 - f->name = compat_ptr(name);
25191 + f->name = (void __force_kernel *)compat_ptr(name);
25192 if (err) {
25193 err = -EFAULT;
25194 goto out;
25195 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
25196 index 688be8a..8a37d98 100644
25197 --- a/block/scsi_ioctl.c
25198 +++ b/block/scsi_ioctl.c
25199 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
25200 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
25201 struct sg_io_hdr *hdr, fmode_t mode)
25202 {
25203 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
25204 + unsigned char tmpcmd[sizeof(rq->__cmd)];
25205 + unsigned char *cmdptr;
25206 +
25207 + if (rq->cmd != rq->__cmd)
25208 + cmdptr = rq->cmd;
25209 + else
25210 + cmdptr = tmpcmd;
25211 +
25212 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
25213 return -EFAULT;
25214 +
25215 + if (cmdptr != rq->cmd)
25216 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
25217 +
25218 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
25219 return -EPERM;
25220
25221 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
25222 int err;
25223 unsigned int in_len, out_len, bytes, opcode, cmdlen;
25224 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
25225 + unsigned char tmpcmd[sizeof(rq->__cmd)];
25226 + unsigned char *cmdptr;
25227
25228 if (!sic)
25229 return -EINVAL;
25230 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
25231 */
25232 err = -EFAULT;
25233 rq->cmd_len = cmdlen;
25234 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
25235 +
25236 + if (rq->cmd != rq->__cmd)
25237 + cmdptr = rq->cmd;
25238 + else
25239 + cmdptr = tmpcmd;
25240 +
25241 + if (copy_from_user(cmdptr, sic->data, cmdlen))
25242 goto error;
25243
25244 + if (rq->cmd != cmdptr)
25245 + memcpy(rq->cmd, cmdptr, cmdlen);
25246 +
25247 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
25248 goto error;
25249
25250 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
25251 index 671d4d6..5f24030 100644
25252 --- a/crypto/cryptd.c
25253 +++ b/crypto/cryptd.c
25254 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
25255
25256 struct cryptd_blkcipher_request_ctx {
25257 crypto_completion_t complete;
25258 -};
25259 +} __no_const;
25260
25261 struct cryptd_hash_ctx {
25262 struct crypto_shash *child;
25263 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
25264
25265 struct cryptd_aead_request_ctx {
25266 crypto_completion_t complete;
25267 -};
25268 +} __no_const;
25269
25270 static void cryptd_queue_worker(struct work_struct *work);
25271
25272 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
25273 index 5d41894..22021e4 100644
25274 --- a/drivers/acpi/apei/cper.c
25275 +++ b/drivers/acpi/apei/cper.c
25276 @@ -38,12 +38,12 @@
25277 */
25278 u64 cper_next_record_id(void)
25279 {
25280 - static atomic64_t seq;
25281 + static atomic64_unchecked_t seq;
25282
25283 - if (!atomic64_read(&seq))
25284 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
25285 + if (!atomic64_read_unchecked(&seq))
25286 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
25287
25288 - return atomic64_inc_return(&seq);
25289 + return atomic64_inc_return_unchecked(&seq);
25290 }
25291 EXPORT_SYMBOL_GPL(cper_next_record_id);
25292
25293 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25294 index 6c47ae9..abfdd63 100644
25295 --- a/drivers/acpi/ec_sys.c
25296 +++ b/drivers/acpi/ec_sys.c
25297 @@ -12,6 +12,7 @@
25298 #include <linux/acpi.h>
25299 #include <linux/debugfs.h>
25300 #include <linux/module.h>
25301 +#include <linux/uaccess.h>
25302 #include "internal.h"
25303
25304 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25305 @@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25306 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25307 */
25308 unsigned int size = EC_SPACE_SIZE;
25309 - u8 *data = (u8 *) buf;
25310 + u8 data;
25311 loff_t init_off = *off;
25312 int err = 0;
25313
25314 @@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25315 size = count;
25316
25317 while (size) {
25318 - err = ec_read(*off, &data[*off - init_off]);
25319 + err = ec_read(*off, &data);
25320 if (err)
25321 return err;
25322 + if (put_user(data, &buf[*off - init_off]))
25323 + return -EFAULT;
25324 *off += 1;
25325 size--;
25326 }
25327 @@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25328
25329 unsigned int size = count;
25330 loff_t init_off = *off;
25331 - u8 *data = (u8 *) buf;
25332 int err = 0;
25333
25334 if (*off >= EC_SPACE_SIZE)
25335 @@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25336 }
25337
25338 while (size) {
25339 - u8 byte_write = data[*off - init_off];
25340 + u8 byte_write;
25341 + if (get_user(byte_write, &buf[*off - init_off]))
25342 + return -EFAULT;
25343 err = ec_write(*off, byte_write);
25344 if (err)
25345 return err;
25346 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25347 index 251c7b62..000462d 100644
25348 --- a/drivers/acpi/proc.c
25349 +++ b/drivers/acpi/proc.c
25350 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
25351 size_t count, loff_t * ppos)
25352 {
25353 struct list_head *node, *next;
25354 - char strbuf[5];
25355 - char str[5] = "";
25356 - unsigned int len = count;
25357 + char strbuf[5] = {0};
25358
25359 - if (len > 4)
25360 - len = 4;
25361 - if (len < 0)
25362 + if (count > 4)
25363 + count = 4;
25364 + if (copy_from_user(strbuf, buffer, count))
25365 return -EFAULT;
25366 -
25367 - if (copy_from_user(strbuf, buffer, len))
25368 - return -EFAULT;
25369 - strbuf[len] = '\0';
25370 - sscanf(strbuf, "%s", str);
25371 + strbuf[count] = '\0';
25372
25373 mutex_lock(&acpi_device_lock);
25374 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25375 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
25376 if (!dev->wakeup.flags.valid)
25377 continue;
25378
25379 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
25380 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25381 if (device_can_wakeup(&dev->dev)) {
25382 bool enable = !device_may_wakeup(&dev->dev);
25383 device_set_wakeup_enable(&dev->dev, enable);
25384 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25385 index 9d7bc9f..a6fc091 100644
25386 --- a/drivers/acpi/processor_driver.c
25387 +++ b/drivers/acpi/processor_driver.c
25388 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25389 return 0;
25390 #endif
25391
25392 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25393 + BUG_ON(pr->id >= nr_cpu_ids);
25394
25395 /*
25396 * Buggy BIOS check
25397 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25398 index c04ad68..0b99473 100644
25399 --- a/drivers/ata/libata-core.c
25400 +++ b/drivers/ata/libata-core.c
25401 @@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25402 struct ata_port *ap;
25403 unsigned int tag;
25404
25405 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25406 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25407 ap = qc->ap;
25408
25409 qc->flags = 0;
25410 @@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25411 struct ata_port *ap;
25412 struct ata_link *link;
25413
25414 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25415 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25416 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25417 ap = qc->ap;
25418 link = qc->dev->link;
25419 @@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25420 return;
25421
25422 spin_lock(&lock);
25423 + pax_open_kernel();
25424
25425 for (cur = ops->inherits; cur; cur = cur->inherits) {
25426 void **inherit = (void **)cur;
25427 @@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25428 if (IS_ERR(*pp))
25429 *pp = NULL;
25430
25431 - ops->inherits = NULL;
25432 + *(struct ata_port_operations **)&ops->inherits = NULL;
25433
25434 + pax_close_kernel();
25435 spin_unlock(&lock);
25436 }
25437
25438 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25439 index e8574bb..f9f6a72 100644
25440 --- a/drivers/ata/pata_arasan_cf.c
25441 +++ b/drivers/ata/pata_arasan_cf.c
25442 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25443 /* Handle platform specific quirks */
25444 if (pdata->quirk) {
25445 if (pdata->quirk & CF_BROKEN_PIO) {
25446 - ap->ops->set_piomode = NULL;
25447 + pax_open_kernel();
25448 + *(void **)&ap->ops->set_piomode = NULL;
25449 + pax_close_kernel();
25450 ap->pio_mask = 0;
25451 }
25452 if (pdata->quirk & CF_BROKEN_MWDMA)
25453 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25454 index f9b983a..887b9d8 100644
25455 --- a/drivers/atm/adummy.c
25456 +++ b/drivers/atm/adummy.c
25457 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25458 vcc->pop(vcc, skb);
25459 else
25460 dev_kfree_skb_any(skb);
25461 - atomic_inc(&vcc->stats->tx);
25462 + atomic_inc_unchecked(&vcc->stats->tx);
25463
25464 return 0;
25465 }
25466 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25467 index f8f41e0..1f987dd 100644
25468 --- a/drivers/atm/ambassador.c
25469 +++ b/drivers/atm/ambassador.c
25470 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25471 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25472
25473 // VC layer stats
25474 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25475 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25476
25477 // free the descriptor
25478 kfree (tx_descr);
25479 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25480 dump_skb ("<<<", vc, skb);
25481
25482 // VC layer stats
25483 - atomic_inc(&atm_vcc->stats->rx);
25484 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25485 __net_timestamp(skb);
25486 // end of our responsibility
25487 atm_vcc->push (atm_vcc, skb);
25488 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25489 } else {
25490 PRINTK (KERN_INFO, "dropped over-size frame");
25491 // should we count this?
25492 - atomic_inc(&atm_vcc->stats->rx_drop);
25493 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25494 }
25495
25496 } else {
25497 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25498 }
25499
25500 if (check_area (skb->data, skb->len)) {
25501 - atomic_inc(&atm_vcc->stats->tx_err);
25502 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25503 return -ENOMEM; // ?
25504 }
25505
25506 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25507 index b22d71c..d6e1049 100644
25508 --- a/drivers/atm/atmtcp.c
25509 +++ b/drivers/atm/atmtcp.c
25510 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25511 if (vcc->pop) vcc->pop(vcc,skb);
25512 else dev_kfree_skb(skb);
25513 if (dev_data) return 0;
25514 - atomic_inc(&vcc->stats->tx_err);
25515 + atomic_inc_unchecked(&vcc->stats->tx_err);
25516 return -ENOLINK;
25517 }
25518 size = skb->len+sizeof(struct atmtcp_hdr);
25519 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25520 if (!new_skb) {
25521 if (vcc->pop) vcc->pop(vcc,skb);
25522 else dev_kfree_skb(skb);
25523 - atomic_inc(&vcc->stats->tx_err);
25524 + atomic_inc_unchecked(&vcc->stats->tx_err);
25525 return -ENOBUFS;
25526 }
25527 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25528 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25529 if (vcc->pop) vcc->pop(vcc,skb);
25530 else dev_kfree_skb(skb);
25531 out_vcc->push(out_vcc,new_skb);
25532 - atomic_inc(&vcc->stats->tx);
25533 - atomic_inc(&out_vcc->stats->rx);
25534 + atomic_inc_unchecked(&vcc->stats->tx);
25535 + atomic_inc_unchecked(&out_vcc->stats->rx);
25536 return 0;
25537 }
25538
25539 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25540 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25541 read_unlock(&vcc_sklist_lock);
25542 if (!out_vcc) {
25543 - atomic_inc(&vcc->stats->tx_err);
25544 + atomic_inc_unchecked(&vcc->stats->tx_err);
25545 goto done;
25546 }
25547 skb_pull(skb,sizeof(struct atmtcp_hdr));
25548 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25549 __net_timestamp(new_skb);
25550 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25551 out_vcc->push(out_vcc,new_skb);
25552 - atomic_inc(&vcc->stats->tx);
25553 - atomic_inc(&out_vcc->stats->rx);
25554 + atomic_inc_unchecked(&vcc->stats->tx);
25555 + atomic_inc_unchecked(&out_vcc->stats->rx);
25556 done:
25557 if (vcc->pop) vcc->pop(vcc,skb);
25558 else dev_kfree_skb(skb);
25559 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25560 index 956e9ac..133516d 100644
25561 --- a/drivers/atm/eni.c
25562 +++ b/drivers/atm/eni.c
25563 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25564 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25565 vcc->dev->number);
25566 length = 0;
25567 - atomic_inc(&vcc->stats->rx_err);
25568 + atomic_inc_unchecked(&vcc->stats->rx_err);
25569 }
25570 else {
25571 length = ATM_CELL_SIZE-1; /* no HEC */
25572 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25573 size);
25574 }
25575 eff = length = 0;
25576 - atomic_inc(&vcc->stats->rx_err);
25577 + atomic_inc_unchecked(&vcc->stats->rx_err);
25578 }
25579 else {
25580 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25581 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25582 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25583 vcc->dev->number,vcc->vci,length,size << 2,descr);
25584 length = eff = 0;
25585 - atomic_inc(&vcc->stats->rx_err);
25586 + atomic_inc_unchecked(&vcc->stats->rx_err);
25587 }
25588 }
25589 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25590 @@ -771,7 +771,7 @@ rx_dequeued++;
25591 vcc->push(vcc,skb);
25592 pushed++;
25593 }
25594 - atomic_inc(&vcc->stats->rx);
25595 + atomic_inc_unchecked(&vcc->stats->rx);
25596 }
25597 wake_up(&eni_dev->rx_wait);
25598 }
25599 @@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
25600 PCI_DMA_TODEVICE);
25601 if (vcc->pop) vcc->pop(vcc,skb);
25602 else dev_kfree_skb_irq(skb);
25603 - atomic_inc(&vcc->stats->tx);
25604 + atomic_inc_unchecked(&vcc->stats->tx);
25605 wake_up(&eni_dev->tx_wait);
25606 dma_complete++;
25607 }
25608 @@ -1569,7 +1569,7 @@ tx_complete++;
25609 /*--------------------------------- entries ---------------------------------*/
25610
25611
25612 -static const char *media_name[] __devinitdata = {
25613 +static const char *media_name[] __devinitconst = {
25614 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25615 "UTP", "05?", "06?", "07?", /* 4- 7 */
25616 "TAXI","09?", "10?", "11?", /* 8-11 */
25617 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25618 index 5072f8a..fa52520d 100644
25619 --- a/drivers/atm/firestream.c
25620 +++ b/drivers/atm/firestream.c
25621 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25622 }
25623 }
25624
25625 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25626 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25627
25628 fs_dprintk (FS_DEBUG_TXMEM, "i");
25629 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25630 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25631 #endif
25632 skb_put (skb, qe->p1 & 0xffff);
25633 ATM_SKB(skb)->vcc = atm_vcc;
25634 - atomic_inc(&atm_vcc->stats->rx);
25635 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25636 __net_timestamp(skb);
25637 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25638 atm_vcc->push (atm_vcc, skb);
25639 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25640 kfree (pe);
25641 }
25642 if (atm_vcc)
25643 - atomic_inc(&atm_vcc->stats->rx_drop);
25644 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25645 break;
25646 case 0x1f: /* Reassembly abort: no buffers. */
25647 /* Silently increment error counter. */
25648 if (atm_vcc)
25649 - atomic_inc(&atm_vcc->stats->rx_drop);
25650 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25651 break;
25652 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25653 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25654 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25655 index 361f5ae..7fc552d 100644
25656 --- a/drivers/atm/fore200e.c
25657 +++ b/drivers/atm/fore200e.c
25658 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25659 #endif
25660 /* check error condition */
25661 if (*entry->status & STATUS_ERROR)
25662 - atomic_inc(&vcc->stats->tx_err);
25663 + atomic_inc_unchecked(&vcc->stats->tx_err);
25664 else
25665 - atomic_inc(&vcc->stats->tx);
25666 + atomic_inc_unchecked(&vcc->stats->tx);
25667 }
25668 }
25669
25670 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25671 if (skb == NULL) {
25672 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25673
25674 - atomic_inc(&vcc->stats->rx_drop);
25675 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25676 return -ENOMEM;
25677 }
25678
25679 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25680
25681 dev_kfree_skb_any(skb);
25682
25683 - atomic_inc(&vcc->stats->rx_drop);
25684 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25685 return -ENOMEM;
25686 }
25687
25688 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25689
25690 vcc->push(vcc, skb);
25691 - atomic_inc(&vcc->stats->rx);
25692 + atomic_inc_unchecked(&vcc->stats->rx);
25693
25694 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25695
25696 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25697 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25698 fore200e->atm_dev->number,
25699 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25700 - atomic_inc(&vcc->stats->rx_err);
25701 + atomic_inc_unchecked(&vcc->stats->rx_err);
25702 }
25703 }
25704
25705 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25706 goto retry_here;
25707 }
25708
25709 - atomic_inc(&vcc->stats->tx_err);
25710 + atomic_inc_unchecked(&vcc->stats->tx_err);
25711
25712 fore200e->tx_sat++;
25713 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25714 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25715 index 9a51df4..f3bb5f8 100644
25716 --- a/drivers/atm/he.c
25717 +++ b/drivers/atm/he.c
25718 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25719
25720 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25721 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25722 - atomic_inc(&vcc->stats->rx_drop);
25723 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25724 goto return_host_buffers;
25725 }
25726
25727 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25728 RBRQ_LEN_ERR(he_dev->rbrq_head)
25729 ? "LEN_ERR" : "",
25730 vcc->vpi, vcc->vci);
25731 - atomic_inc(&vcc->stats->rx_err);
25732 + atomic_inc_unchecked(&vcc->stats->rx_err);
25733 goto return_host_buffers;
25734 }
25735
25736 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25737 vcc->push(vcc, skb);
25738 spin_lock(&he_dev->global_lock);
25739
25740 - atomic_inc(&vcc->stats->rx);
25741 + atomic_inc_unchecked(&vcc->stats->rx);
25742
25743 return_host_buffers:
25744 ++pdus_assembled;
25745 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25746 tpd->vcc->pop(tpd->vcc, tpd->skb);
25747 else
25748 dev_kfree_skb_any(tpd->skb);
25749 - atomic_inc(&tpd->vcc->stats->tx_err);
25750 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25751 }
25752 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25753 return;
25754 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25755 vcc->pop(vcc, skb);
25756 else
25757 dev_kfree_skb_any(skb);
25758 - atomic_inc(&vcc->stats->tx_err);
25759 + atomic_inc_unchecked(&vcc->stats->tx_err);
25760 return -EINVAL;
25761 }
25762
25763 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25764 vcc->pop(vcc, skb);
25765 else
25766 dev_kfree_skb_any(skb);
25767 - atomic_inc(&vcc->stats->tx_err);
25768 + atomic_inc_unchecked(&vcc->stats->tx_err);
25769 return -EINVAL;
25770 }
25771 #endif
25772 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25773 vcc->pop(vcc, skb);
25774 else
25775 dev_kfree_skb_any(skb);
25776 - atomic_inc(&vcc->stats->tx_err);
25777 + atomic_inc_unchecked(&vcc->stats->tx_err);
25778 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25779 return -ENOMEM;
25780 }
25781 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25782 vcc->pop(vcc, skb);
25783 else
25784 dev_kfree_skb_any(skb);
25785 - atomic_inc(&vcc->stats->tx_err);
25786 + atomic_inc_unchecked(&vcc->stats->tx_err);
25787 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25788 return -ENOMEM;
25789 }
25790 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25791 __enqueue_tpd(he_dev, tpd, cid);
25792 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25793
25794 - atomic_inc(&vcc->stats->tx);
25795 + atomic_inc_unchecked(&vcc->stats->tx);
25796
25797 return 0;
25798 }
25799 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25800 index b812103..e391a49 100644
25801 --- a/drivers/atm/horizon.c
25802 +++ b/drivers/atm/horizon.c
25803 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25804 {
25805 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25806 // VC layer stats
25807 - atomic_inc(&vcc->stats->rx);
25808 + atomic_inc_unchecked(&vcc->stats->rx);
25809 __net_timestamp(skb);
25810 // end of our responsibility
25811 vcc->push (vcc, skb);
25812 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25813 dev->tx_iovec = NULL;
25814
25815 // VC layer stats
25816 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25817 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25818
25819 // free the skb
25820 hrz_kfree_skb (skb);
25821 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25822 index 1c05212..c28e200 100644
25823 --- a/drivers/atm/idt77252.c
25824 +++ b/drivers/atm/idt77252.c
25825 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25826 else
25827 dev_kfree_skb(skb);
25828
25829 - atomic_inc(&vcc->stats->tx);
25830 + atomic_inc_unchecked(&vcc->stats->tx);
25831 }
25832
25833 atomic_dec(&scq->used);
25834 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25835 if ((sb = dev_alloc_skb(64)) == NULL) {
25836 printk("%s: Can't allocate buffers for aal0.\n",
25837 card->name);
25838 - atomic_add(i, &vcc->stats->rx_drop);
25839 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25840 break;
25841 }
25842 if (!atm_charge(vcc, sb->truesize)) {
25843 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25844 card->name);
25845 - atomic_add(i - 1, &vcc->stats->rx_drop);
25846 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25847 dev_kfree_skb(sb);
25848 break;
25849 }
25850 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25851 ATM_SKB(sb)->vcc = vcc;
25852 __net_timestamp(sb);
25853 vcc->push(vcc, sb);
25854 - atomic_inc(&vcc->stats->rx);
25855 + atomic_inc_unchecked(&vcc->stats->rx);
25856
25857 cell += ATM_CELL_PAYLOAD;
25858 }
25859 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25860 "(CDC: %08x)\n",
25861 card->name, len, rpp->len, readl(SAR_REG_CDC));
25862 recycle_rx_pool_skb(card, rpp);
25863 - atomic_inc(&vcc->stats->rx_err);
25864 + atomic_inc_unchecked(&vcc->stats->rx_err);
25865 return;
25866 }
25867 if (stat & SAR_RSQE_CRC) {
25868 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25869 recycle_rx_pool_skb(card, rpp);
25870 - atomic_inc(&vcc->stats->rx_err);
25871 + atomic_inc_unchecked(&vcc->stats->rx_err);
25872 return;
25873 }
25874 if (skb_queue_len(&rpp->queue) > 1) {
25875 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25876 RXPRINTK("%s: Can't alloc RX skb.\n",
25877 card->name);
25878 recycle_rx_pool_skb(card, rpp);
25879 - atomic_inc(&vcc->stats->rx_err);
25880 + atomic_inc_unchecked(&vcc->stats->rx_err);
25881 return;
25882 }
25883 if (!atm_charge(vcc, skb->truesize)) {
25884 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25885 __net_timestamp(skb);
25886
25887 vcc->push(vcc, skb);
25888 - atomic_inc(&vcc->stats->rx);
25889 + atomic_inc_unchecked(&vcc->stats->rx);
25890
25891 return;
25892 }
25893 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25894 __net_timestamp(skb);
25895
25896 vcc->push(vcc, skb);
25897 - atomic_inc(&vcc->stats->rx);
25898 + atomic_inc_unchecked(&vcc->stats->rx);
25899
25900 if (skb->truesize > SAR_FB_SIZE_3)
25901 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25902 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25903 if (vcc->qos.aal != ATM_AAL0) {
25904 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25905 card->name, vpi, vci);
25906 - atomic_inc(&vcc->stats->rx_drop);
25907 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25908 goto drop;
25909 }
25910
25911 if ((sb = dev_alloc_skb(64)) == NULL) {
25912 printk("%s: Can't allocate buffers for AAL0.\n",
25913 card->name);
25914 - atomic_inc(&vcc->stats->rx_err);
25915 + atomic_inc_unchecked(&vcc->stats->rx_err);
25916 goto drop;
25917 }
25918
25919 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25920 ATM_SKB(sb)->vcc = vcc;
25921 __net_timestamp(sb);
25922 vcc->push(vcc, sb);
25923 - atomic_inc(&vcc->stats->rx);
25924 + atomic_inc_unchecked(&vcc->stats->rx);
25925
25926 drop:
25927 skb_pull(queue, 64);
25928 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25929
25930 if (vc == NULL) {
25931 printk("%s: NULL connection in send().\n", card->name);
25932 - atomic_inc(&vcc->stats->tx_err);
25933 + atomic_inc_unchecked(&vcc->stats->tx_err);
25934 dev_kfree_skb(skb);
25935 return -EINVAL;
25936 }
25937 if (!test_bit(VCF_TX, &vc->flags)) {
25938 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25939 - atomic_inc(&vcc->stats->tx_err);
25940 + atomic_inc_unchecked(&vcc->stats->tx_err);
25941 dev_kfree_skb(skb);
25942 return -EINVAL;
25943 }
25944 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25945 break;
25946 default:
25947 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25948 - atomic_inc(&vcc->stats->tx_err);
25949 + atomic_inc_unchecked(&vcc->stats->tx_err);
25950 dev_kfree_skb(skb);
25951 return -EINVAL;
25952 }
25953
25954 if (skb_shinfo(skb)->nr_frags != 0) {
25955 printk("%s: No scatter-gather yet.\n", card->name);
25956 - atomic_inc(&vcc->stats->tx_err);
25957 + atomic_inc_unchecked(&vcc->stats->tx_err);
25958 dev_kfree_skb(skb);
25959 return -EINVAL;
25960 }
25961 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25962
25963 err = queue_skb(card, vc, skb, oam);
25964 if (err) {
25965 - atomic_inc(&vcc->stats->tx_err);
25966 + atomic_inc_unchecked(&vcc->stats->tx_err);
25967 dev_kfree_skb(skb);
25968 return err;
25969 }
25970 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25971 skb = dev_alloc_skb(64);
25972 if (!skb) {
25973 printk("%s: Out of memory in send_oam().\n", card->name);
25974 - atomic_inc(&vcc->stats->tx_err);
25975 + atomic_inc_unchecked(&vcc->stats->tx_err);
25976 return -ENOMEM;
25977 }
25978 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25979 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25980 index 3d0c2b0..45441fa 100644
25981 --- a/drivers/atm/iphase.c
25982 +++ b/drivers/atm/iphase.c
25983 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25984 status = (u_short) (buf_desc_ptr->desc_mode);
25985 if (status & (RX_CER | RX_PTE | RX_OFL))
25986 {
25987 - atomic_inc(&vcc->stats->rx_err);
25988 + atomic_inc_unchecked(&vcc->stats->rx_err);
25989 IF_ERR(printk("IA: bad packet, dropping it");)
25990 if (status & RX_CER) {
25991 IF_ERR(printk(" cause: packet CRC error\n");)
25992 @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
25993 len = dma_addr - buf_addr;
25994 if (len > iadev->rx_buf_sz) {
25995 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25996 - atomic_inc(&vcc->stats->rx_err);
25997 + atomic_inc_unchecked(&vcc->stats->rx_err);
25998 goto out_free_desc;
25999 }
26000
26001 @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26002 ia_vcc = INPH_IA_VCC(vcc);
26003 if (ia_vcc == NULL)
26004 {
26005 - atomic_inc(&vcc->stats->rx_err);
26006 + atomic_inc_unchecked(&vcc->stats->rx_err);
26007 dev_kfree_skb_any(skb);
26008 atm_return(vcc, atm_guess_pdu2truesize(len));
26009 goto INCR_DLE;
26010 @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26011 if ((length > iadev->rx_buf_sz) || (length >
26012 (skb->len - sizeof(struct cpcs_trailer))))
26013 {
26014 - atomic_inc(&vcc->stats->rx_err);
26015 + atomic_inc_unchecked(&vcc->stats->rx_err);
26016 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26017 length, skb->len);)
26018 dev_kfree_skb_any(skb);
26019 @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
26020
26021 IF_RX(printk("rx_dle_intr: skb push");)
26022 vcc->push(vcc,skb);
26023 - atomic_inc(&vcc->stats->rx);
26024 + atomic_inc_unchecked(&vcc->stats->rx);
26025 iadev->rx_pkt_cnt++;
26026 }
26027 INCR_DLE:
26028 @@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
26029 {
26030 struct k_sonet_stats *stats;
26031 stats = &PRIV(_ia_dev[board])->sonet_stats;
26032 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26033 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26034 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26035 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26036 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26037 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26038 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26039 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26040 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26041 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26042 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26043 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26044 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26045 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26046 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26047 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26048 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26049 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26050 }
26051 ia_cmds.status = 0;
26052 break;
26053 @@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
26054 if ((desc == 0) || (desc > iadev->num_tx_desc))
26055 {
26056 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26057 - atomic_inc(&vcc->stats->tx);
26058 + atomic_inc_unchecked(&vcc->stats->tx);
26059 if (vcc->pop)
26060 vcc->pop(vcc, skb);
26061 else
26062 @@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
26063 ATM_DESC(skb) = vcc->vci;
26064 skb_queue_tail(&iadev->tx_dma_q, skb);
26065
26066 - atomic_inc(&vcc->stats->tx);
26067 + atomic_inc_unchecked(&vcc->stats->tx);
26068 iadev->tx_pkt_cnt++;
26069 /* Increment transaction counter */
26070 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26071
26072 #if 0
26073 /* add flow control logic */
26074 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26075 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26076 if (iavcc->vc_desc_cnt > 10) {
26077 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26078 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26079 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
26080 index f556969..0da15eb 100644
26081 --- a/drivers/atm/lanai.c
26082 +++ b/drivers/atm/lanai.c
26083 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
26084 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26085 lanai_endtx(lanai, lvcc);
26086 lanai_free_skb(lvcc->tx.atmvcc, skb);
26087 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26088 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26089 }
26090
26091 /* Try to fill the buffer - don't call unless there is backlog */
26092 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
26093 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26094 __net_timestamp(skb);
26095 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26096 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26097 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26098 out:
26099 lvcc->rx.buf.ptr = end;
26100 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26101 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26102 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26103 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26104 lanai->stats.service_rxnotaal5++;
26105 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26106 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26107 return 0;
26108 }
26109 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26110 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26111 int bytes;
26112 read_unlock(&vcc_sklist_lock);
26113 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26114 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26115 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26116 lvcc->stats.x.aal5.service_trash++;
26117 bytes = (SERVICE_GET_END(s) * 16) -
26118 (((unsigned long) lvcc->rx.buf.ptr) -
26119 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26120 }
26121 if (s & SERVICE_STREAM) {
26122 read_unlock(&vcc_sklist_lock);
26123 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26124 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26125 lvcc->stats.x.aal5.service_stream++;
26126 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26127 "PDU on VCI %d!\n", lanai->number, vci);
26128 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
26129 return 0;
26130 }
26131 DPRINTK("got rx crc error on vci %d\n", vci);
26132 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26133 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26134 lvcc->stats.x.aal5.service_rxcrc++;
26135 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26136 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26137 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
26138 index 1c70c45..300718d 100644
26139 --- a/drivers/atm/nicstar.c
26140 +++ b/drivers/atm/nicstar.c
26141 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26142 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
26143 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
26144 card->index);
26145 - atomic_inc(&vcc->stats->tx_err);
26146 + atomic_inc_unchecked(&vcc->stats->tx_err);
26147 dev_kfree_skb_any(skb);
26148 return -EINVAL;
26149 }
26150 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26151 if (!vc->tx) {
26152 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
26153 card->index);
26154 - atomic_inc(&vcc->stats->tx_err);
26155 + atomic_inc_unchecked(&vcc->stats->tx_err);
26156 dev_kfree_skb_any(skb);
26157 return -EINVAL;
26158 }
26159 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26160 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
26161 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
26162 card->index);
26163 - atomic_inc(&vcc->stats->tx_err);
26164 + atomic_inc_unchecked(&vcc->stats->tx_err);
26165 dev_kfree_skb_any(skb);
26166 return -EINVAL;
26167 }
26168
26169 if (skb_shinfo(skb)->nr_frags != 0) {
26170 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26171 - atomic_inc(&vcc->stats->tx_err);
26172 + atomic_inc_unchecked(&vcc->stats->tx_err);
26173 dev_kfree_skb_any(skb);
26174 return -EINVAL;
26175 }
26176 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26177 }
26178
26179 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
26180 - atomic_inc(&vcc->stats->tx_err);
26181 + atomic_inc_unchecked(&vcc->stats->tx_err);
26182 dev_kfree_skb_any(skb);
26183 return -EIO;
26184 }
26185 - atomic_inc(&vcc->stats->tx);
26186 + atomic_inc_unchecked(&vcc->stats->tx);
26187
26188 return 0;
26189 }
26190 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26191 printk
26192 ("nicstar%d: Can't allocate buffers for aal0.\n",
26193 card->index);
26194 - atomic_add(i, &vcc->stats->rx_drop);
26195 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
26196 break;
26197 }
26198 if (!atm_charge(vcc, sb->truesize)) {
26199 RXPRINTK
26200 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
26201 card->index);
26202 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26203 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26204 dev_kfree_skb_any(sb);
26205 break;
26206 }
26207 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26208 ATM_SKB(sb)->vcc = vcc;
26209 __net_timestamp(sb);
26210 vcc->push(vcc, sb);
26211 - atomic_inc(&vcc->stats->rx);
26212 + atomic_inc_unchecked(&vcc->stats->rx);
26213 cell += ATM_CELL_PAYLOAD;
26214 }
26215
26216 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26217 if (iovb == NULL) {
26218 printk("nicstar%d: Out of iovec buffers.\n",
26219 card->index);
26220 - atomic_inc(&vcc->stats->rx_drop);
26221 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26222 recycle_rx_buf(card, skb);
26223 return;
26224 }
26225 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26226 small or large buffer itself. */
26227 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
26228 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26229 - atomic_inc(&vcc->stats->rx_err);
26230 + atomic_inc_unchecked(&vcc->stats->rx_err);
26231 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26232 NS_MAX_IOVECS);
26233 NS_PRV_IOVCNT(iovb) = 0;
26234 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26235 ("nicstar%d: Expected a small buffer, and this is not one.\n",
26236 card->index);
26237 which_list(card, skb);
26238 - atomic_inc(&vcc->stats->rx_err);
26239 + atomic_inc_unchecked(&vcc->stats->rx_err);
26240 recycle_rx_buf(card, skb);
26241 vc->rx_iov = NULL;
26242 recycle_iov_buf(card, iovb);
26243 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26244 ("nicstar%d: Expected a large buffer, and this is not one.\n",
26245 card->index);
26246 which_list(card, skb);
26247 - atomic_inc(&vcc->stats->rx_err);
26248 + atomic_inc_unchecked(&vcc->stats->rx_err);
26249 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26250 NS_PRV_IOVCNT(iovb));
26251 vc->rx_iov = NULL;
26252 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26253 printk(" - PDU size mismatch.\n");
26254 else
26255 printk(".\n");
26256 - atomic_inc(&vcc->stats->rx_err);
26257 + atomic_inc_unchecked(&vcc->stats->rx_err);
26258 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26259 NS_PRV_IOVCNT(iovb));
26260 vc->rx_iov = NULL;
26261 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26262 /* skb points to a small buffer */
26263 if (!atm_charge(vcc, skb->truesize)) {
26264 push_rxbufs(card, skb);
26265 - atomic_inc(&vcc->stats->rx_drop);
26266 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26267 } else {
26268 skb_put(skb, len);
26269 dequeue_sm_buf(card, skb);
26270 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26271 ATM_SKB(skb)->vcc = vcc;
26272 __net_timestamp(skb);
26273 vcc->push(vcc, skb);
26274 - atomic_inc(&vcc->stats->rx);
26275 + atomic_inc_unchecked(&vcc->stats->rx);
26276 }
26277 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
26278 struct sk_buff *sb;
26279 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26280 if (len <= NS_SMBUFSIZE) {
26281 if (!atm_charge(vcc, sb->truesize)) {
26282 push_rxbufs(card, sb);
26283 - atomic_inc(&vcc->stats->rx_drop);
26284 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26285 } else {
26286 skb_put(sb, len);
26287 dequeue_sm_buf(card, sb);
26288 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26289 ATM_SKB(sb)->vcc = vcc;
26290 __net_timestamp(sb);
26291 vcc->push(vcc, sb);
26292 - atomic_inc(&vcc->stats->rx);
26293 + atomic_inc_unchecked(&vcc->stats->rx);
26294 }
26295
26296 push_rxbufs(card, skb);
26297 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26298
26299 if (!atm_charge(vcc, skb->truesize)) {
26300 push_rxbufs(card, skb);
26301 - atomic_inc(&vcc->stats->rx_drop);
26302 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26303 } else {
26304 dequeue_lg_buf(card, skb);
26305 #ifdef NS_USE_DESTRUCTORS
26306 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26307 ATM_SKB(skb)->vcc = vcc;
26308 __net_timestamp(skb);
26309 vcc->push(vcc, skb);
26310 - atomic_inc(&vcc->stats->rx);
26311 + atomic_inc_unchecked(&vcc->stats->rx);
26312 }
26313
26314 push_rxbufs(card, sb);
26315 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26316 printk
26317 ("nicstar%d: Out of huge buffers.\n",
26318 card->index);
26319 - atomic_inc(&vcc->stats->rx_drop);
26320 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26321 recycle_iovec_rx_bufs(card,
26322 (struct iovec *)
26323 iovb->data,
26324 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26325 card->hbpool.count++;
26326 } else
26327 dev_kfree_skb_any(hb);
26328 - atomic_inc(&vcc->stats->rx_drop);
26329 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26330 } else {
26331 /* Copy the small buffer to the huge buffer */
26332 sb = (struct sk_buff *)iov->iov_base;
26333 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26334 #endif /* NS_USE_DESTRUCTORS */
26335 __net_timestamp(hb);
26336 vcc->push(vcc, hb);
26337 - atomic_inc(&vcc->stats->rx);
26338 + atomic_inc_unchecked(&vcc->stats->rx);
26339 }
26340 }
26341
26342 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26343 index 5d1d076..12fbca4 100644
26344 --- a/drivers/atm/solos-pci.c
26345 +++ b/drivers/atm/solos-pci.c
26346 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26347 }
26348 atm_charge(vcc, skb->truesize);
26349 vcc->push(vcc, skb);
26350 - atomic_inc(&vcc->stats->rx);
26351 + atomic_inc_unchecked(&vcc->stats->rx);
26352 break;
26353
26354 case PKT_STATUS:
26355 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26356 vcc = SKB_CB(oldskb)->vcc;
26357
26358 if (vcc) {
26359 - atomic_inc(&vcc->stats->tx);
26360 + atomic_inc_unchecked(&vcc->stats->tx);
26361 solos_pop(vcc, oldskb);
26362 } else
26363 dev_kfree_skb_irq(oldskb);
26364 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26365 index 90f1ccc..04c4a1e 100644
26366 --- a/drivers/atm/suni.c
26367 +++ b/drivers/atm/suni.c
26368 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26369
26370
26371 #define ADD_LIMITED(s,v) \
26372 - atomic_add((v),&stats->s); \
26373 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26374 + atomic_add_unchecked((v),&stats->s); \
26375 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26376
26377
26378 static void suni_hz(unsigned long from_timer)
26379 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26380 index 5120a96..e2572bd 100644
26381 --- a/drivers/atm/uPD98402.c
26382 +++ b/drivers/atm/uPD98402.c
26383 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26384 struct sonet_stats tmp;
26385 int error = 0;
26386
26387 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26388 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26389 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26390 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26391 if (zero && !error) {
26392 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26393
26394
26395 #define ADD_LIMITED(s,v) \
26396 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26397 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26398 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26399 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26400 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26401 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26402
26403
26404 static void stat_event(struct atm_dev *dev)
26405 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26406 if (reason & uPD98402_INT_PFM) stat_event(dev);
26407 if (reason & uPD98402_INT_PCO) {
26408 (void) GET(PCOCR); /* clear interrupt cause */
26409 - atomic_add(GET(HECCT),
26410 + atomic_add_unchecked(GET(HECCT),
26411 &PRIV(dev)->sonet_stats.uncorr_hcs);
26412 }
26413 if ((reason & uPD98402_INT_RFO) &&
26414 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26415 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26416 uPD98402_INT_LOS),PIMR); /* enable them */
26417 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26418 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26419 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26420 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26421 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26422 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26423 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26424 return 0;
26425 }
26426
26427 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26428 index d889f56..17eb71e 100644
26429 --- a/drivers/atm/zatm.c
26430 +++ b/drivers/atm/zatm.c
26431 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26432 }
26433 if (!size) {
26434 dev_kfree_skb_irq(skb);
26435 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26436 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26437 continue;
26438 }
26439 if (!atm_charge(vcc,skb->truesize)) {
26440 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26441 skb->len = size;
26442 ATM_SKB(skb)->vcc = vcc;
26443 vcc->push(vcc,skb);
26444 - atomic_inc(&vcc->stats->rx);
26445 + atomic_inc_unchecked(&vcc->stats->rx);
26446 }
26447 zout(pos & 0xffff,MTA(mbx));
26448 #if 0 /* probably a stupid idea */
26449 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26450 skb_queue_head(&zatm_vcc->backlog,skb);
26451 break;
26452 }
26453 - atomic_inc(&vcc->stats->tx);
26454 + atomic_inc_unchecked(&vcc->stats->tx);
26455 wake_up(&zatm_vcc->tx_wait);
26456 }
26457
26458 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26459 index a4760e0..51283cf 100644
26460 --- a/drivers/base/devtmpfs.c
26461 +++ b/drivers/base/devtmpfs.c
26462 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26463 if (!thread)
26464 return 0;
26465
26466 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26467 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26468 if (err)
26469 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26470 else
26471 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26472 index caf995f..6f76697 100644
26473 --- a/drivers/base/power/wakeup.c
26474 +++ b/drivers/base/power/wakeup.c
26475 @@ -30,14 +30,14 @@ bool events_check_enabled;
26476 * They need to be modified together atomically, so it's better to use one
26477 * atomic variable to hold them both.
26478 */
26479 -static atomic_t combined_event_count = ATOMIC_INIT(0);
26480 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26481
26482 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26483 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26484
26485 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26486 {
26487 - unsigned int comb = atomic_read(&combined_event_count);
26488 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
26489
26490 *cnt = (comb >> IN_PROGRESS_BITS);
26491 *inpr = comb & MAX_IN_PROGRESS;
26492 @@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26493 ws->last_time = ktime_get();
26494
26495 /* Increment the counter of events in progress. */
26496 - atomic_inc(&combined_event_count);
26497 + atomic_inc_unchecked(&combined_event_count);
26498 }
26499
26500 /**
26501 @@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26502 * Increment the counter of registered wakeup events and decrement the
26503 * couter of wakeup events in progress simultaneously.
26504 */
26505 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26506 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26507 }
26508
26509 /**
26510 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26511 index b0f553b..77b928b 100644
26512 --- a/drivers/block/cciss.c
26513 +++ b/drivers/block/cciss.c
26514 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26515 int err;
26516 u32 cp;
26517
26518 + memset(&arg64, 0, sizeof(arg64));
26519 +
26520 err = 0;
26521 err |=
26522 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26523 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
26524 while (!list_empty(&h->reqQ)) {
26525 c = list_entry(h->reqQ.next, CommandList_struct, list);
26526 /* can't do anything if fifo is full */
26527 - if ((h->access.fifo_full(h))) {
26528 + if ((h->access->fifo_full(h))) {
26529 dev_warn(&h->pdev->dev, "fifo full\n");
26530 break;
26531 }
26532 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
26533 h->Qdepth--;
26534
26535 /* Tell the controller execute command */
26536 - h->access.submit_command(h, c);
26537 + h->access->submit_command(h, c);
26538
26539 /* Put job onto the completed Q */
26540 addQ(&h->cmpQ, c);
26541 @@ -3443,17 +3445,17 @@ startio:
26542
26543 static inline unsigned long get_next_completion(ctlr_info_t *h)
26544 {
26545 - return h->access.command_completed(h);
26546 + return h->access->command_completed(h);
26547 }
26548
26549 static inline int interrupt_pending(ctlr_info_t *h)
26550 {
26551 - return h->access.intr_pending(h);
26552 + return h->access->intr_pending(h);
26553 }
26554
26555 static inline long interrupt_not_for_us(ctlr_info_t *h)
26556 {
26557 - return ((h->access.intr_pending(h) == 0) ||
26558 + return ((h->access->intr_pending(h) == 0) ||
26559 (h->interrupts_enabled == 0));
26560 }
26561
26562 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
26563 u32 a;
26564
26565 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26566 - return h->access.command_completed(h);
26567 + return h->access->command_completed(h);
26568
26569 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26570 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26571 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26572 trans_support & CFGTBL_Trans_use_short_tags);
26573
26574 /* Change the access methods to the performant access methods */
26575 - h->access = SA5_performant_access;
26576 + h->access = &SA5_performant_access;
26577 h->transMethod = CFGTBL_Trans_Performant;
26578
26579 return;
26580 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26581 if (prod_index < 0)
26582 return -ENODEV;
26583 h->product_name = products[prod_index].product_name;
26584 - h->access = *(products[prod_index].access);
26585 + h->access = products[prod_index].access;
26586
26587 if (cciss_board_disabled(h)) {
26588 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26589 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
26590 }
26591
26592 /* make sure the board interrupts are off */
26593 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26594 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26595 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26596 if (rc)
26597 goto clean2;
26598 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
26599 * fake ones to scoop up any residual completions.
26600 */
26601 spin_lock_irqsave(&h->lock, flags);
26602 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26603 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26604 spin_unlock_irqrestore(&h->lock, flags);
26605 free_irq(h->intr[h->intr_mode], h);
26606 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26607 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
26608 dev_info(&h->pdev->dev, "Board READY.\n");
26609 dev_info(&h->pdev->dev,
26610 "Waiting for stale completions to drain.\n");
26611 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26612 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26613 msleep(10000);
26614 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26615 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26616
26617 rc = controller_reset_failed(h->cfgtable);
26618 if (rc)
26619 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
26620 cciss_scsi_setup(h);
26621
26622 /* Turn the interrupts on so we can service requests */
26623 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26624 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26625
26626 /* Get the firmware version */
26627 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26628 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26629 kfree(flush_buf);
26630 if (return_code != IO_OK)
26631 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26632 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26633 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26634 free_irq(h->intr[h->intr_mode], h);
26635 }
26636
26637 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26638 index 7fda30e..eb5dfe0 100644
26639 --- a/drivers/block/cciss.h
26640 +++ b/drivers/block/cciss.h
26641 @@ -101,7 +101,7 @@ struct ctlr_info
26642 /* information about each logical volume */
26643 drive_info_struct *drv[CISS_MAX_LUN];
26644
26645 - struct access_method access;
26646 + struct access_method *access;
26647
26648 /* queue and queue Info */
26649 struct list_head reqQ;
26650 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26651 index 9125bbe..eede5c8 100644
26652 --- a/drivers/block/cpqarray.c
26653 +++ b/drivers/block/cpqarray.c
26654 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26655 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26656 goto Enomem4;
26657 }
26658 - hba[i]->access.set_intr_mask(hba[i], 0);
26659 + hba[i]->access->set_intr_mask(hba[i], 0);
26660 if (request_irq(hba[i]->intr, do_ida_intr,
26661 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26662 {
26663 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26664 add_timer(&hba[i]->timer);
26665
26666 /* Enable IRQ now that spinlock and rate limit timer are set up */
26667 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26668 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26669
26670 for(j=0; j<NWD; j++) {
26671 struct gendisk *disk = ida_gendisk[i][j];
26672 @@ -694,7 +694,7 @@ DBGINFO(
26673 for(i=0; i<NR_PRODUCTS; i++) {
26674 if (board_id == products[i].board_id) {
26675 c->product_name = products[i].product_name;
26676 - c->access = *(products[i].access);
26677 + c->access = products[i].access;
26678 break;
26679 }
26680 }
26681 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26682 hba[ctlr]->intr = intr;
26683 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26684 hba[ctlr]->product_name = products[j].product_name;
26685 - hba[ctlr]->access = *(products[j].access);
26686 + hba[ctlr]->access = products[j].access;
26687 hba[ctlr]->ctlr = ctlr;
26688 hba[ctlr]->board_id = board_id;
26689 hba[ctlr]->pci_dev = NULL; /* not PCI */
26690 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
26691
26692 while((c = h->reqQ) != NULL) {
26693 /* Can't do anything if we're busy */
26694 - if (h->access.fifo_full(h) == 0)
26695 + if (h->access->fifo_full(h) == 0)
26696 return;
26697
26698 /* Get the first entry from the request Q */
26699 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
26700 h->Qdepth--;
26701
26702 /* Tell the controller to do our bidding */
26703 - h->access.submit_command(h, c);
26704 + h->access->submit_command(h, c);
26705
26706 /* Get onto the completion Q */
26707 addQ(&h->cmpQ, c);
26708 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26709 unsigned long flags;
26710 __u32 a,a1;
26711
26712 - istat = h->access.intr_pending(h);
26713 + istat = h->access->intr_pending(h);
26714 /* Is this interrupt for us? */
26715 if (istat == 0)
26716 return IRQ_NONE;
26717 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26718 */
26719 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26720 if (istat & FIFO_NOT_EMPTY) {
26721 - while((a = h->access.command_completed(h))) {
26722 + while((a = h->access->command_completed(h))) {
26723 a1 = a; a &= ~3;
26724 if ((c = h->cmpQ) == NULL)
26725 {
26726 @@ -1449,11 +1449,11 @@ static int sendcmd(
26727 /*
26728 * Disable interrupt
26729 */
26730 - info_p->access.set_intr_mask(info_p, 0);
26731 + info_p->access->set_intr_mask(info_p, 0);
26732 /* Make sure there is room in the command FIFO */
26733 /* Actually it should be completely empty at this time. */
26734 for (i = 200000; i > 0; i--) {
26735 - temp = info_p->access.fifo_full(info_p);
26736 + temp = info_p->access->fifo_full(info_p);
26737 if (temp != 0) {
26738 break;
26739 }
26740 @@ -1466,7 +1466,7 @@ DBG(
26741 /*
26742 * Send the cmd
26743 */
26744 - info_p->access.submit_command(info_p, c);
26745 + info_p->access->submit_command(info_p, c);
26746 complete = pollcomplete(ctlr);
26747
26748 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26749 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26750 * we check the new geometry. Then turn interrupts back on when
26751 * we're done.
26752 */
26753 - host->access.set_intr_mask(host, 0);
26754 + host->access->set_intr_mask(host, 0);
26755 getgeometry(ctlr);
26756 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26757 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26758
26759 for(i=0; i<NWD; i++) {
26760 struct gendisk *disk = ida_gendisk[ctlr][i];
26761 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
26762 /* Wait (up to 2 seconds) for a command to complete */
26763
26764 for (i = 200000; i > 0; i--) {
26765 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
26766 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
26767 if (done == 0) {
26768 udelay(10); /* a short fixed delay */
26769 } else
26770 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26771 index be73e9d..7fbf140 100644
26772 --- a/drivers/block/cpqarray.h
26773 +++ b/drivers/block/cpqarray.h
26774 @@ -99,7 +99,7 @@ struct ctlr_info {
26775 drv_info_t drv[NWD];
26776 struct proc_dir_entry *proc;
26777
26778 - struct access_method access;
26779 + struct access_method *access;
26780
26781 cmdlist_t *reqQ;
26782 cmdlist_t *cmpQ;
26783 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26784 index 9cf2035..bffca95 100644
26785 --- a/drivers/block/drbd/drbd_int.h
26786 +++ b/drivers/block/drbd/drbd_int.h
26787 @@ -736,7 +736,7 @@ struct drbd_request;
26788 struct drbd_epoch {
26789 struct list_head list;
26790 unsigned int barrier_nr;
26791 - atomic_t epoch_size; /* increased on every request added. */
26792 + atomic_unchecked_t epoch_size; /* increased on every request added. */
26793 atomic_t active; /* increased on every req. added, and dec on every finished. */
26794 unsigned long flags;
26795 };
26796 @@ -1108,7 +1108,7 @@ struct drbd_conf {
26797 void *int_dig_in;
26798 void *int_dig_vv;
26799 wait_queue_head_t seq_wait;
26800 - atomic_t packet_seq;
26801 + atomic_unchecked_t packet_seq;
26802 unsigned int peer_seq;
26803 spinlock_t peer_seq_lock;
26804 unsigned int minor;
26805 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26806
26807 static inline void drbd_tcp_cork(struct socket *sock)
26808 {
26809 - int __user val = 1;
26810 + int val = 1;
26811 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26812 - (char __user *)&val, sizeof(val));
26813 + (char __force_user *)&val, sizeof(val));
26814 }
26815
26816 static inline void drbd_tcp_uncork(struct socket *sock)
26817 {
26818 - int __user val = 0;
26819 + int val = 0;
26820 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26821 - (char __user *)&val, sizeof(val));
26822 + (char __force_user *)&val, sizeof(val));
26823 }
26824
26825 static inline void drbd_tcp_nodelay(struct socket *sock)
26826 {
26827 - int __user val = 1;
26828 + int val = 1;
26829 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26830 - (char __user *)&val, sizeof(val));
26831 + (char __force_user *)&val, sizeof(val));
26832 }
26833
26834 static inline void drbd_tcp_quickack(struct socket *sock)
26835 {
26836 - int __user val = 2;
26837 + int val = 2;
26838 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26839 - (char __user *)&val, sizeof(val));
26840 + (char __force_user *)&val, sizeof(val));
26841 }
26842
26843 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26844 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26845 index 0358e55..bc33689 100644
26846 --- a/drivers/block/drbd/drbd_main.c
26847 +++ b/drivers/block/drbd/drbd_main.c
26848 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26849 p.sector = sector;
26850 p.block_id = block_id;
26851 p.blksize = blksize;
26852 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26853 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26854
26855 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26856 return false;
26857 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26858 p.sector = cpu_to_be64(req->sector);
26859 p.block_id = (unsigned long)req;
26860 p.seq_num = cpu_to_be32(req->seq_num =
26861 - atomic_add_return(1, &mdev->packet_seq));
26862 + atomic_add_return_unchecked(1, &mdev->packet_seq));
26863
26864 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26865
26866 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26867 atomic_set(&mdev->unacked_cnt, 0);
26868 atomic_set(&mdev->local_cnt, 0);
26869 atomic_set(&mdev->net_cnt, 0);
26870 - atomic_set(&mdev->packet_seq, 0);
26871 + atomic_set_unchecked(&mdev->packet_seq, 0);
26872 atomic_set(&mdev->pp_in_use, 0);
26873 atomic_set(&mdev->pp_in_use_by_net, 0);
26874 atomic_set(&mdev->rs_sect_in, 0);
26875 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26876 mdev->receiver.t_state);
26877
26878 /* no need to lock it, I'm the only thread alive */
26879 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26880 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26881 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26882 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26883 mdev->al_writ_cnt =
26884 mdev->bm_writ_cnt =
26885 mdev->read_cnt =
26886 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26887 index af2a250..219c74b 100644
26888 --- a/drivers/block/drbd/drbd_nl.c
26889 +++ b/drivers/block/drbd/drbd_nl.c
26890 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26891 module_put(THIS_MODULE);
26892 }
26893
26894 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26895 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26896
26897 static unsigned short *
26898 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26899 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26900 cn_reply->id.idx = CN_IDX_DRBD;
26901 cn_reply->id.val = CN_VAL_DRBD;
26902
26903 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26904 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26905 cn_reply->ack = 0; /* not used here. */
26906 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26907 (int)((char *)tl - (char *)reply->tag_list);
26908 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26909 cn_reply->id.idx = CN_IDX_DRBD;
26910 cn_reply->id.val = CN_VAL_DRBD;
26911
26912 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26913 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26914 cn_reply->ack = 0; /* not used here. */
26915 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26916 (int)((char *)tl - (char *)reply->tag_list);
26917 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26918 cn_reply->id.idx = CN_IDX_DRBD;
26919 cn_reply->id.val = CN_VAL_DRBD;
26920
26921 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26922 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26923 cn_reply->ack = 0; // not used here.
26924 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26925 (int)((char*)tl - (char*)reply->tag_list);
26926 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26927 cn_reply->id.idx = CN_IDX_DRBD;
26928 cn_reply->id.val = CN_VAL_DRBD;
26929
26930 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26931 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26932 cn_reply->ack = 0; /* not used here. */
26933 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26934 (int)((char *)tl - (char *)reply->tag_list);
26935 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26936 index 43beaca..4a5b1dd 100644
26937 --- a/drivers/block/drbd/drbd_receiver.c
26938 +++ b/drivers/block/drbd/drbd_receiver.c
26939 @@ -894,7 +894,7 @@ retry:
26940 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26941 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26942
26943 - atomic_set(&mdev->packet_seq, 0);
26944 + atomic_set_unchecked(&mdev->packet_seq, 0);
26945 mdev->peer_seq = 0;
26946
26947 drbd_thread_start(&mdev->asender);
26948 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26949 do {
26950 next_epoch = NULL;
26951
26952 - epoch_size = atomic_read(&epoch->epoch_size);
26953 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26954
26955 switch (ev & ~EV_CLEANUP) {
26956 case EV_PUT:
26957 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26958 rv = FE_DESTROYED;
26959 } else {
26960 epoch->flags = 0;
26961 - atomic_set(&epoch->epoch_size, 0);
26962 + atomic_set_unchecked(&epoch->epoch_size, 0);
26963 /* atomic_set(&epoch->active, 0); is already zero */
26964 if (rv == FE_STILL_LIVE)
26965 rv = FE_RECYCLED;
26966 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26967 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26968 drbd_flush(mdev);
26969
26970 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26971 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26972 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26973 if (epoch)
26974 break;
26975 }
26976
26977 epoch = mdev->current_epoch;
26978 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26979 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26980
26981 D_ASSERT(atomic_read(&epoch->active) == 0);
26982 D_ASSERT(epoch->flags == 0);
26983 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26984 }
26985
26986 epoch->flags = 0;
26987 - atomic_set(&epoch->epoch_size, 0);
26988 + atomic_set_unchecked(&epoch->epoch_size, 0);
26989 atomic_set(&epoch->active, 0);
26990
26991 spin_lock(&mdev->epoch_lock);
26992 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26993 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26994 list_add(&epoch->list, &mdev->current_epoch->list);
26995 mdev->current_epoch = epoch;
26996 mdev->epochs++;
26997 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26998 spin_unlock(&mdev->peer_seq_lock);
26999
27000 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
27001 - atomic_inc(&mdev->current_epoch->epoch_size);
27002 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
27003 return drbd_drain_block(mdev, data_size);
27004 }
27005
27006 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
27007
27008 spin_lock(&mdev->epoch_lock);
27009 e->epoch = mdev->current_epoch;
27010 - atomic_inc(&e->epoch->epoch_size);
27011 + atomic_inc_unchecked(&e->epoch->epoch_size);
27012 atomic_inc(&e->epoch->active);
27013 spin_unlock(&mdev->epoch_lock);
27014
27015 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
27016 D_ASSERT(list_empty(&mdev->done_ee));
27017
27018 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
27019 - atomic_set(&mdev->current_epoch->epoch_size, 0);
27020 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
27021 D_ASSERT(list_empty(&mdev->current_epoch->list));
27022 }
27023
27024 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
27025 index 1e888c9..05cf1b0 100644
27026 --- a/drivers/block/loop.c
27027 +++ b/drivers/block/loop.c
27028 @@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
27029 mm_segment_t old_fs = get_fs();
27030
27031 set_fs(get_ds());
27032 - bw = file->f_op->write(file, buf, len, &pos);
27033 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
27034 set_fs(old_fs);
27035 if (likely(bw == len))
27036 return 0;
27037 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
27038 index 4364303..9adf4ee 100644
27039 --- a/drivers/char/Kconfig
27040 +++ b/drivers/char/Kconfig
27041 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
27042
27043 config DEVKMEM
27044 bool "/dev/kmem virtual device support"
27045 - default y
27046 + default n
27047 + depends on !GRKERNSEC_KMEM
27048 help
27049 Say Y here if you want to support the /dev/kmem device. The
27050 /dev/kmem device is rarely used, but can be used for certain
27051 @@ -596,6 +597,7 @@ config DEVPORT
27052 bool
27053 depends on !M68K
27054 depends on ISA || PCI
27055 + depends on !GRKERNSEC_KMEM
27056 default y
27057
27058 source "drivers/s390/char/Kconfig"
27059 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
27060 index 2e04433..22afc64 100644
27061 --- a/drivers/char/agp/frontend.c
27062 +++ b/drivers/char/agp/frontend.c
27063 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
27064 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27065 return -EFAULT;
27066
27067 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27068 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27069 return -EFAULT;
27070
27071 client = agp_find_client_by_pid(reserve.pid);
27072 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
27073 index 095ab90..afad0a4 100644
27074 --- a/drivers/char/briq_panel.c
27075 +++ b/drivers/char/briq_panel.c
27076 @@ -9,6 +9,7 @@
27077 #include <linux/types.h>
27078 #include <linux/errno.h>
27079 #include <linux/tty.h>
27080 +#include <linux/mutex.h>
27081 #include <linux/timer.h>
27082 #include <linux/kernel.h>
27083 #include <linux/wait.h>
27084 @@ -34,6 +35,7 @@ static int vfd_is_open;
27085 static unsigned char vfd[40];
27086 static int vfd_cursor;
27087 static unsigned char ledpb, led;
27088 +static DEFINE_MUTEX(vfd_mutex);
27089
27090 static void update_vfd(void)
27091 {
27092 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
27093 if (!vfd_is_open)
27094 return -EBUSY;
27095
27096 + mutex_lock(&vfd_mutex);
27097 for (;;) {
27098 char c;
27099 if (!indx)
27100 break;
27101 - if (get_user(c, buf))
27102 + if (get_user(c, buf)) {
27103 + mutex_unlock(&vfd_mutex);
27104 return -EFAULT;
27105 + }
27106 if (esc) {
27107 set_led(c);
27108 esc = 0;
27109 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
27110 buf++;
27111 }
27112 update_vfd();
27113 + mutex_unlock(&vfd_mutex);
27114
27115 return len;
27116 }
27117 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
27118 index f773a9d..65cd683 100644
27119 --- a/drivers/char/genrtc.c
27120 +++ b/drivers/char/genrtc.c
27121 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
27122 switch (cmd) {
27123
27124 case RTC_PLL_GET:
27125 + memset(&pll, 0, sizeof(pll));
27126 if (get_rtc_pll(&pll))
27127 return -EINVAL;
27128 else
27129 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
27130 index 0833896..cccce52 100644
27131 --- a/drivers/char/hpet.c
27132 +++ b/drivers/char/hpet.c
27133 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
27134 }
27135
27136 static int
27137 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
27138 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
27139 struct hpet_info *info)
27140 {
27141 struct hpet_timer __iomem *timer;
27142 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
27143 index 58c0e63..46c16bf 100644
27144 --- a/drivers/char/ipmi/ipmi_msghandler.c
27145 +++ b/drivers/char/ipmi/ipmi_msghandler.c
27146 @@ -415,7 +415,7 @@ struct ipmi_smi {
27147 struct proc_dir_entry *proc_dir;
27148 char proc_dir_name[10];
27149
27150 - atomic_t stats[IPMI_NUM_STATS];
27151 + atomic_unchecked_t stats[IPMI_NUM_STATS];
27152
27153 /*
27154 * run_to_completion duplicate of smb_info, smi_info
27155 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27156
27157
27158 #define ipmi_inc_stat(intf, stat) \
27159 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27160 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27161 #define ipmi_get_stat(intf, stat) \
27162 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27163 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27164
27165 static int is_lan_addr(struct ipmi_addr *addr)
27166 {
27167 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
27168 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27169 init_waitqueue_head(&intf->waitq);
27170 for (i = 0; i < IPMI_NUM_STATS; i++)
27171 - atomic_set(&intf->stats[i], 0);
27172 + atomic_set_unchecked(&intf->stats[i], 0);
27173
27174 intf->proc_dir = NULL;
27175
27176 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
27177 index 9397ab4..d01bee1 100644
27178 --- a/drivers/char/ipmi/ipmi_si_intf.c
27179 +++ b/drivers/char/ipmi/ipmi_si_intf.c
27180 @@ -277,7 +277,7 @@ struct smi_info {
27181 unsigned char slave_addr;
27182
27183 /* Counters and things for the proc filesystem. */
27184 - atomic_t stats[SI_NUM_STATS];
27185 + atomic_unchecked_t stats[SI_NUM_STATS];
27186
27187 struct task_struct *thread;
27188
27189 @@ -286,9 +286,9 @@ struct smi_info {
27190 };
27191
27192 #define smi_inc_stat(smi, stat) \
27193 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27194 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27195 #define smi_get_stat(smi, stat) \
27196 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27197 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27198
27199 #define SI_MAX_PARMS 4
27200
27201 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
27202 atomic_set(&new_smi->req_events, 0);
27203 new_smi->run_to_completion = 0;
27204 for (i = 0; i < SI_NUM_STATS; i++)
27205 - atomic_set(&new_smi->stats[i], 0);
27206 + atomic_set_unchecked(&new_smi->stats[i], 0);
27207
27208 new_smi->interrupt_disabled = 1;
27209 atomic_set(&new_smi->stop_operation, 0);
27210 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
27211 index 1aeaaba..e018570 100644
27212 --- a/drivers/char/mbcs.c
27213 +++ b/drivers/char/mbcs.c
27214 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
27215 return 0;
27216 }
27217
27218 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
27219 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
27220 {
27221 .part_num = MBCS_PART_NUM,
27222 .mfg_num = MBCS_MFG_NUM,
27223 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
27224 index 1451790..f705c30 100644
27225 --- a/drivers/char/mem.c
27226 +++ b/drivers/char/mem.c
27227 @@ -18,6 +18,7 @@
27228 #include <linux/raw.h>
27229 #include <linux/tty.h>
27230 #include <linux/capability.h>
27231 +#include <linux/security.h>
27232 #include <linux/ptrace.h>
27233 #include <linux/device.h>
27234 #include <linux/highmem.h>
27235 @@ -35,6 +36,10 @@
27236 # include <linux/efi.h>
27237 #endif
27238
27239 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27240 +extern const struct file_operations grsec_fops;
27241 +#endif
27242 +
27243 static inline unsigned long size_inside_page(unsigned long start,
27244 unsigned long size)
27245 {
27246 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27247
27248 while (cursor < to) {
27249 if (!devmem_is_allowed(pfn)) {
27250 +#ifdef CONFIG_GRKERNSEC_KMEM
27251 + gr_handle_mem_readwrite(from, to);
27252 +#else
27253 printk(KERN_INFO
27254 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27255 current->comm, from, to);
27256 +#endif
27257 return 0;
27258 }
27259 cursor += PAGE_SIZE;
27260 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27261 }
27262 return 1;
27263 }
27264 +#elif defined(CONFIG_GRKERNSEC_KMEM)
27265 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27266 +{
27267 + return 0;
27268 +}
27269 #else
27270 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27271 {
27272 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27273
27274 while (count > 0) {
27275 unsigned long remaining;
27276 + char *temp;
27277
27278 sz = size_inside_page(p, count);
27279
27280 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27281 if (!ptr)
27282 return -EFAULT;
27283
27284 - remaining = copy_to_user(buf, ptr, sz);
27285 +#ifdef CONFIG_PAX_USERCOPY
27286 + temp = kmalloc(sz, GFP_KERNEL);
27287 + if (!temp) {
27288 + unxlate_dev_mem_ptr(p, ptr);
27289 + return -ENOMEM;
27290 + }
27291 + memcpy(temp, ptr, sz);
27292 +#else
27293 + temp = ptr;
27294 +#endif
27295 +
27296 + remaining = copy_to_user(buf, temp, sz);
27297 +
27298 +#ifdef CONFIG_PAX_USERCOPY
27299 + kfree(temp);
27300 +#endif
27301 +
27302 unxlate_dev_mem_ptr(p, ptr);
27303 if (remaining)
27304 return -EFAULT;
27305 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27306 size_t count, loff_t *ppos)
27307 {
27308 unsigned long p = *ppos;
27309 - ssize_t low_count, read, sz;
27310 + ssize_t low_count, read, sz, err = 0;
27311 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27312 - int err = 0;
27313
27314 read = 0;
27315 if (p < (unsigned long) high_memory) {
27316 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27317 }
27318 #endif
27319 while (low_count > 0) {
27320 + char *temp;
27321 +
27322 sz = size_inside_page(p, low_count);
27323
27324 /*
27325 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27326 */
27327 kbuf = xlate_dev_kmem_ptr((char *)p);
27328
27329 - if (copy_to_user(buf, kbuf, sz))
27330 +#ifdef CONFIG_PAX_USERCOPY
27331 + temp = kmalloc(sz, GFP_KERNEL);
27332 + if (!temp)
27333 + return -ENOMEM;
27334 + memcpy(temp, kbuf, sz);
27335 +#else
27336 + temp = kbuf;
27337 +#endif
27338 +
27339 + err = copy_to_user(buf, temp, sz);
27340 +
27341 +#ifdef CONFIG_PAX_USERCOPY
27342 + kfree(temp);
27343 +#endif
27344 +
27345 + if (err)
27346 return -EFAULT;
27347 buf += sz;
27348 p += sz;
27349 @@ -867,6 +914,9 @@ static const struct memdev {
27350 #ifdef CONFIG_CRASH_DUMP
27351 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27352 #endif
27353 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27354 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27355 +#endif
27356 };
27357
27358 static int memory_open(struct inode *inode, struct file *filp)
27359 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27360 index da3cfee..a5a6606 100644
27361 --- a/drivers/char/nvram.c
27362 +++ b/drivers/char/nvram.c
27363 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27364
27365 spin_unlock_irq(&rtc_lock);
27366
27367 - if (copy_to_user(buf, contents, tmp - contents))
27368 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27369 return -EFAULT;
27370
27371 *ppos = i;
27372 diff --git a/drivers/char/random.c b/drivers/char/random.c
27373 index 6035ab8..bdfe4fd 100644
27374 --- a/drivers/char/random.c
27375 +++ b/drivers/char/random.c
27376 @@ -261,8 +261,13 @@
27377 /*
27378 * Configuration information
27379 */
27380 +#ifdef CONFIG_GRKERNSEC_RANDNET
27381 +#define INPUT_POOL_WORDS 512
27382 +#define OUTPUT_POOL_WORDS 128
27383 +#else
27384 #define INPUT_POOL_WORDS 128
27385 #define OUTPUT_POOL_WORDS 32
27386 +#endif
27387 #define SEC_XFER_SIZE 512
27388 #define EXTRACT_SIZE 10
27389
27390 @@ -300,10 +305,17 @@ static struct poolinfo {
27391 int poolwords;
27392 int tap1, tap2, tap3, tap4, tap5;
27393 } poolinfo_table[] = {
27394 +#ifdef CONFIG_GRKERNSEC_RANDNET
27395 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27396 + { 512, 411, 308, 208, 104, 1 },
27397 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27398 + { 128, 103, 76, 51, 25, 1 },
27399 +#else
27400 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27401 { 128, 103, 76, 51, 25, 1 },
27402 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27403 { 32, 26, 20, 14, 7, 1 },
27404 +#endif
27405 #if 0
27406 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27407 { 2048, 1638, 1231, 819, 411, 1 },
27408 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27409
27410 extract_buf(r, tmp);
27411 i = min_t(int, nbytes, EXTRACT_SIZE);
27412 - if (copy_to_user(buf, tmp, i)) {
27413 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27414 ret = -EFAULT;
27415 break;
27416 }
27417 @@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27418 #include <linux/sysctl.h>
27419
27420 static int min_read_thresh = 8, min_write_thresh;
27421 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27422 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27423 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27424 static char sysctl_bootid[16];
27425
27426 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27427 index 1ee8ce7..b778bef 100644
27428 --- a/drivers/char/sonypi.c
27429 +++ b/drivers/char/sonypi.c
27430 @@ -55,6 +55,7 @@
27431 #include <asm/uaccess.h>
27432 #include <asm/io.h>
27433 #include <asm/system.h>
27434 +#include <asm/local.h>
27435
27436 #include <linux/sonypi.h>
27437
27438 @@ -491,7 +492,7 @@ static struct sonypi_device {
27439 spinlock_t fifo_lock;
27440 wait_queue_head_t fifo_proc_list;
27441 struct fasync_struct *fifo_async;
27442 - int open_count;
27443 + local_t open_count;
27444 int model;
27445 struct input_dev *input_jog_dev;
27446 struct input_dev *input_key_dev;
27447 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27448 static int sonypi_misc_release(struct inode *inode, struct file *file)
27449 {
27450 mutex_lock(&sonypi_device.lock);
27451 - sonypi_device.open_count--;
27452 + local_dec(&sonypi_device.open_count);
27453 mutex_unlock(&sonypi_device.lock);
27454 return 0;
27455 }
27456 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27457 {
27458 mutex_lock(&sonypi_device.lock);
27459 /* Flush input queue on first open */
27460 - if (!sonypi_device.open_count)
27461 + if (!local_read(&sonypi_device.open_count))
27462 kfifo_reset(&sonypi_device.fifo);
27463 - sonypi_device.open_count++;
27464 + local_inc(&sonypi_device.open_count);
27465 mutex_unlock(&sonypi_device.lock);
27466
27467 return 0;
27468 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27469 index 361a1df..2471eee 100644
27470 --- a/drivers/char/tpm/tpm.c
27471 +++ b/drivers/char/tpm/tpm.c
27472 @@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27473 chip->vendor.req_complete_val)
27474 goto out_recv;
27475
27476 - if ((status == chip->vendor.req_canceled)) {
27477 + if (status == chip->vendor.req_canceled) {
27478 dev_err(chip->dev, "Operation Canceled\n");
27479 rc = -ECANCELED;
27480 goto out;
27481 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27482 index 0636520..169c1d0 100644
27483 --- a/drivers/char/tpm/tpm_bios.c
27484 +++ b/drivers/char/tpm/tpm_bios.c
27485 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27486 event = addr;
27487
27488 if ((event->event_type == 0 && event->event_size == 0) ||
27489 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27490 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27491 return NULL;
27492
27493 return addr;
27494 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27495 return NULL;
27496
27497 if ((event->event_type == 0 && event->event_size == 0) ||
27498 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27499 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27500 return NULL;
27501
27502 (*pos)++;
27503 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27504 int i;
27505
27506 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27507 - seq_putc(m, data[i]);
27508 + if (!seq_putc(m, data[i]))
27509 + return -EFAULT;
27510
27511 return 0;
27512 }
27513 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27514 log->bios_event_log_end = log->bios_event_log + len;
27515
27516 virt = acpi_os_map_memory(start, len);
27517 + if (!virt) {
27518 + kfree(log->bios_event_log);
27519 + log->bios_event_log = NULL;
27520 + return -EFAULT;
27521 + }
27522
27523 - memcpy(log->bios_event_log, virt, len);
27524 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27525
27526 acpi_os_unmap_memory(virt, len);
27527 return 0;
27528 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27529 index 8e3c46d..c139b99 100644
27530 --- a/drivers/char/virtio_console.c
27531 +++ b/drivers/char/virtio_console.c
27532 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27533 if (to_user) {
27534 ssize_t ret;
27535
27536 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27537 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27538 if (ret)
27539 return -EFAULT;
27540 } else {
27541 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27542 if (!port_has_data(port) && !port->host_connected)
27543 return 0;
27544
27545 - return fill_readbuf(port, ubuf, count, true);
27546 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27547 }
27548
27549 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27550 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
27551 index eb1d864..39ee5a7 100644
27552 --- a/drivers/dma/dmatest.c
27553 +++ b/drivers/dma/dmatest.c
27554 @@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
27555 }
27556 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
27557 cnt = dmatest_add_threads(dtc, DMA_PQ);
27558 - thread_count += cnt > 0 ?: 0;
27559 + thread_count += cnt > 0 ? cnt : 0;
27560 }
27561
27562 pr_info("dmatest: Started %u threads using %s\n",
27563 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27564 index c9eee6d..f9d5280 100644
27565 --- a/drivers/edac/amd64_edac.c
27566 +++ b/drivers/edac/amd64_edac.c
27567 @@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27568 * PCI core identifies what devices are on a system during boot, and then
27569 * inquiry this table to see if this driver is for a given device found.
27570 */
27571 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27572 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27573 {
27574 .vendor = PCI_VENDOR_ID_AMD,
27575 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27576 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27577 index e47e73b..348e0bd 100644
27578 --- a/drivers/edac/amd76x_edac.c
27579 +++ b/drivers/edac/amd76x_edac.c
27580 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27581 edac_mc_free(mci);
27582 }
27583
27584 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27585 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27586 {
27587 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27588 AMD762},
27589 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27590 index 1af531a..3a8ff27 100644
27591 --- a/drivers/edac/e752x_edac.c
27592 +++ b/drivers/edac/e752x_edac.c
27593 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27594 edac_mc_free(mci);
27595 }
27596
27597 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27598 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27599 {
27600 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27601 E7520},
27602 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27603 index 6ffb6d2..383d8d7 100644
27604 --- a/drivers/edac/e7xxx_edac.c
27605 +++ b/drivers/edac/e7xxx_edac.c
27606 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27607 edac_mc_free(mci);
27608 }
27609
27610 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27611 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27612 {
27613 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27614 E7205},
27615 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27616 index 495198a..ac08c85 100644
27617 --- a/drivers/edac/edac_pci_sysfs.c
27618 +++ b/drivers/edac/edac_pci_sysfs.c
27619 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27620 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27621 static int edac_pci_poll_msec = 1000; /* one second workq period */
27622
27623 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27624 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27625 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27626 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27627
27628 static struct kobject *edac_pci_top_main_kobj;
27629 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27630 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27631 edac_printk(KERN_CRIT, EDAC_PCI,
27632 "Signaled System Error on %s\n",
27633 pci_name(dev));
27634 - atomic_inc(&pci_nonparity_count);
27635 + atomic_inc_unchecked(&pci_nonparity_count);
27636 }
27637
27638 if (status & (PCI_STATUS_PARITY)) {
27639 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27640 "Master Data Parity Error on %s\n",
27641 pci_name(dev));
27642
27643 - atomic_inc(&pci_parity_count);
27644 + atomic_inc_unchecked(&pci_parity_count);
27645 }
27646
27647 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27648 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27649 "Detected Parity Error on %s\n",
27650 pci_name(dev));
27651
27652 - atomic_inc(&pci_parity_count);
27653 + atomic_inc_unchecked(&pci_parity_count);
27654 }
27655 }
27656
27657 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27658 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27659 "Signaled System Error on %s\n",
27660 pci_name(dev));
27661 - atomic_inc(&pci_nonparity_count);
27662 + atomic_inc_unchecked(&pci_nonparity_count);
27663 }
27664
27665 if (status & (PCI_STATUS_PARITY)) {
27666 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27667 "Master Data Parity Error on "
27668 "%s\n", pci_name(dev));
27669
27670 - atomic_inc(&pci_parity_count);
27671 + atomic_inc_unchecked(&pci_parity_count);
27672 }
27673
27674 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27675 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27676 "Detected Parity Error on %s\n",
27677 pci_name(dev));
27678
27679 - atomic_inc(&pci_parity_count);
27680 + atomic_inc_unchecked(&pci_parity_count);
27681 }
27682 }
27683 }
27684 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27685 if (!check_pci_errors)
27686 return;
27687
27688 - before_count = atomic_read(&pci_parity_count);
27689 + before_count = atomic_read_unchecked(&pci_parity_count);
27690
27691 /* scan all PCI devices looking for a Parity Error on devices and
27692 * bridges.
27693 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27694 /* Only if operator has selected panic on PCI Error */
27695 if (edac_pci_get_panic_on_pe()) {
27696 /* If the count is different 'after' from 'before' */
27697 - if (before_count != atomic_read(&pci_parity_count))
27698 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27699 panic("EDAC: PCI Parity Error");
27700 }
27701 }
27702 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27703 index c0510b3..6e2a954 100644
27704 --- a/drivers/edac/i3000_edac.c
27705 +++ b/drivers/edac/i3000_edac.c
27706 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27707 edac_mc_free(mci);
27708 }
27709
27710 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27711 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27712 {
27713 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27714 I3000},
27715 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27716 index aa08497..7e6822a 100644
27717 --- a/drivers/edac/i3200_edac.c
27718 +++ b/drivers/edac/i3200_edac.c
27719 @@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27720 edac_mc_free(mci);
27721 }
27722
27723 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27724 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27725 {
27726 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27727 I3200},
27728 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27729 index 4dc3ac2..67d05a6 100644
27730 --- a/drivers/edac/i5000_edac.c
27731 +++ b/drivers/edac/i5000_edac.c
27732 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27733 *
27734 * The "E500P" device is the first device supported.
27735 */
27736 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27737 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27738 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27739 .driver_data = I5000P},
27740
27741 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27742 index bcbdeec..9886d16 100644
27743 --- a/drivers/edac/i5100_edac.c
27744 +++ b/drivers/edac/i5100_edac.c
27745 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27746 edac_mc_free(mci);
27747 }
27748
27749 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27750 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27751 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27752 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27753 { 0, }
27754 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27755 index 74d6ec34..baff517 100644
27756 --- a/drivers/edac/i5400_edac.c
27757 +++ b/drivers/edac/i5400_edac.c
27758 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27759 *
27760 * The "E500P" device is the first device supported.
27761 */
27762 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27763 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27764 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27765 {0,} /* 0 terminated list. */
27766 };
27767 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27768 index 6104dba..e7ea8e1 100644
27769 --- a/drivers/edac/i7300_edac.c
27770 +++ b/drivers/edac/i7300_edac.c
27771 @@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27772 *
27773 * Has only 8086:360c PCI ID
27774 */
27775 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27776 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27777 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27778 {0,} /* 0 terminated list. */
27779 };
27780 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27781 index 70ad892..178943c 100644
27782 --- a/drivers/edac/i7core_edac.c
27783 +++ b/drivers/edac/i7core_edac.c
27784 @@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
27785 /*
27786 * pci_device_id table for which devices we are looking for
27787 */
27788 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27789 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27790 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27791 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27792 {0,} /* 0 terminated list. */
27793 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27794 index 4329d39..f3022ef 100644
27795 --- a/drivers/edac/i82443bxgx_edac.c
27796 +++ b/drivers/edac/i82443bxgx_edac.c
27797 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27798
27799 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27800
27801 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27802 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27803 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27804 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27805 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27806 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27807 index 931a057..fd28340 100644
27808 --- a/drivers/edac/i82860_edac.c
27809 +++ b/drivers/edac/i82860_edac.c
27810 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27811 edac_mc_free(mci);
27812 }
27813
27814 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27815 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27816 {
27817 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27818 I82860},
27819 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27820 index 33864c6..01edc61 100644
27821 --- a/drivers/edac/i82875p_edac.c
27822 +++ b/drivers/edac/i82875p_edac.c
27823 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27824 edac_mc_free(mci);
27825 }
27826
27827 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27828 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27829 {
27830 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27831 I82875P},
27832 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27833 index a5da732..983363b 100644
27834 --- a/drivers/edac/i82975x_edac.c
27835 +++ b/drivers/edac/i82975x_edac.c
27836 @@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27837 edac_mc_free(mci);
27838 }
27839
27840 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27841 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27842 {
27843 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27844 I82975X
27845 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27846 index 0106747..0b40417 100644
27847 --- a/drivers/edac/mce_amd.h
27848 +++ b/drivers/edac/mce_amd.h
27849 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
27850 bool (*dc_mce)(u16, u8);
27851 bool (*ic_mce)(u16, u8);
27852 bool (*nb_mce)(u16, u8);
27853 -};
27854 +} __no_const;
27855
27856 void amd_report_gart_errors(bool);
27857 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
27858 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27859 index b153674..ad2ba9b 100644
27860 --- a/drivers/edac/r82600_edac.c
27861 +++ b/drivers/edac/r82600_edac.c
27862 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27863 edac_mc_free(mci);
27864 }
27865
27866 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27867 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27868 {
27869 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27870 },
27871 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
27872 index 7a402bf..af0b211 100644
27873 --- a/drivers/edac/sb_edac.c
27874 +++ b/drivers/edac/sb_edac.c
27875 @@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
27876 /*
27877 * pci_device_id table for which devices we are looking for
27878 */
27879 -static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
27880 +static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
27881 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
27882 {0,} /* 0 terminated list. */
27883 };
27884 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27885 index b6f47de..c5acf3a 100644
27886 --- a/drivers/edac/x38_edac.c
27887 +++ b/drivers/edac/x38_edac.c
27888 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27889 edac_mc_free(mci);
27890 }
27891
27892 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27893 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27894 {
27895 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27896 X38},
27897 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27898 index 85661b0..c784559a 100644
27899 --- a/drivers/firewire/core-card.c
27900 +++ b/drivers/firewire/core-card.c
27901 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27902
27903 void fw_core_remove_card(struct fw_card *card)
27904 {
27905 - struct fw_card_driver dummy_driver = dummy_driver_template;
27906 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
27907
27908 card->driver->update_phy_reg(card, 4,
27909 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27910 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27911 index 4799393..37bd3ab 100644
27912 --- a/drivers/firewire/core-cdev.c
27913 +++ b/drivers/firewire/core-cdev.c
27914 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27915 int ret;
27916
27917 if ((request->channels == 0 && request->bandwidth == 0) ||
27918 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27919 - request->bandwidth < 0)
27920 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27921 return -EINVAL;
27922
27923 r = kmalloc(sizeof(*r), GFP_KERNEL);
27924 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27925 index 855ab3f..11f4bbd 100644
27926 --- a/drivers/firewire/core-transaction.c
27927 +++ b/drivers/firewire/core-transaction.c
27928 @@ -37,6 +37,7 @@
27929 #include <linux/timer.h>
27930 #include <linux/types.h>
27931 #include <linux/workqueue.h>
27932 +#include <linux/sched.h>
27933
27934 #include <asm/byteorder.h>
27935
27936 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27937 index b45be57..5fad18b 100644
27938 --- a/drivers/firewire/core.h
27939 +++ b/drivers/firewire/core.h
27940 @@ -101,6 +101,7 @@ struct fw_card_driver {
27941
27942 int (*stop_iso)(struct fw_iso_context *ctx);
27943 };
27944 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27945
27946 void fw_card_initialize(struct fw_card *card,
27947 const struct fw_card_driver *driver, struct device *device);
27948 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27949 index 153980b..4b4d046 100644
27950 --- a/drivers/firmware/dmi_scan.c
27951 +++ b/drivers/firmware/dmi_scan.c
27952 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27953 }
27954 }
27955 else {
27956 - /*
27957 - * no iounmap() for that ioremap(); it would be a no-op, but
27958 - * it's so early in setup that sucker gets confused into doing
27959 - * what it shouldn't if we actually call it.
27960 - */
27961 p = dmi_ioremap(0xF0000, 0x10000);
27962 if (p == NULL)
27963 goto error;
27964 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27965 if (buf == NULL)
27966 return -1;
27967
27968 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27969 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27970
27971 iounmap(buf);
27972 return 0;
27973 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27974 index 98723cb..10ca85b 100644
27975 --- a/drivers/gpio/gpio-vr41xx.c
27976 +++ b/drivers/gpio/gpio-vr41xx.c
27977 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27978 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27979 maskl, pendl, maskh, pendh);
27980
27981 - atomic_inc(&irq_err_count);
27982 + atomic_inc_unchecked(&irq_err_count);
27983
27984 return -EINVAL;
27985 }
27986 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27987 index 8323fc3..5c1d755 100644
27988 --- a/drivers/gpu/drm/drm_crtc.c
27989 +++ b/drivers/gpu/drm/drm_crtc.c
27990 @@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27991 */
27992 if ((out_resp->count_modes >= mode_count) && mode_count) {
27993 copied = 0;
27994 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27995 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27996 list_for_each_entry(mode, &connector->modes, head) {
27997 drm_crtc_convert_to_umode(&u_mode, mode);
27998 if (copy_to_user(mode_ptr + copied,
27999 @@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
28000
28001 if ((out_resp->count_props >= props_count) && props_count) {
28002 copied = 0;
28003 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
28004 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
28005 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
28006 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
28007 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
28008 if (connector->property_ids[i] != 0) {
28009 if (put_user(connector->property_ids[i],
28010 @@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
28011
28012 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
28013 copied = 0;
28014 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
28015 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
28016 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
28017 if (connector->encoder_ids[i] != 0) {
28018 if (put_user(connector->encoder_ids[i],
28019 @@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
28020 }
28021
28022 for (i = 0; i < crtc_req->count_connectors; i++) {
28023 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
28024 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
28025 if (get_user(out_id, &set_connectors_ptr[i])) {
28026 ret = -EFAULT;
28027 goto out;
28028 @@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
28029 fb = obj_to_fb(obj);
28030
28031 num_clips = r->num_clips;
28032 - clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
28033 + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
28034
28035 if (!num_clips != !clips_ptr) {
28036 ret = -EINVAL;
28037 @@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28038 out_resp->flags = property->flags;
28039
28040 if ((out_resp->count_values >= value_count) && value_count) {
28041 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
28042 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
28043 for (i = 0; i < value_count; i++) {
28044 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
28045 ret = -EFAULT;
28046 @@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28047 if (property->flags & DRM_MODE_PROP_ENUM) {
28048 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
28049 copied = 0;
28050 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
28051 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
28052 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
28053
28054 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
28055 @@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
28056 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
28057 copied = 0;
28058 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
28059 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
28060 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
28061
28062 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
28063 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
28064 @@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
28065 struct drm_mode_get_blob *out_resp = data;
28066 struct drm_property_blob *blob;
28067 int ret = 0;
28068 - void *blob_ptr;
28069 + void __user *blob_ptr;
28070
28071 if (!drm_core_check_feature(dev, DRIVER_MODESET))
28072 return -EINVAL;
28073 @@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
28074 blob = obj_to_blob(obj);
28075
28076 if (out_resp->length == blob->length) {
28077 - blob_ptr = (void *)(unsigned long)out_resp->data;
28078 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
28079 if (copy_to_user(blob_ptr, blob->data, blob->length)){
28080 ret = -EFAULT;
28081 goto done;
28082 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
28083 index d2619d7..bd6bd00 100644
28084 --- a/drivers/gpu/drm/drm_crtc_helper.c
28085 +++ b/drivers/gpu/drm/drm_crtc_helper.c
28086 @@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
28087 struct drm_crtc *tmp;
28088 int crtc_mask = 1;
28089
28090 - WARN(!crtc, "checking null crtc?\n");
28091 + BUG_ON(!crtc);
28092
28093 dev = crtc->dev;
28094
28095 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
28096 index 40c187c..5746164 100644
28097 --- a/drivers/gpu/drm/drm_drv.c
28098 +++ b/drivers/gpu/drm/drm_drv.c
28099 @@ -308,7 +308,7 @@ module_exit(drm_core_exit);
28100 /**
28101 * Copy and IOCTL return string to user space
28102 */
28103 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
28104 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
28105 {
28106 int len;
28107
28108 @@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
28109
28110 dev = file_priv->minor->dev;
28111 atomic_inc(&dev->ioctl_count);
28112 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
28113 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
28114 ++file_priv->ioctl_count;
28115
28116 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
28117 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
28118 index 828bf65..cdaa0e9 100644
28119 --- a/drivers/gpu/drm/drm_fops.c
28120 +++ b/drivers/gpu/drm/drm_fops.c
28121 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
28122 }
28123
28124 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
28125 - atomic_set(&dev->counts[i], 0);
28126 + atomic_set_unchecked(&dev->counts[i], 0);
28127
28128 dev->sigdata.lock = NULL;
28129
28130 @@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
28131
28132 retcode = drm_open_helper(inode, filp, dev);
28133 if (!retcode) {
28134 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
28135 - if (!dev->open_count++)
28136 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
28137 + if (local_inc_return(&dev->open_count) == 1)
28138 retcode = drm_setup(dev);
28139 }
28140 if (!retcode) {
28141 @@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
28142
28143 mutex_lock(&drm_global_mutex);
28144
28145 - DRM_DEBUG("open_count = %d\n", dev->open_count);
28146 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
28147
28148 if (dev->driver->preclose)
28149 dev->driver->preclose(dev, file_priv);
28150 @@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
28151 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
28152 task_pid_nr(current),
28153 (long)old_encode_dev(file_priv->minor->device),
28154 - dev->open_count);
28155 + local_read(&dev->open_count));
28156
28157 /* Release any auth tokens that might point to this file_priv,
28158 (do that under the drm_global_mutex) */
28159 @@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
28160 * End inline drm_release
28161 */
28162
28163 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
28164 - if (!--dev->open_count) {
28165 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
28166 + if (local_dec_and_test(&dev->open_count)) {
28167 if (atomic_read(&dev->ioctl_count)) {
28168 DRM_ERROR("Device busy: %d\n",
28169 atomic_read(&dev->ioctl_count));
28170 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
28171 index c87dc96..326055d 100644
28172 --- a/drivers/gpu/drm/drm_global.c
28173 +++ b/drivers/gpu/drm/drm_global.c
28174 @@ -36,7 +36,7 @@
28175 struct drm_global_item {
28176 struct mutex mutex;
28177 void *object;
28178 - int refcount;
28179 + atomic_t refcount;
28180 };
28181
28182 static struct drm_global_item glob[DRM_GLOBAL_NUM];
28183 @@ -49,7 +49,7 @@ void drm_global_init(void)
28184 struct drm_global_item *item = &glob[i];
28185 mutex_init(&item->mutex);
28186 item->object = NULL;
28187 - item->refcount = 0;
28188 + atomic_set(&item->refcount, 0);
28189 }
28190 }
28191
28192 @@ -59,7 +59,7 @@ void drm_global_release(void)
28193 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
28194 struct drm_global_item *item = &glob[i];
28195 BUG_ON(item->object != NULL);
28196 - BUG_ON(item->refcount != 0);
28197 + BUG_ON(atomic_read(&item->refcount) != 0);
28198 }
28199 }
28200
28201 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28202 void *object;
28203
28204 mutex_lock(&item->mutex);
28205 - if (item->refcount == 0) {
28206 + if (atomic_read(&item->refcount) == 0) {
28207 item->object = kzalloc(ref->size, GFP_KERNEL);
28208 if (unlikely(item->object == NULL)) {
28209 ret = -ENOMEM;
28210 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28211 goto out_err;
28212
28213 }
28214 - ++item->refcount;
28215 + atomic_inc(&item->refcount);
28216 ref->object = item->object;
28217 object = item->object;
28218 mutex_unlock(&item->mutex);
28219 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
28220 struct drm_global_item *item = &glob[ref->global_type];
28221
28222 mutex_lock(&item->mutex);
28223 - BUG_ON(item->refcount == 0);
28224 + BUG_ON(atomic_read(&item->refcount) == 0);
28225 BUG_ON(ref->object != item->object);
28226 - if (--item->refcount == 0) {
28227 + if (atomic_dec_and_test(&item->refcount)) {
28228 ref->release(ref);
28229 item->object = NULL;
28230 }
28231 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
28232 index ab1162d..42587b2 100644
28233 --- a/drivers/gpu/drm/drm_info.c
28234 +++ b/drivers/gpu/drm/drm_info.c
28235 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
28236 struct drm_local_map *map;
28237 struct drm_map_list *r_list;
28238
28239 - /* Hardcoded from _DRM_FRAME_BUFFER,
28240 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28241 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28242 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28243 + static const char * const types[] = {
28244 + [_DRM_FRAME_BUFFER] = "FB",
28245 + [_DRM_REGISTERS] = "REG",
28246 + [_DRM_SHM] = "SHM",
28247 + [_DRM_AGP] = "AGP",
28248 + [_DRM_SCATTER_GATHER] = "SG",
28249 + [_DRM_CONSISTENT] = "PCI",
28250 + [_DRM_GEM] = "GEM" };
28251 const char *type;
28252 int i;
28253
28254 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
28255 map = r_list->map;
28256 if (!map)
28257 continue;
28258 - if (map->type < 0 || map->type > 5)
28259 + if (map->type >= ARRAY_SIZE(types))
28260 type = "??";
28261 else
28262 type = types[map->type];
28263 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
28264 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28265 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28266 vma->vm_flags & VM_IO ? 'i' : '-',
28267 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28268 + 0);
28269 +#else
28270 vma->vm_pgoff);
28271 +#endif
28272
28273 #if defined(__i386__)
28274 pgprot = pgprot_val(vma->vm_page_prot);
28275 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28276 index ddd70db..40321e6 100644
28277 --- a/drivers/gpu/drm/drm_ioc32.c
28278 +++ b/drivers/gpu/drm/drm_ioc32.c
28279 @@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28280 request = compat_alloc_user_space(nbytes);
28281 if (!access_ok(VERIFY_WRITE, request, nbytes))
28282 return -EFAULT;
28283 - list = (struct drm_buf_desc *) (request + 1);
28284 + list = (struct drm_buf_desc __user *) (request + 1);
28285
28286 if (__put_user(count, &request->count)
28287 || __put_user(list, &request->list))
28288 @@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28289 request = compat_alloc_user_space(nbytes);
28290 if (!access_ok(VERIFY_WRITE, request, nbytes))
28291 return -EFAULT;
28292 - list = (struct drm_buf_pub *) (request + 1);
28293 + list = (struct drm_buf_pub __user *) (request + 1);
28294
28295 if (__put_user(count, &request->count)
28296 || __put_user(list, &request->list))
28297 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28298 index 904d7e9..ab88581 100644
28299 --- a/drivers/gpu/drm/drm_ioctl.c
28300 +++ b/drivers/gpu/drm/drm_ioctl.c
28301 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28302 stats->data[i].value =
28303 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28304 else
28305 - stats->data[i].value = atomic_read(&dev->counts[i]);
28306 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28307 stats->data[i].type = dev->types[i];
28308 }
28309
28310 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28311 index 632ae24..244cf4a 100644
28312 --- a/drivers/gpu/drm/drm_lock.c
28313 +++ b/drivers/gpu/drm/drm_lock.c
28314 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28315 if (drm_lock_take(&master->lock, lock->context)) {
28316 master->lock.file_priv = file_priv;
28317 master->lock.lock_time = jiffies;
28318 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28319 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28320 break; /* Got lock */
28321 }
28322
28323 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28324 return -EINVAL;
28325 }
28326
28327 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28328 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28329
28330 if (drm_lock_free(&master->lock, lock->context)) {
28331 /* FIXME: Should really bail out here. */
28332 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28333 index 8f371e8..9f85d52 100644
28334 --- a/drivers/gpu/drm/i810/i810_dma.c
28335 +++ b/drivers/gpu/drm/i810/i810_dma.c
28336 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28337 dma->buflist[vertex->idx],
28338 vertex->discard, vertex->used);
28339
28340 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28341 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28342 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28343 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28344 sarea_priv->last_enqueue = dev_priv->counter - 1;
28345 sarea_priv->last_dispatch = (int)hw_status[5];
28346
28347 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28348 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28349 mc->last_render);
28350
28351 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28352 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28353 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28354 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28355 sarea_priv->last_enqueue = dev_priv->counter - 1;
28356 sarea_priv->last_dispatch = (int)hw_status[5];
28357
28358 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28359 index c9339f4..f5e1b9d 100644
28360 --- a/drivers/gpu/drm/i810/i810_drv.h
28361 +++ b/drivers/gpu/drm/i810/i810_drv.h
28362 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28363 int page_flipping;
28364
28365 wait_queue_head_t irq_queue;
28366 - atomic_t irq_received;
28367 - atomic_t irq_emitted;
28368 + atomic_unchecked_t irq_received;
28369 + atomic_unchecked_t irq_emitted;
28370
28371 int front_offset;
28372 } drm_i810_private_t;
28373 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28374 index b2e3c97..58cf079 100644
28375 --- a/drivers/gpu/drm/i915/i915_debugfs.c
28376 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
28377 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28378 I915_READ(GTIMR));
28379 }
28380 seq_printf(m, "Interrupts received: %d\n",
28381 - atomic_read(&dev_priv->irq_received));
28382 + atomic_read_unchecked(&dev_priv->irq_received));
28383 for (i = 0; i < I915_NUM_RINGS; i++) {
28384 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28385 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28386 @@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28387 return ret;
28388
28389 if (opregion->header)
28390 - seq_write(m, opregion->header, OPREGION_SIZE);
28391 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28392
28393 mutex_unlock(&dev->struct_mutex);
28394
28395 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28396 index c4da951..3c59c5c 100644
28397 --- a/drivers/gpu/drm/i915/i915_dma.c
28398 +++ b/drivers/gpu/drm/i915/i915_dma.c
28399 @@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28400 bool can_switch;
28401
28402 spin_lock(&dev->count_lock);
28403 - can_switch = (dev->open_count == 0);
28404 + can_switch = (local_read(&dev->open_count) == 0);
28405 spin_unlock(&dev->count_lock);
28406 return can_switch;
28407 }
28408 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28409 index ae294a0..1755461 100644
28410 --- a/drivers/gpu/drm/i915/i915_drv.h
28411 +++ b/drivers/gpu/drm/i915/i915_drv.h
28412 @@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
28413 /* render clock increase/decrease */
28414 /* display clock increase/decrease */
28415 /* pll clock increase/decrease */
28416 -};
28417 +} __no_const;
28418
28419 struct intel_device_info {
28420 u8 gen;
28421 @@ -318,7 +318,7 @@ typedef struct drm_i915_private {
28422 int current_page;
28423 int page_flipping;
28424
28425 - atomic_t irq_received;
28426 + atomic_unchecked_t irq_received;
28427
28428 /* protects the irq masks */
28429 spinlock_t irq_lock;
28430 @@ -893,7 +893,7 @@ struct drm_i915_gem_object {
28431 * will be page flipped away on the next vblank. When it
28432 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28433 */
28434 - atomic_t pending_flip;
28435 + atomic_unchecked_t pending_flip;
28436 };
28437
28438 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28439 @@ -1273,7 +1273,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28440 extern void intel_teardown_gmbus(struct drm_device *dev);
28441 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28442 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28443 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28444 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28445 {
28446 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28447 }
28448 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28449 index b9da890..cad1d98 100644
28450 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28451 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28452 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28453 i915_gem_clflush_object(obj);
28454
28455 if (obj->base.pending_write_domain)
28456 - cd->flips |= atomic_read(&obj->pending_flip);
28457 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28458
28459 /* The actual obj->write_domain will be updated with
28460 * pending_write_domain after we emit the accumulated flush for all
28461 @@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28462
28463 static int
28464 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28465 - int count)
28466 + unsigned int count)
28467 {
28468 - int i;
28469 + unsigned int i;
28470
28471 for (i = 0; i < count; i++) {
28472 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28473 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28474 index d47a53b..61154c2 100644
28475 --- a/drivers/gpu/drm/i915/i915_irq.c
28476 +++ b/drivers/gpu/drm/i915/i915_irq.c
28477 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28478 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28479 struct drm_i915_master_private *master_priv;
28480
28481 - atomic_inc(&dev_priv->irq_received);
28482 + atomic_inc_unchecked(&dev_priv->irq_received);
28483
28484 /* disable master interrupt before clearing iir */
28485 de_ier = I915_READ(DEIER);
28486 @@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28487 struct drm_i915_master_private *master_priv;
28488 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28489
28490 - atomic_inc(&dev_priv->irq_received);
28491 + atomic_inc_unchecked(&dev_priv->irq_received);
28492
28493 if (IS_GEN6(dev))
28494 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28495 @@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28496 int ret = IRQ_NONE, pipe;
28497 bool blc_event = false;
28498
28499 - atomic_inc(&dev_priv->irq_received);
28500 + atomic_inc_unchecked(&dev_priv->irq_received);
28501
28502 iir = I915_READ(IIR);
28503
28504 @@ -1750,7 +1750,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28505 {
28506 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28507
28508 - atomic_set(&dev_priv->irq_received, 0);
28509 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28510
28511 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28512 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28513 @@ -1938,7 +1938,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28514 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28515 int pipe;
28516
28517 - atomic_set(&dev_priv->irq_received, 0);
28518 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28519
28520 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28521 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28522 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28523 index daa5743..c0757a9 100644
28524 --- a/drivers/gpu/drm/i915/intel_display.c
28525 +++ b/drivers/gpu/drm/i915/intel_display.c
28526 @@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28527
28528 wait_event(dev_priv->pending_flip_queue,
28529 atomic_read(&dev_priv->mm.wedged) ||
28530 - atomic_read(&obj->pending_flip) == 0);
28531 + atomic_read_unchecked(&obj->pending_flip) == 0);
28532
28533 /* Big Hammer, we also need to ensure that any pending
28534 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28535 @@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28536 obj = to_intel_framebuffer(crtc->fb)->obj;
28537 dev_priv = crtc->dev->dev_private;
28538 wait_event(dev_priv->pending_flip_queue,
28539 - atomic_read(&obj->pending_flip) == 0);
28540 + atomic_read_unchecked(&obj->pending_flip) == 0);
28541 }
28542
28543 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28544 @@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28545
28546 atomic_clear_mask(1 << intel_crtc->plane,
28547 &obj->pending_flip.counter);
28548 - if (atomic_read(&obj->pending_flip) == 0)
28549 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
28550 wake_up(&dev_priv->pending_flip_queue);
28551
28552 schedule_work(&work->work);
28553 @@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28554 /* Block clients from rendering to the new back buffer until
28555 * the flip occurs and the object is no longer visible.
28556 */
28557 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28558 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28559
28560 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28561 if (ret)
28562 @@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28563 return 0;
28564
28565 cleanup_pending:
28566 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28567 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28568 drm_gem_object_unreference(&work->old_fb_obj->base);
28569 drm_gem_object_unreference(&obj->base);
28570 mutex_unlock(&dev->struct_mutex);
28571 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28572 index 54558a0..2d97005 100644
28573 --- a/drivers/gpu/drm/mga/mga_drv.h
28574 +++ b/drivers/gpu/drm/mga/mga_drv.h
28575 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28576 u32 clear_cmd;
28577 u32 maccess;
28578
28579 - atomic_t vbl_received; /**< Number of vblanks received. */
28580 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28581 wait_queue_head_t fence_queue;
28582 - atomic_t last_fence_retired;
28583 + atomic_unchecked_t last_fence_retired;
28584 u32 next_fence_to_post;
28585
28586 unsigned int fb_cpp;
28587 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28588 index 2581202..f230a8d9 100644
28589 --- a/drivers/gpu/drm/mga/mga_irq.c
28590 +++ b/drivers/gpu/drm/mga/mga_irq.c
28591 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28592 if (crtc != 0)
28593 return 0;
28594
28595 - return atomic_read(&dev_priv->vbl_received);
28596 + return atomic_read_unchecked(&dev_priv->vbl_received);
28597 }
28598
28599
28600 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28601 /* VBLANK interrupt */
28602 if (status & MGA_VLINEPEN) {
28603 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28604 - atomic_inc(&dev_priv->vbl_received);
28605 + atomic_inc_unchecked(&dev_priv->vbl_received);
28606 drm_handle_vblank(dev, 0);
28607 handled = 1;
28608 }
28609 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28610 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28611 MGA_WRITE(MGA_PRIMEND, prim_end);
28612
28613 - atomic_inc(&dev_priv->last_fence_retired);
28614 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28615 DRM_WAKEUP(&dev_priv->fence_queue);
28616 handled = 1;
28617 }
28618 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28619 * using fences.
28620 */
28621 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28622 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28623 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28624 - *sequence) <= (1 << 23)));
28625
28626 *sequence = cur_fence;
28627 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28628 index 5fc201b..7b032b9 100644
28629 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28630 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28631 @@ -201,7 +201,7 @@ struct methods {
28632 const char desc[8];
28633 void (*loadbios)(struct drm_device *, uint8_t *);
28634 const bool rw;
28635 -};
28636 +} __do_const;
28637
28638 static struct methods shadow_methods[] = {
28639 { "PRAMIN", load_vbios_pramin, true },
28640 @@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28641 struct bit_table {
28642 const char id;
28643 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28644 -};
28645 +} __no_const;
28646
28647 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28648
28649 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28650 index 4c0be3a..5757582 100644
28651 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28652 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28653 @@ -238,7 +238,7 @@ struct nouveau_channel {
28654 struct list_head pending;
28655 uint32_t sequence;
28656 uint32_t sequence_ack;
28657 - atomic_t last_sequence_irq;
28658 + atomic_unchecked_t last_sequence_irq;
28659 struct nouveau_vma vma;
28660 } fence;
28661
28662 @@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28663 u32 handle, u16 class);
28664 void (*set_tile_region)(struct drm_device *dev, int i);
28665 void (*tlb_flush)(struct drm_device *, int engine);
28666 -};
28667 +} __no_const;
28668
28669 struct nouveau_instmem_engine {
28670 void *priv;
28671 @@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28672 struct nouveau_mc_engine {
28673 int (*init)(struct drm_device *dev);
28674 void (*takedown)(struct drm_device *dev);
28675 -};
28676 +} __no_const;
28677
28678 struct nouveau_timer_engine {
28679 int (*init)(struct drm_device *dev);
28680 void (*takedown)(struct drm_device *dev);
28681 uint64_t (*read)(struct drm_device *dev);
28682 -};
28683 +} __no_const;
28684
28685 struct nouveau_fb_engine {
28686 int num_tiles;
28687 @@ -558,7 +558,7 @@ struct nouveau_vram_engine {
28688 void (*put)(struct drm_device *, struct nouveau_mem **);
28689
28690 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28691 -};
28692 +} __no_const;
28693
28694 struct nouveau_engine {
28695 struct nouveau_instmem_engine instmem;
28696 @@ -706,7 +706,7 @@ struct drm_nouveau_private {
28697 struct drm_global_reference mem_global_ref;
28698 struct ttm_bo_global_ref bo_global_ref;
28699 struct ttm_bo_device bdev;
28700 - atomic_t validate_sequence;
28701 + atomic_unchecked_t validate_sequence;
28702 } ttm;
28703
28704 struct {
28705 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28706 index 2f6daae..c9d7b9e 100644
28707 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28708 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28709 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28710 if (USE_REFCNT(dev))
28711 sequence = nvchan_rd32(chan, 0x48);
28712 else
28713 - sequence = atomic_read(&chan->fence.last_sequence_irq);
28714 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28715
28716 if (chan->fence.sequence_ack == sequence)
28717 goto out;
28718 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28719 return ret;
28720 }
28721
28722 - atomic_set(&chan->fence.last_sequence_irq, 0);
28723 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28724 return 0;
28725 }
28726
28727 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28728 index 7ce3fde..cb3ea04 100644
28729 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28730 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28731 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28732 int trycnt = 0;
28733 int ret, i;
28734
28735 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28736 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28737 retry:
28738 if (++trycnt > 100000) {
28739 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28740 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28741 index d8831ab..0ba8356 100644
28742 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
28743 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28744 @@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28745 bool can_switch;
28746
28747 spin_lock(&dev->count_lock);
28748 - can_switch = (dev->open_count == 0);
28749 + can_switch = (local_read(&dev->open_count) == 0);
28750 spin_unlock(&dev->count_lock);
28751 return can_switch;
28752 }
28753 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28754 index dbdea8e..cd6eeeb 100644
28755 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
28756 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28757 @@ -554,7 +554,7 @@ static int
28758 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28759 u32 class, u32 mthd, u32 data)
28760 {
28761 - atomic_set(&chan->fence.last_sequence_irq, data);
28762 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28763 return 0;
28764 }
28765
28766 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28767 index bcac90b..53bfc76 100644
28768 --- a/drivers/gpu/drm/r128/r128_cce.c
28769 +++ b/drivers/gpu/drm/r128/r128_cce.c
28770 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28771
28772 /* GH: Simple idle check.
28773 */
28774 - atomic_set(&dev_priv->idle_count, 0);
28775 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28776
28777 /* We don't support anything other than bus-mastering ring mode,
28778 * but the ring can be in either AGP or PCI space for the ring
28779 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28780 index 930c71b..499aded 100644
28781 --- a/drivers/gpu/drm/r128/r128_drv.h
28782 +++ b/drivers/gpu/drm/r128/r128_drv.h
28783 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28784 int is_pci;
28785 unsigned long cce_buffers_offset;
28786
28787 - atomic_t idle_count;
28788 + atomic_unchecked_t idle_count;
28789
28790 int page_flipping;
28791 int current_page;
28792 u32 crtc_offset;
28793 u32 crtc_offset_cntl;
28794
28795 - atomic_t vbl_received;
28796 + atomic_unchecked_t vbl_received;
28797
28798 u32 color_fmt;
28799 unsigned int front_offset;
28800 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28801 index 429d5a0..7e899ed 100644
28802 --- a/drivers/gpu/drm/r128/r128_irq.c
28803 +++ b/drivers/gpu/drm/r128/r128_irq.c
28804 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28805 if (crtc != 0)
28806 return 0;
28807
28808 - return atomic_read(&dev_priv->vbl_received);
28809 + return atomic_read_unchecked(&dev_priv->vbl_received);
28810 }
28811
28812 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28813 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28814 /* VBLANK interrupt */
28815 if (status & R128_CRTC_VBLANK_INT) {
28816 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28817 - atomic_inc(&dev_priv->vbl_received);
28818 + atomic_inc_unchecked(&dev_priv->vbl_received);
28819 drm_handle_vblank(dev, 0);
28820 return IRQ_HANDLED;
28821 }
28822 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28823 index a9e33ce..09edd4b 100644
28824 --- a/drivers/gpu/drm/r128/r128_state.c
28825 +++ b/drivers/gpu/drm/r128/r128_state.c
28826 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28827
28828 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28829 {
28830 - if (atomic_read(&dev_priv->idle_count) == 0)
28831 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28832 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28833 else
28834 - atomic_set(&dev_priv->idle_count, 0);
28835 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28836 }
28837
28838 #endif
28839 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28840 index 5a82b6b..9e69c73 100644
28841 --- a/drivers/gpu/drm/radeon/mkregtable.c
28842 +++ b/drivers/gpu/drm/radeon/mkregtable.c
28843 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28844 regex_t mask_rex;
28845 regmatch_t match[4];
28846 char buf[1024];
28847 - size_t end;
28848 + long end;
28849 int len;
28850 int done = 0;
28851 int r;
28852 unsigned o;
28853 struct offset *offset;
28854 char last_reg_s[10];
28855 - int last_reg;
28856 + unsigned long last_reg;
28857
28858 if (regcomp
28859 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28860 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28861 index 8227e76..ce0b195 100644
28862 --- a/drivers/gpu/drm/radeon/radeon.h
28863 +++ b/drivers/gpu/drm/radeon/radeon.h
28864 @@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28865 */
28866 struct radeon_fence_driver {
28867 uint32_t scratch_reg;
28868 - atomic_t seq;
28869 + atomic_unchecked_t seq;
28870 uint32_t last_seq;
28871 unsigned long last_jiffies;
28872 unsigned long last_timeout;
28873 @@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
28874 int x2, int y2);
28875 void (*draw_auto)(struct radeon_device *rdev);
28876 void (*set_default_state)(struct radeon_device *rdev);
28877 -};
28878 +} __no_const;
28879
28880 struct r600_blit {
28881 struct mutex mutex;
28882 @@ -954,7 +954,7 @@ struct radeon_asic {
28883 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28884 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28885 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28886 -};
28887 +} __no_const;
28888
28889 /*
28890 * Asic structures
28891 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28892 index 9231564..78b00fd 100644
28893 --- a/drivers/gpu/drm/radeon/radeon_device.c
28894 +++ b/drivers/gpu/drm/radeon/radeon_device.c
28895 @@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28896 bool can_switch;
28897
28898 spin_lock(&dev->count_lock);
28899 - can_switch = (dev->open_count == 0);
28900 + can_switch = (local_read(&dev->open_count) == 0);
28901 spin_unlock(&dev->count_lock);
28902 return can_switch;
28903 }
28904 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28905 index a1b59ca..86f2d44 100644
28906 --- a/drivers/gpu/drm/radeon/radeon_drv.h
28907 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
28908 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28909
28910 /* SW interrupt */
28911 wait_queue_head_t swi_queue;
28912 - atomic_t swi_emitted;
28913 + atomic_unchecked_t swi_emitted;
28914 int vblank_crtc;
28915 uint32_t irq_enable_reg;
28916 uint32_t r500_disp_irq_reg;
28917 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28918 index 76ec0e9..6feb1a3 100644
28919 --- a/drivers/gpu/drm/radeon/radeon_fence.c
28920 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
28921 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28922 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28923 return 0;
28924 }
28925 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28926 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28927 if (!rdev->cp.ready)
28928 /* FIXME: cp is not running assume everythings is done right
28929 * away
28930 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28931 return r;
28932 }
28933 radeon_fence_write(rdev, 0);
28934 - atomic_set(&rdev->fence_drv.seq, 0);
28935 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28936 INIT_LIST_HEAD(&rdev->fence_drv.created);
28937 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28938 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28939 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28940 index 48b7cea..342236f 100644
28941 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28942 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28943 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28944 request = compat_alloc_user_space(sizeof(*request));
28945 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28946 || __put_user(req32.param, &request->param)
28947 - || __put_user((void __user *)(unsigned long)req32.value,
28948 + || __put_user((unsigned long)req32.value,
28949 &request->value))
28950 return -EFAULT;
28951
28952 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28953 index 00da384..32f972d 100644
28954 --- a/drivers/gpu/drm/radeon/radeon_irq.c
28955 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
28956 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28957 unsigned int ret;
28958 RING_LOCALS;
28959
28960 - atomic_inc(&dev_priv->swi_emitted);
28961 - ret = atomic_read(&dev_priv->swi_emitted);
28962 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28963 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28964
28965 BEGIN_RING(4);
28966 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28967 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28968 drm_radeon_private_t *dev_priv =
28969 (drm_radeon_private_t *) dev->dev_private;
28970
28971 - atomic_set(&dev_priv->swi_emitted, 0);
28972 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28973 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28974
28975 dev->max_vblank_count = 0x001fffff;
28976 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28977 index e8422ae..d22d4a8 100644
28978 --- a/drivers/gpu/drm/radeon/radeon_state.c
28979 +++ b/drivers/gpu/drm/radeon/radeon_state.c
28980 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28981 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28982 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28983
28984 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28985 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28986 sarea_priv->nbox * sizeof(depth_boxes[0])))
28987 return -EFAULT;
28988
28989 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
28990 {
28991 drm_radeon_private_t *dev_priv = dev->dev_private;
28992 drm_radeon_getparam_t *param = data;
28993 - int value;
28994 + int value = 0;
28995
28996 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28997
28998 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
28999 index 0b5468b..9c4b308 100644
29000 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
29001 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
29002 @@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29003 }
29004 if (unlikely(ttm_vm_ops == NULL)) {
29005 ttm_vm_ops = vma->vm_ops;
29006 - radeon_ttm_vm_ops = *ttm_vm_ops;
29007 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29008 + pax_open_kernel();
29009 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
29010 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29011 + pax_close_kernel();
29012 }
29013 vma->vm_ops = &radeon_ttm_vm_ops;
29014 return 0;
29015 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
29016 index a9049ed..501f284 100644
29017 --- a/drivers/gpu/drm/radeon/rs690.c
29018 +++ b/drivers/gpu/drm/radeon/rs690.c
29019 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
29020 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29021 rdev->pm.sideport_bandwidth.full)
29022 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29023 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
29024 + read_delay_latency.full = dfixed_const(800 * 1000);
29025 read_delay_latency.full = dfixed_div(read_delay_latency,
29026 rdev->pm.igp_sideport_mclk);
29027 + a.full = dfixed_const(370);
29028 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
29029 } else {
29030 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29031 rdev->pm.k8_bandwidth.full)
29032 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
29033 index 727e93d..1565650 100644
29034 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
29035 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
29036 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
29037 static int ttm_pool_mm_shrink(struct shrinker *shrink,
29038 struct shrink_control *sc)
29039 {
29040 - static atomic_t start_pool = ATOMIC_INIT(0);
29041 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
29042 unsigned i;
29043 - unsigned pool_offset = atomic_add_return(1, &start_pool);
29044 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
29045 struct ttm_page_pool *pool;
29046 int shrink_pages = sc->nr_to_scan;
29047
29048 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
29049 index 9cf87d9..2000b7d 100644
29050 --- a/drivers/gpu/drm/via/via_drv.h
29051 +++ b/drivers/gpu/drm/via/via_drv.h
29052 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
29053 typedef uint32_t maskarray_t[5];
29054
29055 typedef struct drm_via_irq {
29056 - atomic_t irq_received;
29057 + atomic_unchecked_t irq_received;
29058 uint32_t pending_mask;
29059 uint32_t enable_mask;
29060 wait_queue_head_t irq_queue;
29061 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
29062 struct timeval last_vblank;
29063 int last_vblank_valid;
29064 unsigned usec_per_vblank;
29065 - atomic_t vbl_received;
29066 + atomic_unchecked_t vbl_received;
29067 drm_via_state_t hc_state;
29068 char pci_buf[VIA_PCI_BUF_SIZE];
29069 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
29070 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
29071 index d391f48..10c8ca3 100644
29072 --- a/drivers/gpu/drm/via/via_irq.c
29073 +++ b/drivers/gpu/drm/via/via_irq.c
29074 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
29075 if (crtc != 0)
29076 return 0;
29077
29078 - return atomic_read(&dev_priv->vbl_received);
29079 + return atomic_read_unchecked(&dev_priv->vbl_received);
29080 }
29081
29082 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29083 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29084
29085 status = VIA_READ(VIA_REG_INTERRUPT);
29086 if (status & VIA_IRQ_VBLANK_PENDING) {
29087 - atomic_inc(&dev_priv->vbl_received);
29088 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29089 + atomic_inc_unchecked(&dev_priv->vbl_received);
29090 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29091 do_gettimeofday(&cur_vblank);
29092 if (dev_priv->last_vblank_valid) {
29093 dev_priv->usec_per_vblank =
29094 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29095 dev_priv->last_vblank = cur_vblank;
29096 dev_priv->last_vblank_valid = 1;
29097 }
29098 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
29099 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
29100 DRM_DEBUG("US per vblank is: %u\n",
29101 dev_priv->usec_per_vblank);
29102 }
29103 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29104
29105 for (i = 0; i < dev_priv->num_irqs; ++i) {
29106 if (status & cur_irq->pending_mask) {
29107 - atomic_inc(&cur_irq->irq_received);
29108 + atomic_inc_unchecked(&cur_irq->irq_received);
29109 DRM_WAKEUP(&cur_irq->irq_queue);
29110 handled = 1;
29111 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
29112 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
29113 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29114 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
29115 masks[irq][4]));
29116 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
29117 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
29118 } else {
29119 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29120 (((cur_irq_sequence =
29121 - atomic_read(&cur_irq->irq_received)) -
29122 + atomic_read_unchecked(&cur_irq->irq_received)) -
29123 *sequence) <= (1 << 23)));
29124 }
29125 *sequence = cur_irq_sequence;
29126 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
29127 }
29128
29129 for (i = 0; i < dev_priv->num_irqs; ++i) {
29130 - atomic_set(&cur_irq->irq_received, 0);
29131 + atomic_set_unchecked(&cur_irq->irq_received, 0);
29132 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
29133 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
29134 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
29135 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
29136 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
29137 case VIA_IRQ_RELATIVE:
29138 irqwait->request.sequence +=
29139 - atomic_read(&cur_irq->irq_received);
29140 + atomic_read_unchecked(&cur_irq->irq_received);
29141 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
29142 case VIA_IRQ_ABSOLUTE:
29143 break;
29144 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29145 index dc27970..f18b008 100644
29146 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29147 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
29148 @@ -260,7 +260,7 @@ struct vmw_private {
29149 * Fencing and IRQs.
29150 */
29151
29152 - atomic_t marker_seq;
29153 + atomic_unchecked_t marker_seq;
29154 wait_queue_head_t fence_queue;
29155 wait_queue_head_t fifo_queue;
29156 int fence_queue_waiters; /* Protected by hw_mutex */
29157 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29158 index a0c2f12..68ae6cb 100644
29159 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29160 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29161 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
29162 (unsigned int) min,
29163 (unsigned int) fifo->capabilities);
29164
29165 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
29166 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
29167 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
29168 vmw_marker_queue_init(&fifo->marker_queue);
29169 return vmw_fifo_send_fence(dev_priv, &dummy);
29170 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
29171 if (reserveable)
29172 iowrite32(bytes, fifo_mem +
29173 SVGA_FIFO_RESERVED);
29174 - return fifo_mem + (next_cmd >> 2);
29175 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
29176 } else {
29177 need_bounce = true;
29178 }
29179 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29180
29181 fm = vmw_fifo_reserve(dev_priv, bytes);
29182 if (unlikely(fm == NULL)) {
29183 - *seqno = atomic_read(&dev_priv->marker_seq);
29184 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29185 ret = -ENOMEM;
29186 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
29187 false, 3*HZ);
29188 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29189 }
29190
29191 do {
29192 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
29193 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
29194 } while (*seqno == 0);
29195
29196 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
29197 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29198 index cabc95f..14b3d77 100644
29199 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29200 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29201 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
29202 * emitted. Then the fence is stale and signaled.
29203 */
29204
29205 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
29206 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
29207 > VMW_FENCE_WRAP);
29208
29209 return ret;
29210 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
29211
29212 if (fifo_idle)
29213 down_read(&fifo_state->rwsem);
29214 - signal_seq = atomic_read(&dev_priv->marker_seq);
29215 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
29216 ret = 0;
29217
29218 for (;;) {
29219 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29220 index 8a8725c..afed796 100644
29221 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29222 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29223 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
29224 while (!vmw_lag_lt(queue, us)) {
29225 spin_lock(&queue->lock);
29226 if (list_empty(&queue->head))
29227 - seqno = atomic_read(&dev_priv->marker_seq);
29228 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29229 else {
29230 marker = list_first_entry(&queue->head,
29231 struct vmw_marker, head);
29232 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
29233 index bb656d8..4169fca 100644
29234 --- a/drivers/hid/hid-core.c
29235 +++ b/drivers/hid/hid-core.c
29236 @@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
29237
29238 int hid_add_device(struct hid_device *hdev)
29239 {
29240 - static atomic_t id = ATOMIC_INIT(0);
29241 + static atomic_unchecked_t id = ATOMIC_INIT(0);
29242 int ret;
29243
29244 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29245 @@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
29246 /* XXX hack, any other cleaner solution after the driver core
29247 * is converted to allow more than 20 bytes as the device name? */
29248 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29249 - hdev->vendor, hdev->product, atomic_inc_return(&id));
29250 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29251
29252 hid_debug_register(hdev, dev_name(&hdev->dev));
29253 ret = device_add(&hdev->dev);
29254 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
29255 index 4ef02b2..8a96831 100644
29256 --- a/drivers/hid/usbhid/hiddev.c
29257 +++ b/drivers/hid/usbhid/hiddev.c
29258 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
29259 break;
29260
29261 case HIDIOCAPPLICATION:
29262 - if (arg < 0 || arg >= hid->maxapplication)
29263 + if (arg >= hid->maxapplication)
29264 break;
29265
29266 for (i = 0; i < hid->maxcollection; i++)
29267 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
29268 index 4065374..10ed7dc 100644
29269 --- a/drivers/hv/channel.c
29270 +++ b/drivers/hv/channel.c
29271 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
29272 int ret = 0;
29273 int t;
29274
29275 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
29276 - atomic_inc(&vmbus_connection.next_gpadl_handle);
29277 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
29278 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
29279
29280 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
29281 if (ret)
29282 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
29283 index 0fb100e..baf87e5 100644
29284 --- a/drivers/hv/hv.c
29285 +++ b/drivers/hv/hv.c
29286 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
29287 u64 output_address = (output) ? virt_to_phys(output) : 0;
29288 u32 output_address_hi = output_address >> 32;
29289 u32 output_address_lo = output_address & 0xFFFFFFFF;
29290 - void *hypercall_page = hv_context.hypercall_page;
29291 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
29292
29293 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
29294 "=a"(hv_status_lo) : "d" (control_hi),
29295 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
29296 index 0aee112..b72d21f 100644
29297 --- a/drivers/hv/hyperv_vmbus.h
29298 +++ b/drivers/hv/hyperv_vmbus.h
29299 @@ -556,7 +556,7 @@ enum vmbus_connect_state {
29300 struct vmbus_connection {
29301 enum vmbus_connect_state conn_state;
29302
29303 - atomic_t next_gpadl_handle;
29304 + atomic_unchecked_t next_gpadl_handle;
29305
29306 /*
29307 * Represents channel interrupts. Each bit position represents a
29308 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
29309 index d2d0a2a..90b8f4d 100644
29310 --- a/drivers/hv/vmbus_drv.c
29311 +++ b/drivers/hv/vmbus_drv.c
29312 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
29313 {
29314 int ret = 0;
29315
29316 - static atomic_t device_num = ATOMIC_INIT(0);
29317 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
29318
29319 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
29320 - atomic_inc_return(&device_num));
29321 + atomic_inc_return_unchecked(&device_num));
29322
29323 child_device_obj->device.bus = &hv_bus;
29324 child_device_obj->device.parent = &hv_acpi_dev->dev;
29325 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29326 index 66f6729..2d6de0a 100644
29327 --- a/drivers/hwmon/acpi_power_meter.c
29328 +++ b/drivers/hwmon/acpi_power_meter.c
29329 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29330 return res;
29331
29332 temp /= 1000;
29333 - if (temp < 0)
29334 - return -EINVAL;
29335
29336 mutex_lock(&resource->lock);
29337 resource->trip[attr->index - 7] = temp;
29338 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29339 index 5357925..6cf0418 100644
29340 --- a/drivers/hwmon/sht15.c
29341 +++ b/drivers/hwmon/sht15.c
29342 @@ -166,7 +166,7 @@ struct sht15_data {
29343 int supply_uV;
29344 bool supply_uV_valid;
29345 struct work_struct update_supply_work;
29346 - atomic_t interrupt_handled;
29347 + atomic_unchecked_t interrupt_handled;
29348 };
29349
29350 /**
29351 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29352 return ret;
29353
29354 gpio_direction_input(data->pdata->gpio_data);
29355 - atomic_set(&data->interrupt_handled, 0);
29356 + atomic_set_unchecked(&data->interrupt_handled, 0);
29357
29358 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29359 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29360 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29361 /* Only relevant if the interrupt hasn't occurred. */
29362 - if (!atomic_read(&data->interrupt_handled))
29363 + if (!atomic_read_unchecked(&data->interrupt_handled))
29364 schedule_work(&data->read_work);
29365 }
29366 ret = wait_event_timeout(data->wait_queue,
29367 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29368
29369 /* First disable the interrupt */
29370 disable_irq_nosync(irq);
29371 - atomic_inc(&data->interrupt_handled);
29372 + atomic_inc_unchecked(&data->interrupt_handled);
29373 /* Then schedule a reading work struct */
29374 if (data->state != SHT15_READING_NOTHING)
29375 schedule_work(&data->read_work);
29376 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29377 * If not, then start the interrupt again - care here as could
29378 * have gone low in meantime so verify it hasn't!
29379 */
29380 - atomic_set(&data->interrupt_handled, 0);
29381 + atomic_set_unchecked(&data->interrupt_handled, 0);
29382 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29383 /* If still not occurred or another handler has been scheduled */
29384 if (gpio_get_value(data->pdata->gpio_data)
29385 - || atomic_read(&data->interrupt_handled))
29386 + || atomic_read_unchecked(&data->interrupt_handled))
29387 return;
29388 }
29389
29390 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29391 index 378fcb5..5e91fa8 100644
29392 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
29393 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29394 @@ -43,7 +43,7 @@
29395 extern struct i2c_adapter amd756_smbus;
29396
29397 static struct i2c_adapter *s4882_adapter;
29398 -static struct i2c_algorithm *s4882_algo;
29399 +static i2c_algorithm_no_const *s4882_algo;
29400
29401 /* Wrapper access functions for multiplexed SMBus */
29402 static DEFINE_MUTEX(amd756_lock);
29403 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29404 index 29015eb..af2d8e9 100644
29405 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29406 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29407 @@ -41,7 +41,7 @@
29408 extern struct i2c_adapter *nforce2_smbus;
29409
29410 static struct i2c_adapter *s4985_adapter;
29411 -static struct i2c_algorithm *s4985_algo;
29412 +static i2c_algorithm_no_const *s4985_algo;
29413
29414 /* Wrapper access functions for multiplexed SMBus */
29415 static DEFINE_MUTEX(nforce2_lock);
29416 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29417 index d7a4833..7fae376 100644
29418 --- a/drivers/i2c/i2c-mux.c
29419 +++ b/drivers/i2c/i2c-mux.c
29420 @@ -28,7 +28,7 @@
29421 /* multiplexer per channel data */
29422 struct i2c_mux_priv {
29423 struct i2c_adapter adap;
29424 - struct i2c_algorithm algo;
29425 + i2c_algorithm_no_const algo;
29426
29427 struct i2c_adapter *parent;
29428 void *mux_dev; /* the mux chip/device */
29429 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29430 index 57d00ca..0145194 100644
29431 --- a/drivers/ide/aec62xx.c
29432 +++ b/drivers/ide/aec62xx.c
29433 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29434 .cable_detect = atp86x_cable_detect,
29435 };
29436
29437 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29438 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29439 { /* 0: AEC6210 */
29440 .name = DRV_NAME,
29441 .init_chipset = init_chipset_aec62xx,
29442 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29443 index 2c8016a..911a27c 100644
29444 --- a/drivers/ide/alim15x3.c
29445 +++ b/drivers/ide/alim15x3.c
29446 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29447 .dma_sff_read_status = ide_dma_sff_read_status,
29448 };
29449
29450 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
29451 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
29452 .name = DRV_NAME,
29453 .init_chipset = init_chipset_ali15x3,
29454 .init_hwif = init_hwif_ali15x3,
29455 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29456 index 3747b25..56fc995 100644
29457 --- a/drivers/ide/amd74xx.c
29458 +++ b/drivers/ide/amd74xx.c
29459 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29460 .udma_mask = udma, \
29461 }
29462
29463 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29464 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29465 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29466 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29467 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29468 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29469 index 15f0ead..cb43480 100644
29470 --- a/drivers/ide/atiixp.c
29471 +++ b/drivers/ide/atiixp.c
29472 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29473 .cable_detect = atiixp_cable_detect,
29474 };
29475
29476 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29477 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29478 { /* 0: IXP200/300/400/700 */
29479 .name = DRV_NAME,
29480 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29481 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29482 index 5f80312..d1fc438 100644
29483 --- a/drivers/ide/cmd64x.c
29484 +++ b/drivers/ide/cmd64x.c
29485 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29486 .dma_sff_read_status = ide_dma_sff_read_status,
29487 };
29488
29489 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29490 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29491 { /* 0: CMD643 */
29492 .name = DRV_NAME,
29493 .init_chipset = init_chipset_cmd64x,
29494 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29495 index 2c1e5f7..1444762 100644
29496 --- a/drivers/ide/cs5520.c
29497 +++ b/drivers/ide/cs5520.c
29498 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29499 .set_dma_mode = cs5520_set_dma_mode,
29500 };
29501
29502 -static const struct ide_port_info cyrix_chipset __devinitdata = {
29503 +static const struct ide_port_info cyrix_chipset __devinitconst = {
29504 .name = DRV_NAME,
29505 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29506 .port_ops = &cs5520_port_ops,
29507 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29508 index 4dc4eb9..49b40ad 100644
29509 --- a/drivers/ide/cs5530.c
29510 +++ b/drivers/ide/cs5530.c
29511 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29512 .udma_filter = cs5530_udma_filter,
29513 };
29514
29515 -static const struct ide_port_info cs5530_chipset __devinitdata = {
29516 +static const struct ide_port_info cs5530_chipset __devinitconst = {
29517 .name = DRV_NAME,
29518 .init_chipset = init_chipset_cs5530,
29519 .init_hwif = init_hwif_cs5530,
29520 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29521 index 5059faf..18d4c85 100644
29522 --- a/drivers/ide/cs5535.c
29523 +++ b/drivers/ide/cs5535.c
29524 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29525 .cable_detect = cs5535_cable_detect,
29526 };
29527
29528 -static const struct ide_port_info cs5535_chipset __devinitdata = {
29529 +static const struct ide_port_info cs5535_chipset __devinitconst = {
29530 .name = DRV_NAME,
29531 .port_ops = &cs5535_port_ops,
29532 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29533 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29534 index 847553f..3ffb49d 100644
29535 --- a/drivers/ide/cy82c693.c
29536 +++ b/drivers/ide/cy82c693.c
29537 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29538 .set_dma_mode = cy82c693_set_dma_mode,
29539 };
29540
29541 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
29542 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
29543 .name = DRV_NAME,
29544 .init_iops = init_iops_cy82c693,
29545 .port_ops = &cy82c693_port_ops,
29546 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29547 index 58c51cd..4aec3b8 100644
29548 --- a/drivers/ide/hpt366.c
29549 +++ b/drivers/ide/hpt366.c
29550 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29551 }
29552 };
29553
29554 -static const struct hpt_info hpt36x __devinitdata = {
29555 +static const struct hpt_info hpt36x __devinitconst = {
29556 .chip_name = "HPT36x",
29557 .chip_type = HPT36x,
29558 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29559 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29560 .timings = &hpt36x_timings
29561 };
29562
29563 -static const struct hpt_info hpt370 __devinitdata = {
29564 +static const struct hpt_info hpt370 __devinitconst = {
29565 .chip_name = "HPT370",
29566 .chip_type = HPT370,
29567 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29568 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29569 .timings = &hpt37x_timings
29570 };
29571
29572 -static const struct hpt_info hpt370a __devinitdata = {
29573 +static const struct hpt_info hpt370a __devinitconst = {
29574 .chip_name = "HPT370A",
29575 .chip_type = HPT370A,
29576 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29577 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29578 .timings = &hpt37x_timings
29579 };
29580
29581 -static const struct hpt_info hpt374 __devinitdata = {
29582 +static const struct hpt_info hpt374 __devinitconst = {
29583 .chip_name = "HPT374",
29584 .chip_type = HPT374,
29585 .udma_mask = ATA_UDMA5,
29586 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29587 .timings = &hpt37x_timings
29588 };
29589
29590 -static const struct hpt_info hpt372 __devinitdata = {
29591 +static const struct hpt_info hpt372 __devinitconst = {
29592 .chip_name = "HPT372",
29593 .chip_type = HPT372,
29594 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29595 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29596 .timings = &hpt37x_timings
29597 };
29598
29599 -static const struct hpt_info hpt372a __devinitdata = {
29600 +static const struct hpt_info hpt372a __devinitconst = {
29601 .chip_name = "HPT372A",
29602 .chip_type = HPT372A,
29603 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29604 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29605 .timings = &hpt37x_timings
29606 };
29607
29608 -static const struct hpt_info hpt302 __devinitdata = {
29609 +static const struct hpt_info hpt302 __devinitconst = {
29610 .chip_name = "HPT302",
29611 .chip_type = HPT302,
29612 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29613 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29614 .timings = &hpt37x_timings
29615 };
29616
29617 -static const struct hpt_info hpt371 __devinitdata = {
29618 +static const struct hpt_info hpt371 __devinitconst = {
29619 .chip_name = "HPT371",
29620 .chip_type = HPT371,
29621 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29622 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29623 .timings = &hpt37x_timings
29624 };
29625
29626 -static const struct hpt_info hpt372n __devinitdata = {
29627 +static const struct hpt_info hpt372n __devinitconst = {
29628 .chip_name = "HPT372N",
29629 .chip_type = HPT372N,
29630 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29631 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29632 .timings = &hpt37x_timings
29633 };
29634
29635 -static const struct hpt_info hpt302n __devinitdata = {
29636 +static const struct hpt_info hpt302n __devinitconst = {
29637 .chip_name = "HPT302N",
29638 .chip_type = HPT302N,
29639 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29640 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29641 .timings = &hpt37x_timings
29642 };
29643
29644 -static const struct hpt_info hpt371n __devinitdata = {
29645 +static const struct hpt_info hpt371n __devinitconst = {
29646 .chip_name = "HPT371N",
29647 .chip_type = HPT371N,
29648 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29649 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29650 .dma_sff_read_status = ide_dma_sff_read_status,
29651 };
29652
29653 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29654 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29655 { /* 0: HPT36x */
29656 .name = DRV_NAME,
29657 .init_chipset = init_chipset_hpt366,
29658 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29659 index 8126824..55a2798 100644
29660 --- a/drivers/ide/ide-cd.c
29661 +++ b/drivers/ide/ide-cd.c
29662 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29663 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29664 if ((unsigned long)buf & alignment
29665 || blk_rq_bytes(rq) & q->dma_pad_mask
29666 - || object_is_on_stack(buf))
29667 + || object_starts_on_stack(buf))
29668 drive->dma = 0;
29669 }
29670 }
29671 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29672 index a743e68..1cfd674 100644
29673 --- a/drivers/ide/ide-pci-generic.c
29674 +++ b/drivers/ide/ide-pci-generic.c
29675 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29676 .udma_mask = ATA_UDMA6, \
29677 }
29678
29679 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
29680 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
29681 /* 0: Unknown */
29682 DECLARE_GENERIC_PCI_DEV(0),
29683
29684 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29685 index 560e66d..d5dd180 100644
29686 --- a/drivers/ide/it8172.c
29687 +++ b/drivers/ide/it8172.c
29688 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29689 .set_dma_mode = it8172_set_dma_mode,
29690 };
29691
29692 -static const struct ide_port_info it8172_port_info __devinitdata = {
29693 +static const struct ide_port_info it8172_port_info __devinitconst = {
29694 .name = DRV_NAME,
29695 .port_ops = &it8172_port_ops,
29696 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29697 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29698 index 46816ba..1847aeb 100644
29699 --- a/drivers/ide/it8213.c
29700 +++ b/drivers/ide/it8213.c
29701 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29702 .cable_detect = it8213_cable_detect,
29703 };
29704
29705 -static const struct ide_port_info it8213_chipset __devinitdata = {
29706 +static const struct ide_port_info it8213_chipset __devinitconst = {
29707 .name = DRV_NAME,
29708 .enablebits = { {0x41, 0x80, 0x80} },
29709 .port_ops = &it8213_port_ops,
29710 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29711 index 2e3169f..c5611db 100644
29712 --- a/drivers/ide/it821x.c
29713 +++ b/drivers/ide/it821x.c
29714 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29715 .cable_detect = it821x_cable_detect,
29716 };
29717
29718 -static const struct ide_port_info it821x_chipset __devinitdata = {
29719 +static const struct ide_port_info it821x_chipset __devinitconst = {
29720 .name = DRV_NAME,
29721 .init_chipset = init_chipset_it821x,
29722 .init_hwif = init_hwif_it821x,
29723 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29724 index 74c2c4a..efddd7d 100644
29725 --- a/drivers/ide/jmicron.c
29726 +++ b/drivers/ide/jmicron.c
29727 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29728 .cable_detect = jmicron_cable_detect,
29729 };
29730
29731 -static const struct ide_port_info jmicron_chipset __devinitdata = {
29732 +static const struct ide_port_info jmicron_chipset __devinitconst = {
29733 .name = DRV_NAME,
29734 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29735 .port_ops = &jmicron_port_ops,
29736 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29737 index 95327a2..73f78d8 100644
29738 --- a/drivers/ide/ns87415.c
29739 +++ b/drivers/ide/ns87415.c
29740 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29741 .dma_sff_read_status = superio_dma_sff_read_status,
29742 };
29743
29744 -static const struct ide_port_info ns87415_chipset __devinitdata = {
29745 +static const struct ide_port_info ns87415_chipset __devinitconst = {
29746 .name = DRV_NAME,
29747 .init_hwif = init_hwif_ns87415,
29748 .tp_ops = &ns87415_tp_ops,
29749 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29750 index 1a53a4c..39edc66 100644
29751 --- a/drivers/ide/opti621.c
29752 +++ b/drivers/ide/opti621.c
29753 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29754 .set_pio_mode = opti621_set_pio_mode,
29755 };
29756
29757 -static const struct ide_port_info opti621_chipset __devinitdata = {
29758 +static const struct ide_port_info opti621_chipset __devinitconst = {
29759 .name = DRV_NAME,
29760 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29761 .port_ops = &opti621_port_ops,
29762 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29763 index 9546fe2..2e5ceb6 100644
29764 --- a/drivers/ide/pdc202xx_new.c
29765 +++ b/drivers/ide/pdc202xx_new.c
29766 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29767 .udma_mask = udma, \
29768 }
29769
29770 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29771 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29772 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29773 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29774 };
29775 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29776 index 3a35ec6..5634510 100644
29777 --- a/drivers/ide/pdc202xx_old.c
29778 +++ b/drivers/ide/pdc202xx_old.c
29779 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29780 .max_sectors = sectors, \
29781 }
29782
29783 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29784 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29785 { /* 0: PDC20246 */
29786 .name = DRV_NAME,
29787 .init_chipset = init_chipset_pdc202xx,
29788 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29789 index 1892e81..fe0fd60 100644
29790 --- a/drivers/ide/piix.c
29791 +++ b/drivers/ide/piix.c
29792 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29793 .udma_mask = udma, \
29794 }
29795
29796 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
29797 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
29798 /* 0: MPIIX */
29799 { /*
29800 * MPIIX actually has only a single IDE channel mapped to
29801 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29802 index a6414a8..c04173e 100644
29803 --- a/drivers/ide/rz1000.c
29804 +++ b/drivers/ide/rz1000.c
29805 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29806 }
29807 }
29808
29809 -static const struct ide_port_info rz1000_chipset __devinitdata = {
29810 +static const struct ide_port_info rz1000_chipset __devinitconst = {
29811 .name = DRV_NAME,
29812 .host_flags = IDE_HFLAG_NO_DMA,
29813 };
29814 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29815 index 356b9b5..d4758eb 100644
29816 --- a/drivers/ide/sc1200.c
29817 +++ b/drivers/ide/sc1200.c
29818 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29819 .dma_sff_read_status = ide_dma_sff_read_status,
29820 };
29821
29822 -static const struct ide_port_info sc1200_chipset __devinitdata = {
29823 +static const struct ide_port_info sc1200_chipset __devinitconst = {
29824 .name = DRV_NAME,
29825 .port_ops = &sc1200_port_ops,
29826 .dma_ops = &sc1200_dma_ops,
29827 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29828 index b7f5b0c..9701038 100644
29829 --- a/drivers/ide/scc_pata.c
29830 +++ b/drivers/ide/scc_pata.c
29831 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29832 .dma_sff_read_status = scc_dma_sff_read_status,
29833 };
29834
29835 -static const struct ide_port_info scc_chipset __devinitdata = {
29836 +static const struct ide_port_info scc_chipset __devinitconst = {
29837 .name = "sccIDE",
29838 .init_iops = init_iops_scc,
29839 .init_dma = scc_init_dma,
29840 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29841 index 35fb8da..24d72ef 100644
29842 --- a/drivers/ide/serverworks.c
29843 +++ b/drivers/ide/serverworks.c
29844 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29845 .cable_detect = svwks_cable_detect,
29846 };
29847
29848 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29849 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29850 { /* 0: OSB4 */
29851 .name = DRV_NAME,
29852 .init_chipset = init_chipset_svwks,
29853 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29854 index ddeda44..46f7e30 100644
29855 --- a/drivers/ide/siimage.c
29856 +++ b/drivers/ide/siimage.c
29857 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29858 .udma_mask = ATA_UDMA6, \
29859 }
29860
29861 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29862 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29863 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29864 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29865 };
29866 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29867 index 4a00225..09e61b4 100644
29868 --- a/drivers/ide/sis5513.c
29869 +++ b/drivers/ide/sis5513.c
29870 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29871 .cable_detect = sis_cable_detect,
29872 };
29873
29874 -static const struct ide_port_info sis5513_chipset __devinitdata = {
29875 +static const struct ide_port_info sis5513_chipset __devinitconst = {
29876 .name = DRV_NAME,
29877 .init_chipset = init_chipset_sis5513,
29878 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29879 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29880 index f21dc2a..d051cd2 100644
29881 --- a/drivers/ide/sl82c105.c
29882 +++ b/drivers/ide/sl82c105.c
29883 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29884 .dma_sff_read_status = ide_dma_sff_read_status,
29885 };
29886
29887 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
29888 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
29889 .name = DRV_NAME,
29890 .init_chipset = init_chipset_sl82c105,
29891 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29892 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29893 index 864ffe0..863a5e9 100644
29894 --- a/drivers/ide/slc90e66.c
29895 +++ b/drivers/ide/slc90e66.c
29896 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29897 .cable_detect = slc90e66_cable_detect,
29898 };
29899
29900 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
29901 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
29902 .name = DRV_NAME,
29903 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29904 .port_ops = &slc90e66_port_ops,
29905 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29906 index 4799d5c..1794678 100644
29907 --- a/drivers/ide/tc86c001.c
29908 +++ b/drivers/ide/tc86c001.c
29909 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29910 .dma_sff_read_status = ide_dma_sff_read_status,
29911 };
29912
29913 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
29914 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
29915 .name = DRV_NAME,
29916 .init_hwif = init_hwif_tc86c001,
29917 .port_ops = &tc86c001_port_ops,
29918 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29919 index 281c914..55ce1b8 100644
29920 --- a/drivers/ide/triflex.c
29921 +++ b/drivers/ide/triflex.c
29922 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29923 .set_dma_mode = triflex_set_mode,
29924 };
29925
29926 -static const struct ide_port_info triflex_device __devinitdata = {
29927 +static const struct ide_port_info triflex_device __devinitconst = {
29928 .name = DRV_NAME,
29929 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29930 .port_ops = &triflex_port_ops,
29931 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29932 index 4b42ca0..e494a98 100644
29933 --- a/drivers/ide/trm290.c
29934 +++ b/drivers/ide/trm290.c
29935 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29936 .dma_check = trm290_dma_check,
29937 };
29938
29939 -static const struct ide_port_info trm290_chipset __devinitdata = {
29940 +static const struct ide_port_info trm290_chipset __devinitconst = {
29941 .name = DRV_NAME,
29942 .init_hwif = init_hwif_trm290,
29943 .tp_ops = &trm290_tp_ops,
29944 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29945 index f46f49c..eb77678 100644
29946 --- a/drivers/ide/via82cxxx.c
29947 +++ b/drivers/ide/via82cxxx.c
29948 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29949 .cable_detect = via82cxxx_cable_detect,
29950 };
29951
29952 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29953 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29954 .name = DRV_NAME,
29955 .init_chipset = init_chipset_via82cxxx,
29956 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29957 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
29958 index eb0e2cc..14241c7 100644
29959 --- a/drivers/ieee802154/fakehard.c
29960 +++ b/drivers/ieee802154/fakehard.c
29961 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
29962 phy->transmit_power = 0xbf;
29963
29964 dev->netdev_ops = &fake_ops;
29965 - dev->ml_priv = &fake_mlme;
29966 + dev->ml_priv = (void *)&fake_mlme;
29967
29968 priv = netdev_priv(dev);
29969 priv->phy = phy;
29970 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29971 index 8b72f39..55df4c8 100644
29972 --- a/drivers/infiniband/core/cm.c
29973 +++ b/drivers/infiniband/core/cm.c
29974 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29975
29976 struct cm_counter_group {
29977 struct kobject obj;
29978 - atomic_long_t counter[CM_ATTR_COUNT];
29979 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29980 };
29981
29982 struct cm_counter_attribute {
29983 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29984 struct ib_mad_send_buf *msg = NULL;
29985 int ret;
29986
29987 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29988 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29989 counter[CM_REQ_COUNTER]);
29990
29991 /* Quick state check to discard duplicate REQs. */
29992 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
29993 if (!cm_id_priv)
29994 return;
29995
29996 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29997 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29998 counter[CM_REP_COUNTER]);
29999 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30000 if (ret)
30001 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
30002 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30003 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30004 spin_unlock_irq(&cm_id_priv->lock);
30005 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30006 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30007 counter[CM_RTU_COUNTER]);
30008 goto out;
30009 }
30010 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
30011 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30012 dreq_msg->local_comm_id);
30013 if (!cm_id_priv) {
30014 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30015 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30016 counter[CM_DREQ_COUNTER]);
30017 cm_issue_drep(work->port, work->mad_recv_wc);
30018 return -EINVAL;
30019 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
30020 case IB_CM_MRA_REP_RCVD:
30021 break;
30022 case IB_CM_TIMEWAIT:
30023 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30024 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30025 counter[CM_DREQ_COUNTER]);
30026 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30027 goto unlock;
30028 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
30029 cm_free_msg(msg);
30030 goto deref;
30031 case IB_CM_DREQ_RCVD:
30032 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30033 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30034 counter[CM_DREQ_COUNTER]);
30035 goto unlock;
30036 default:
30037 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
30038 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30039 cm_id_priv->msg, timeout)) {
30040 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30041 - atomic_long_inc(&work->port->
30042 + atomic_long_inc_unchecked(&work->port->
30043 counter_group[CM_RECV_DUPLICATES].
30044 counter[CM_MRA_COUNTER]);
30045 goto out;
30046 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
30047 break;
30048 case IB_CM_MRA_REQ_RCVD:
30049 case IB_CM_MRA_REP_RCVD:
30050 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30051 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30052 counter[CM_MRA_COUNTER]);
30053 /* fall through */
30054 default:
30055 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
30056 case IB_CM_LAP_IDLE:
30057 break;
30058 case IB_CM_MRA_LAP_SENT:
30059 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30060 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30061 counter[CM_LAP_COUNTER]);
30062 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30063 goto unlock;
30064 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
30065 cm_free_msg(msg);
30066 goto deref;
30067 case IB_CM_LAP_RCVD:
30068 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30069 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30070 counter[CM_LAP_COUNTER]);
30071 goto unlock;
30072 default:
30073 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
30074 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30075 if (cur_cm_id_priv) {
30076 spin_unlock_irq(&cm.lock);
30077 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30078 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30079 counter[CM_SIDR_REQ_COUNTER]);
30080 goto out; /* Duplicate message. */
30081 }
30082 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
30083 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30084 msg->retries = 1;
30085
30086 - atomic_long_add(1 + msg->retries,
30087 + atomic_long_add_unchecked(1 + msg->retries,
30088 &port->counter_group[CM_XMIT].counter[attr_index]);
30089 if (msg->retries)
30090 - atomic_long_add(msg->retries,
30091 + atomic_long_add_unchecked(msg->retries,
30092 &port->counter_group[CM_XMIT_RETRIES].
30093 counter[attr_index]);
30094
30095 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
30096 }
30097
30098 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30099 - atomic_long_inc(&port->counter_group[CM_RECV].
30100 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30101 counter[attr_id - CM_ATTR_ID_OFFSET]);
30102
30103 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30104 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
30105 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30106
30107 return sprintf(buf, "%ld\n",
30108 - atomic_long_read(&group->counter[cm_attr->index]));
30109 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30110 }
30111
30112 static const struct sysfs_ops cm_counter_ops = {
30113 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
30114 index 176c8f9..2627b62 100644
30115 --- a/drivers/infiniband/core/fmr_pool.c
30116 +++ b/drivers/infiniband/core/fmr_pool.c
30117 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
30118
30119 struct task_struct *thread;
30120
30121 - atomic_t req_ser;
30122 - atomic_t flush_ser;
30123 + atomic_unchecked_t req_ser;
30124 + atomic_unchecked_t flush_ser;
30125
30126 wait_queue_head_t force_wait;
30127 };
30128 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
30129 struct ib_fmr_pool *pool = pool_ptr;
30130
30131 do {
30132 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30133 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30134 ib_fmr_batch_release(pool);
30135
30136 - atomic_inc(&pool->flush_ser);
30137 + atomic_inc_unchecked(&pool->flush_ser);
30138 wake_up_interruptible(&pool->force_wait);
30139
30140 if (pool->flush_function)
30141 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
30142 }
30143
30144 set_current_state(TASK_INTERRUPTIBLE);
30145 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30146 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30147 !kthread_should_stop())
30148 schedule();
30149 __set_current_state(TASK_RUNNING);
30150 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
30151 pool->dirty_watermark = params->dirty_watermark;
30152 pool->dirty_len = 0;
30153 spin_lock_init(&pool->pool_lock);
30154 - atomic_set(&pool->req_ser, 0);
30155 - atomic_set(&pool->flush_ser, 0);
30156 + atomic_set_unchecked(&pool->req_ser, 0);
30157 + atomic_set_unchecked(&pool->flush_ser, 0);
30158 init_waitqueue_head(&pool->force_wait);
30159
30160 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30161 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
30162 }
30163 spin_unlock_irq(&pool->pool_lock);
30164
30165 - serial = atomic_inc_return(&pool->req_ser);
30166 + serial = atomic_inc_return_unchecked(&pool->req_ser);
30167 wake_up_process(pool->thread);
30168
30169 if (wait_event_interruptible(pool->force_wait,
30170 - atomic_read(&pool->flush_ser) - serial >= 0))
30171 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30172 return -EINTR;
30173
30174 return 0;
30175 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
30176 } else {
30177 list_add_tail(&fmr->list, &pool->dirty_list);
30178 if (++pool->dirty_len >= pool->dirty_watermark) {
30179 - atomic_inc(&pool->req_ser);
30180 + atomic_inc_unchecked(&pool->req_ser);
30181 wake_up_process(pool->thread);
30182 }
30183 }
30184 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
30185 index 40c8353..946b0e4 100644
30186 --- a/drivers/infiniband/hw/cxgb4/mem.c
30187 +++ b/drivers/infiniband/hw/cxgb4/mem.c
30188 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30189 int err;
30190 struct fw_ri_tpte tpt;
30191 u32 stag_idx;
30192 - static atomic_t key;
30193 + static atomic_unchecked_t key;
30194
30195 if (c4iw_fatal_error(rdev))
30196 return -EIO;
30197 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30198 &rdev->resource.tpt_fifo_lock);
30199 if (!stag_idx)
30200 return -ENOMEM;
30201 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
30202 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
30203 }
30204 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
30205 __func__, stag_state, type, pdid, stag_idx);
30206 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
30207 index 79b3dbc..96e5fcc 100644
30208 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
30209 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
30210 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30211 struct ib_atomic_eth *ateth;
30212 struct ipath_ack_entry *e;
30213 u64 vaddr;
30214 - atomic64_t *maddr;
30215 + atomic64_unchecked_t *maddr;
30216 u64 sdata;
30217 u32 rkey;
30218 u8 next;
30219 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30220 IB_ACCESS_REMOTE_ATOMIC)))
30221 goto nack_acc_unlck;
30222 /* Perform atomic OP and save result. */
30223 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30224 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30225 sdata = be64_to_cpu(ateth->swap_data);
30226 e = &qp->s_ack_queue[qp->r_head_ack_queue];
30227 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
30228 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30229 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30230 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30231 be64_to_cpu(ateth->compare_data),
30232 sdata);
30233 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
30234 index 1f95bba..9530f87 100644
30235 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
30236 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
30237 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
30238 unsigned long flags;
30239 struct ib_wc wc;
30240 u64 sdata;
30241 - atomic64_t *maddr;
30242 + atomic64_unchecked_t *maddr;
30243 enum ib_wc_status send_status;
30244
30245 /*
30246 @@ -382,11 +382,11 @@ again:
30247 IB_ACCESS_REMOTE_ATOMIC)))
30248 goto acc_err;
30249 /* Perform atomic OP and save result. */
30250 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30251 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30252 sdata = wqe->wr.wr.atomic.compare_add;
30253 *(u64 *) sqp->s_sge.sge.vaddr =
30254 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
30255 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30256 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30257 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30258 sdata, wqe->wr.wr.atomic.swap);
30259 goto send_comp;
30260 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
30261 index 5965b3d..16817fb 100644
30262 --- a/drivers/infiniband/hw/nes/nes.c
30263 +++ b/drivers/infiniband/hw/nes/nes.c
30264 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
30265 LIST_HEAD(nes_adapter_list);
30266 static LIST_HEAD(nes_dev_list);
30267
30268 -atomic_t qps_destroyed;
30269 +atomic_unchecked_t qps_destroyed;
30270
30271 static unsigned int ee_flsh_adapter;
30272 static unsigned int sysfs_nonidx_addr;
30273 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30274 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30275 struct nes_adapter *nesadapter = nesdev->nesadapter;
30276
30277 - atomic_inc(&qps_destroyed);
30278 + atomic_inc_unchecked(&qps_destroyed);
30279
30280 /* Free the control structures */
30281
30282 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30283 index 568b4f1..5ea3eff 100644
30284 --- a/drivers/infiniband/hw/nes/nes.h
30285 +++ b/drivers/infiniband/hw/nes/nes.h
30286 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
30287 extern unsigned int wqm_quanta;
30288 extern struct list_head nes_adapter_list;
30289
30290 -extern atomic_t cm_connects;
30291 -extern atomic_t cm_accepts;
30292 -extern atomic_t cm_disconnects;
30293 -extern atomic_t cm_closes;
30294 -extern atomic_t cm_connecteds;
30295 -extern atomic_t cm_connect_reqs;
30296 -extern atomic_t cm_rejects;
30297 -extern atomic_t mod_qp_timouts;
30298 -extern atomic_t qps_created;
30299 -extern atomic_t qps_destroyed;
30300 -extern atomic_t sw_qps_destroyed;
30301 +extern atomic_unchecked_t cm_connects;
30302 +extern atomic_unchecked_t cm_accepts;
30303 +extern atomic_unchecked_t cm_disconnects;
30304 +extern atomic_unchecked_t cm_closes;
30305 +extern atomic_unchecked_t cm_connecteds;
30306 +extern atomic_unchecked_t cm_connect_reqs;
30307 +extern atomic_unchecked_t cm_rejects;
30308 +extern atomic_unchecked_t mod_qp_timouts;
30309 +extern atomic_unchecked_t qps_created;
30310 +extern atomic_unchecked_t qps_destroyed;
30311 +extern atomic_unchecked_t sw_qps_destroyed;
30312 extern u32 mh_detected;
30313 extern u32 mh_pauses_sent;
30314 extern u32 cm_packets_sent;
30315 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
30316 extern u32 cm_packets_received;
30317 extern u32 cm_packets_dropped;
30318 extern u32 cm_packets_retrans;
30319 -extern atomic_t cm_listens_created;
30320 -extern atomic_t cm_listens_destroyed;
30321 +extern atomic_unchecked_t cm_listens_created;
30322 +extern atomic_unchecked_t cm_listens_destroyed;
30323 extern u32 cm_backlog_drops;
30324 -extern atomic_t cm_loopbacks;
30325 -extern atomic_t cm_nodes_created;
30326 -extern atomic_t cm_nodes_destroyed;
30327 -extern atomic_t cm_accel_dropped_pkts;
30328 -extern atomic_t cm_resets_recvd;
30329 -extern atomic_t pau_qps_created;
30330 -extern atomic_t pau_qps_destroyed;
30331 +extern atomic_unchecked_t cm_loopbacks;
30332 +extern atomic_unchecked_t cm_nodes_created;
30333 +extern atomic_unchecked_t cm_nodes_destroyed;
30334 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30335 +extern atomic_unchecked_t cm_resets_recvd;
30336 +extern atomic_unchecked_t pau_qps_created;
30337 +extern atomic_unchecked_t pau_qps_destroyed;
30338
30339 extern u32 int_mod_timer_init;
30340 extern u32 int_mod_cq_depth_256;
30341 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30342 index 0a52d72..0642f36 100644
30343 --- a/drivers/infiniband/hw/nes/nes_cm.c
30344 +++ b/drivers/infiniband/hw/nes/nes_cm.c
30345 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30346 u32 cm_packets_retrans;
30347 u32 cm_packets_created;
30348 u32 cm_packets_received;
30349 -atomic_t cm_listens_created;
30350 -atomic_t cm_listens_destroyed;
30351 +atomic_unchecked_t cm_listens_created;
30352 +atomic_unchecked_t cm_listens_destroyed;
30353 u32 cm_backlog_drops;
30354 -atomic_t cm_loopbacks;
30355 -atomic_t cm_nodes_created;
30356 -atomic_t cm_nodes_destroyed;
30357 -atomic_t cm_accel_dropped_pkts;
30358 -atomic_t cm_resets_recvd;
30359 +atomic_unchecked_t cm_loopbacks;
30360 +atomic_unchecked_t cm_nodes_created;
30361 +atomic_unchecked_t cm_nodes_destroyed;
30362 +atomic_unchecked_t cm_accel_dropped_pkts;
30363 +atomic_unchecked_t cm_resets_recvd;
30364
30365 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
30366 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
30367 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
30368
30369 static struct nes_cm_core *g_cm_core;
30370
30371 -atomic_t cm_connects;
30372 -atomic_t cm_accepts;
30373 -atomic_t cm_disconnects;
30374 -atomic_t cm_closes;
30375 -atomic_t cm_connecteds;
30376 -atomic_t cm_connect_reqs;
30377 -atomic_t cm_rejects;
30378 +atomic_unchecked_t cm_connects;
30379 +atomic_unchecked_t cm_accepts;
30380 +atomic_unchecked_t cm_disconnects;
30381 +atomic_unchecked_t cm_closes;
30382 +atomic_unchecked_t cm_connecteds;
30383 +atomic_unchecked_t cm_connect_reqs;
30384 +atomic_unchecked_t cm_rejects;
30385
30386 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
30387 {
30388 @@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30389 kfree(listener);
30390 listener = NULL;
30391 ret = 0;
30392 - atomic_inc(&cm_listens_destroyed);
30393 + atomic_inc_unchecked(&cm_listens_destroyed);
30394 } else {
30395 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30396 }
30397 @@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30398 cm_node->rem_mac);
30399
30400 add_hte_node(cm_core, cm_node);
30401 - atomic_inc(&cm_nodes_created);
30402 + atomic_inc_unchecked(&cm_nodes_created);
30403
30404 return cm_node;
30405 }
30406 @@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30407 }
30408
30409 atomic_dec(&cm_core->node_cnt);
30410 - atomic_inc(&cm_nodes_destroyed);
30411 + atomic_inc_unchecked(&cm_nodes_destroyed);
30412 nesqp = cm_node->nesqp;
30413 if (nesqp) {
30414 nesqp->cm_node = NULL;
30415 @@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30416
30417 static void drop_packet(struct sk_buff *skb)
30418 {
30419 - atomic_inc(&cm_accel_dropped_pkts);
30420 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30421 dev_kfree_skb_any(skb);
30422 }
30423
30424 @@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30425 {
30426
30427 int reset = 0; /* whether to send reset in case of err.. */
30428 - atomic_inc(&cm_resets_recvd);
30429 + atomic_inc_unchecked(&cm_resets_recvd);
30430 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30431 " refcnt=%d\n", cm_node, cm_node->state,
30432 atomic_read(&cm_node->ref_count));
30433 @@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30434 rem_ref_cm_node(cm_node->cm_core, cm_node);
30435 return NULL;
30436 }
30437 - atomic_inc(&cm_loopbacks);
30438 + atomic_inc_unchecked(&cm_loopbacks);
30439 loopbackremotenode->loopbackpartner = cm_node;
30440 loopbackremotenode->tcp_cntxt.rcv_wscale =
30441 NES_CM_DEFAULT_RCV_WND_SCALE;
30442 @@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30443 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
30444 else {
30445 rem_ref_cm_node(cm_core, cm_node);
30446 - atomic_inc(&cm_accel_dropped_pkts);
30447 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30448 dev_kfree_skb_any(skb);
30449 }
30450 break;
30451 @@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30452
30453 if ((cm_id) && (cm_id->event_handler)) {
30454 if (issue_disconn) {
30455 - atomic_inc(&cm_disconnects);
30456 + atomic_inc_unchecked(&cm_disconnects);
30457 cm_event.event = IW_CM_EVENT_DISCONNECT;
30458 cm_event.status = disconn_status;
30459 cm_event.local_addr = cm_id->local_addr;
30460 @@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30461 }
30462
30463 if (issue_close) {
30464 - atomic_inc(&cm_closes);
30465 + atomic_inc_unchecked(&cm_closes);
30466 nes_disconnect(nesqp, 1);
30467
30468 cm_id->provider_data = nesqp;
30469 @@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30470
30471 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30472 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30473 - atomic_inc(&cm_accepts);
30474 + atomic_inc_unchecked(&cm_accepts);
30475
30476 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30477 netdev_refcnt_read(nesvnic->netdev));
30478 @@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30479 struct nes_cm_core *cm_core;
30480 u8 *start_buff;
30481
30482 - atomic_inc(&cm_rejects);
30483 + atomic_inc_unchecked(&cm_rejects);
30484 cm_node = (struct nes_cm_node *)cm_id->provider_data;
30485 loopback = cm_node->loopbackpartner;
30486 cm_core = cm_node->cm_core;
30487 @@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30488 ntohl(cm_id->local_addr.sin_addr.s_addr),
30489 ntohs(cm_id->local_addr.sin_port));
30490
30491 - atomic_inc(&cm_connects);
30492 + atomic_inc_unchecked(&cm_connects);
30493 nesqp->active_conn = 1;
30494
30495 /* cache the cm_id in the qp */
30496 @@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30497 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30498 return err;
30499 }
30500 - atomic_inc(&cm_listens_created);
30501 + atomic_inc_unchecked(&cm_listens_created);
30502 }
30503
30504 cm_id->add_ref(cm_id);
30505 @@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30506
30507 if (nesqp->destroyed)
30508 return;
30509 - atomic_inc(&cm_connecteds);
30510 + atomic_inc_unchecked(&cm_connecteds);
30511 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30512 " local port 0x%04X. jiffies = %lu.\n",
30513 nesqp->hwqp.qp_id,
30514 @@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30515
30516 cm_id->add_ref(cm_id);
30517 ret = cm_id->event_handler(cm_id, &cm_event);
30518 - atomic_inc(&cm_closes);
30519 + atomic_inc_unchecked(&cm_closes);
30520 cm_event.event = IW_CM_EVENT_CLOSE;
30521 cm_event.status = 0;
30522 cm_event.provider_data = cm_id->provider_data;
30523 @@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30524 return;
30525 cm_id = cm_node->cm_id;
30526
30527 - atomic_inc(&cm_connect_reqs);
30528 + atomic_inc_unchecked(&cm_connect_reqs);
30529 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30530 cm_node, cm_id, jiffies);
30531
30532 @@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30533 return;
30534 cm_id = cm_node->cm_id;
30535
30536 - atomic_inc(&cm_connect_reqs);
30537 + atomic_inc_unchecked(&cm_connect_reqs);
30538 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30539 cm_node, cm_id, jiffies);
30540
30541 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
30542 index b3b2a24..7bfaf1e 100644
30543 --- a/drivers/infiniband/hw/nes/nes_mgt.c
30544 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
30545 @@ -40,8 +40,8 @@
30546 #include "nes.h"
30547 #include "nes_mgt.h"
30548
30549 -atomic_t pau_qps_created;
30550 -atomic_t pau_qps_destroyed;
30551 +atomic_unchecked_t pau_qps_created;
30552 +atomic_unchecked_t pau_qps_destroyed;
30553
30554 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
30555 {
30556 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
30557 {
30558 struct sk_buff *skb;
30559 unsigned long flags;
30560 - atomic_inc(&pau_qps_destroyed);
30561 + atomic_inc_unchecked(&pau_qps_destroyed);
30562
30563 /* Free packets that have not yet been forwarded */
30564 /* Lock is acquired by skb_dequeue when removing the skb */
30565 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
30566 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
30567 skb_queue_head_init(&nesqp->pau_list);
30568 spin_lock_init(&nesqp->pau_lock);
30569 - atomic_inc(&pau_qps_created);
30570 + atomic_inc_unchecked(&pau_qps_created);
30571 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
30572 }
30573
30574 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30575 index c00d2f3..8834298 100644
30576 --- a/drivers/infiniband/hw/nes/nes_nic.c
30577 +++ b/drivers/infiniband/hw/nes/nes_nic.c
30578 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30579 target_stat_values[++index] = mh_detected;
30580 target_stat_values[++index] = mh_pauses_sent;
30581 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30582 - target_stat_values[++index] = atomic_read(&cm_connects);
30583 - target_stat_values[++index] = atomic_read(&cm_accepts);
30584 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30585 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30586 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30587 - target_stat_values[++index] = atomic_read(&cm_rejects);
30588 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30589 - target_stat_values[++index] = atomic_read(&qps_created);
30590 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30591 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30592 - target_stat_values[++index] = atomic_read(&cm_closes);
30593 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30594 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30595 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30596 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30597 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30598 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30599 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30600 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30601 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30602 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30603 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30604 target_stat_values[++index] = cm_packets_sent;
30605 target_stat_values[++index] = cm_packets_bounced;
30606 target_stat_values[++index] = cm_packets_created;
30607 target_stat_values[++index] = cm_packets_received;
30608 target_stat_values[++index] = cm_packets_dropped;
30609 target_stat_values[++index] = cm_packets_retrans;
30610 - target_stat_values[++index] = atomic_read(&cm_listens_created);
30611 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30612 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30613 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30614 target_stat_values[++index] = cm_backlog_drops;
30615 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30616 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30617 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30618 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30619 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30620 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30621 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30622 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30623 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30624 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30625 target_stat_values[++index] = nesadapter->free_4kpbl;
30626 target_stat_values[++index] = nesadapter->free_256pbl;
30627 target_stat_values[++index] = int_mod_timer_init;
30628 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
30629 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
30630 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
30631 - target_stat_values[++index] = atomic_read(&pau_qps_created);
30632 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
30633 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
30634 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
30635 }
30636
30637 /**
30638 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30639 index 5095bc4..41e8fff 100644
30640 --- a/drivers/infiniband/hw/nes/nes_verbs.c
30641 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
30642 @@ -46,9 +46,9 @@
30643
30644 #include <rdma/ib_umem.h>
30645
30646 -atomic_t mod_qp_timouts;
30647 -atomic_t qps_created;
30648 -atomic_t sw_qps_destroyed;
30649 +atomic_unchecked_t mod_qp_timouts;
30650 +atomic_unchecked_t qps_created;
30651 +atomic_unchecked_t sw_qps_destroyed;
30652
30653 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30654
30655 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30656 if (init_attr->create_flags)
30657 return ERR_PTR(-EINVAL);
30658
30659 - atomic_inc(&qps_created);
30660 + atomic_inc_unchecked(&qps_created);
30661 switch (init_attr->qp_type) {
30662 case IB_QPT_RC:
30663 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30664 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30665 struct iw_cm_event cm_event;
30666 int ret = 0;
30667
30668 - atomic_inc(&sw_qps_destroyed);
30669 + atomic_inc_unchecked(&sw_qps_destroyed);
30670 nesqp->destroyed = 1;
30671
30672 /* Blow away the connection if it exists. */
30673 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30674 index b881bdc..c2e360c 100644
30675 --- a/drivers/infiniband/hw/qib/qib.h
30676 +++ b/drivers/infiniband/hw/qib/qib.h
30677 @@ -51,6 +51,7 @@
30678 #include <linux/completion.h>
30679 #include <linux/kref.h>
30680 #include <linux/sched.h>
30681 +#include <linux/slab.h>
30682
30683 #include "qib_common.h"
30684 #include "qib_verbs.h"
30685 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30686 index c351aa4..e6967c2 100644
30687 --- a/drivers/input/gameport/gameport.c
30688 +++ b/drivers/input/gameport/gameport.c
30689 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30690 */
30691 static void gameport_init_port(struct gameport *gameport)
30692 {
30693 - static atomic_t gameport_no = ATOMIC_INIT(0);
30694 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30695
30696 __module_get(THIS_MODULE);
30697
30698 mutex_init(&gameport->drv_mutex);
30699 device_initialize(&gameport->dev);
30700 dev_set_name(&gameport->dev, "gameport%lu",
30701 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
30702 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30703 gameport->dev.bus = &gameport_bus;
30704 gameport->dev.release = gameport_release_port;
30705 if (gameport->parent)
30706 diff --git a/drivers/input/input.c b/drivers/input/input.c
30707 index da38d97..2aa0b79 100644
30708 --- a/drivers/input/input.c
30709 +++ b/drivers/input/input.c
30710 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30711 */
30712 int input_register_device(struct input_dev *dev)
30713 {
30714 - static atomic_t input_no = ATOMIC_INIT(0);
30715 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30716 struct input_handler *handler;
30717 const char *path;
30718 int error;
30719 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30720 dev->setkeycode = input_default_setkeycode;
30721
30722 dev_set_name(&dev->dev, "input%ld",
30723 - (unsigned long) atomic_inc_return(&input_no) - 1);
30724 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30725
30726 error = device_add(&dev->dev);
30727 if (error)
30728 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30729 index b8d8611..7a4a04b 100644
30730 --- a/drivers/input/joystick/sidewinder.c
30731 +++ b/drivers/input/joystick/sidewinder.c
30732 @@ -30,6 +30,7 @@
30733 #include <linux/kernel.h>
30734 #include <linux/module.h>
30735 #include <linux/slab.h>
30736 +#include <linux/sched.h>
30737 #include <linux/init.h>
30738 #include <linux/input.h>
30739 #include <linux/gameport.h>
30740 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30741 index d728875..844c89b 100644
30742 --- a/drivers/input/joystick/xpad.c
30743 +++ b/drivers/input/joystick/xpad.c
30744 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30745
30746 static int xpad_led_probe(struct usb_xpad *xpad)
30747 {
30748 - static atomic_t led_seq = ATOMIC_INIT(0);
30749 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30750 long led_no;
30751 struct xpad_led *led;
30752 struct led_classdev *led_cdev;
30753 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30754 if (!led)
30755 return -ENOMEM;
30756
30757 - led_no = (long)atomic_inc_return(&led_seq) - 1;
30758 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30759
30760 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30761 led->xpad = xpad;
30762 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30763 index 0110b5a..d3ad144 100644
30764 --- a/drivers/input/mousedev.c
30765 +++ b/drivers/input/mousedev.c
30766 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30767
30768 spin_unlock_irq(&client->packet_lock);
30769
30770 - if (copy_to_user(buffer, data, count))
30771 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
30772 return -EFAULT;
30773
30774 return count;
30775 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30776 index ba70058..571d25d 100644
30777 --- a/drivers/input/serio/serio.c
30778 +++ b/drivers/input/serio/serio.c
30779 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30780 */
30781 static void serio_init_port(struct serio *serio)
30782 {
30783 - static atomic_t serio_no = ATOMIC_INIT(0);
30784 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30785
30786 __module_get(THIS_MODULE);
30787
30788 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30789 mutex_init(&serio->drv_mutex);
30790 device_initialize(&serio->dev);
30791 dev_set_name(&serio->dev, "serio%ld",
30792 - (long)atomic_inc_return(&serio_no) - 1);
30793 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
30794 serio->dev.bus = &serio_bus;
30795 serio->dev.release = serio_release_port;
30796 serio->dev.groups = serio_device_attr_groups;
30797 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30798 index e44933d..9ba484a 100644
30799 --- a/drivers/isdn/capi/capi.c
30800 +++ b/drivers/isdn/capi/capi.c
30801 @@ -83,8 +83,8 @@ struct capiminor {
30802
30803 struct capi20_appl *ap;
30804 u32 ncci;
30805 - atomic_t datahandle;
30806 - atomic_t msgid;
30807 + atomic_unchecked_t datahandle;
30808 + atomic_unchecked_t msgid;
30809
30810 struct tty_port port;
30811 int ttyinstop;
30812 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30813 capimsg_setu16(s, 2, mp->ap->applid);
30814 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30815 capimsg_setu8 (s, 5, CAPI_RESP);
30816 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30817 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30818 capimsg_setu32(s, 8, mp->ncci);
30819 capimsg_setu16(s, 12, datahandle);
30820 }
30821 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30822 mp->outbytes -= len;
30823 spin_unlock_bh(&mp->outlock);
30824
30825 - datahandle = atomic_inc_return(&mp->datahandle);
30826 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30827 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30828 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30829 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30830 capimsg_setu16(skb->data, 2, mp->ap->applid);
30831 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30832 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30833 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30834 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30835 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30836 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30837 capimsg_setu16(skb->data, 16, len); /* Data length */
30838 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30839 index db621db..825ea1a 100644
30840 --- a/drivers/isdn/gigaset/common.c
30841 +++ b/drivers/isdn/gigaset/common.c
30842 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30843 cs->commands_pending = 0;
30844 cs->cur_at_seq = 0;
30845 cs->gotfwver = -1;
30846 - cs->open_count = 0;
30847 + local_set(&cs->open_count, 0);
30848 cs->dev = NULL;
30849 cs->tty = NULL;
30850 cs->tty_dev = NULL;
30851 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30852 index 212efaf..f187c6b 100644
30853 --- a/drivers/isdn/gigaset/gigaset.h
30854 +++ b/drivers/isdn/gigaset/gigaset.h
30855 @@ -35,6 +35,7 @@
30856 #include <linux/tty_driver.h>
30857 #include <linux/list.h>
30858 #include <linux/atomic.h>
30859 +#include <asm/local.h>
30860
30861 #define GIG_VERSION {0, 5, 0, 0}
30862 #define GIG_COMPAT {0, 4, 0, 0}
30863 @@ -433,7 +434,7 @@ struct cardstate {
30864 spinlock_t cmdlock;
30865 unsigned curlen, cmdbytes;
30866
30867 - unsigned open_count;
30868 + local_t open_count;
30869 struct tty_struct *tty;
30870 struct tasklet_struct if_wake_tasklet;
30871 unsigned control_state;
30872 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30873 index ee0a549..a7c9798 100644
30874 --- a/drivers/isdn/gigaset/interface.c
30875 +++ b/drivers/isdn/gigaset/interface.c
30876 @@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30877 }
30878 tty->driver_data = cs;
30879
30880 - ++cs->open_count;
30881 -
30882 - if (cs->open_count == 1) {
30883 + if (local_inc_return(&cs->open_count) == 1) {
30884 spin_lock_irqsave(&cs->lock, flags);
30885 cs->tty = tty;
30886 spin_unlock_irqrestore(&cs->lock, flags);
30887 @@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30888
30889 if (!cs->connected)
30890 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30891 - else if (!cs->open_count)
30892 + else if (!local_read(&cs->open_count))
30893 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30894 else {
30895 - if (!--cs->open_count) {
30896 + if (!local_dec_return(&cs->open_count)) {
30897 spin_lock_irqsave(&cs->lock, flags);
30898 cs->tty = NULL;
30899 spin_unlock_irqrestore(&cs->lock, flags);
30900 @@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
30901 if (!cs->connected) {
30902 gig_dbg(DEBUG_IF, "not connected");
30903 retval = -ENODEV;
30904 - } else if (!cs->open_count)
30905 + } else if (!local_read(&cs->open_count))
30906 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30907 else {
30908 retval = 0;
30909 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30910 retval = -ENODEV;
30911 goto done;
30912 }
30913 - if (!cs->open_count) {
30914 + if (!local_read(&cs->open_count)) {
30915 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30916 retval = -ENODEV;
30917 goto done;
30918 @@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
30919 if (!cs->connected) {
30920 gig_dbg(DEBUG_IF, "not connected");
30921 retval = -ENODEV;
30922 - } else if (!cs->open_count)
30923 + } else if (!local_read(&cs->open_count))
30924 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30925 else if (cs->mstate != MS_LOCKED) {
30926 dev_warn(cs->dev, "can't write to unlocked device\n");
30927 @@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30928
30929 if (!cs->connected)
30930 gig_dbg(DEBUG_IF, "not connected");
30931 - else if (!cs->open_count)
30932 + else if (!local_read(&cs->open_count))
30933 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30934 else if (cs->mstate != MS_LOCKED)
30935 dev_warn(cs->dev, "can't write to unlocked device\n");
30936 @@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
30937
30938 if (!cs->connected)
30939 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30940 - else if (!cs->open_count)
30941 + else if (!local_read(&cs->open_count))
30942 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30943 else
30944 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30945 @@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
30946
30947 if (!cs->connected)
30948 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30949 - else if (!cs->open_count)
30950 + else if (!local_read(&cs->open_count))
30951 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30952 else
30953 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30954 @@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30955 goto out;
30956 }
30957
30958 - if (!cs->open_count) {
30959 + if (!local_read(&cs->open_count)) {
30960 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30961 goto out;
30962 }
30963 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30964 index 2a57da59..e7a12ed 100644
30965 --- a/drivers/isdn/hardware/avm/b1.c
30966 +++ b/drivers/isdn/hardware/avm/b1.c
30967 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30968 }
30969 if (left) {
30970 if (t4file->user) {
30971 - if (copy_from_user(buf, dp, left))
30972 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30973 return -EFAULT;
30974 } else {
30975 memcpy(buf, dp, left);
30976 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30977 }
30978 if (left) {
30979 if (config->user) {
30980 - if (copy_from_user(buf, dp, left))
30981 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30982 return -EFAULT;
30983 } else {
30984 memcpy(buf, dp, left);
30985 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30986 index 85784a7..a19ca98 100644
30987 --- a/drivers/isdn/hardware/eicon/divasync.h
30988 +++ b/drivers/isdn/hardware/eicon/divasync.h
30989 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
30990 } diva_didd_add_adapter_t;
30991 typedef struct _diva_didd_remove_adapter {
30992 IDI_CALL p_request;
30993 -} diva_didd_remove_adapter_t;
30994 +} __no_const diva_didd_remove_adapter_t;
30995 typedef struct _diva_didd_read_adapter_array {
30996 void * buffer;
30997 dword length;
30998 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
30999 index a3bd163..8956575 100644
31000 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
31001 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
31002 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31003 typedef struct _diva_os_idi_adapter_interface {
31004 diva_init_card_proc_t cleanup_adapter_proc;
31005 diva_cmd_card_proc_t cmd_proc;
31006 -} diva_os_idi_adapter_interface_t;
31007 +} __no_const diva_os_idi_adapter_interface_t;
31008
31009 typedef struct _diva_os_xdi_adapter {
31010 struct list_head link;
31011 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
31012 index 2339d73..802ab87 100644
31013 --- a/drivers/isdn/i4l/isdn_net.c
31014 +++ b/drivers/isdn/i4l/isdn_net.c
31015 @@ -1901,7 +1901,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
31016 {
31017 isdn_net_local *lp = netdev_priv(dev);
31018 unsigned char *p;
31019 - ushort len = 0;
31020 + int len = 0;
31021
31022 switch (lp->p_encap) {
31023 case ISDN_NET_ENCAP_ETHER:
31024 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
31025 index 1f355bb..43f1fea 100644
31026 --- a/drivers/isdn/icn/icn.c
31027 +++ b/drivers/isdn/icn/icn.c
31028 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
31029 if (count > len)
31030 count = len;
31031 if (user) {
31032 - if (copy_from_user(msg, buf, count))
31033 + if (count > sizeof msg || copy_from_user(msg, buf, count))
31034 return -EFAULT;
31035 } else
31036 memcpy(msg, buf, count);
31037 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
31038 index b5fdcb7..5b6c59f 100644
31039 --- a/drivers/lguest/core.c
31040 +++ b/drivers/lguest/core.c
31041 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
31042 * it's worked so far. The end address needs +1 because __get_vm_area
31043 * allocates an extra guard page, so we need space for that.
31044 */
31045 +
31046 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31047 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31048 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31049 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31050 +#else
31051 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31052 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31053 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31054 +#endif
31055 +
31056 if (!switcher_vma) {
31057 err = -ENOMEM;
31058 printk("lguest: could not map switcher pages high\n");
31059 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
31060 * Now the Switcher is mapped at the right address, we can't fail!
31061 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
31062 */
31063 - memcpy(switcher_vma->addr, start_switcher_text,
31064 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31065 end_switcher_text - start_switcher_text);
31066
31067 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31068 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
31069 index 65af42f..530c87a 100644
31070 --- a/drivers/lguest/x86/core.c
31071 +++ b/drivers/lguest/x86/core.c
31072 @@ -59,7 +59,7 @@ static struct {
31073 /* Offset from where switcher.S was compiled to where we've copied it */
31074 static unsigned long switcher_offset(void)
31075 {
31076 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31077 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31078 }
31079
31080 /* This cpu's struct lguest_pages. */
31081 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
31082 * These copies are pretty cheap, so we do them unconditionally: */
31083 /* Save the current Host top-level page directory.
31084 */
31085 +
31086 +#ifdef CONFIG_PAX_PER_CPU_PGD
31087 + pages->state.host_cr3 = read_cr3();
31088 +#else
31089 pages->state.host_cr3 = __pa(current->mm->pgd);
31090 +#endif
31091 +
31092 /*
31093 * Set up the Guest's page tables to see this CPU's pages (and no
31094 * other CPU's pages).
31095 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
31096 * compiled-in switcher code and the high-mapped copy we just made.
31097 */
31098 for (i = 0; i < IDT_ENTRIES; i++)
31099 - default_idt_entries[i] += switcher_offset();
31100 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31101
31102 /*
31103 * Set up the Switcher's per-cpu areas.
31104 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
31105 * it will be undisturbed when we switch. To change %cs and jump we
31106 * need this structure to feed to Intel's "lcall" instruction.
31107 */
31108 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31109 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31110 lguest_entry.segment = LGUEST_CS;
31111
31112 /*
31113 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
31114 index 40634b0..4f5855e 100644
31115 --- a/drivers/lguest/x86/switcher_32.S
31116 +++ b/drivers/lguest/x86/switcher_32.S
31117 @@ -87,6 +87,7 @@
31118 #include <asm/page.h>
31119 #include <asm/segment.h>
31120 #include <asm/lguest.h>
31121 +#include <asm/processor-flags.h>
31122
31123 // We mark the start of the code to copy
31124 // It's placed in .text tho it's never run here
31125 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31126 // Changes type when we load it: damn Intel!
31127 // For after we switch over our page tables
31128 // That entry will be read-only: we'd crash.
31129 +
31130 +#ifdef CONFIG_PAX_KERNEXEC
31131 + mov %cr0, %edx
31132 + xor $X86_CR0_WP, %edx
31133 + mov %edx, %cr0
31134 +#endif
31135 +
31136 movl $(GDT_ENTRY_TSS*8), %edx
31137 ltr %dx
31138
31139 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31140 // Let's clear it again for our return.
31141 // The GDT descriptor of the Host
31142 // Points to the table after two "size" bytes
31143 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31144 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31145 // Clear "used" from type field (byte 5, bit 2)
31146 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31147 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31148 +
31149 +#ifdef CONFIG_PAX_KERNEXEC
31150 + mov %cr0, %eax
31151 + xor $X86_CR0_WP, %eax
31152 + mov %eax, %cr0
31153 +#endif
31154
31155 // Once our page table's switched, the Guest is live!
31156 // The Host fades as we run this final step.
31157 @@ -295,13 +309,12 @@ deliver_to_host:
31158 // I consulted gcc, and it gave
31159 // These instructions, which I gladly credit:
31160 leal (%edx,%ebx,8), %eax
31161 - movzwl (%eax),%edx
31162 - movl 4(%eax), %eax
31163 - xorw %ax, %ax
31164 - orl %eax, %edx
31165 + movl 4(%eax), %edx
31166 + movw (%eax), %dx
31167 // Now the address of the handler's in %edx
31168 // We call it now: its "iret" drops us home.
31169 - jmp *%edx
31170 + ljmp $__KERNEL_CS, $1f
31171 +1: jmp *%edx
31172
31173 // Every interrupt can come to us here
31174 // But we must truly tell each apart.
31175 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
31176 index 4daf9e5..b8d1d0f 100644
31177 --- a/drivers/macintosh/macio_asic.c
31178 +++ b/drivers/macintosh/macio_asic.c
31179 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
31180 * MacIO is matched against any Apple ID, it's probe() function
31181 * will then decide wether it applies or not
31182 */
31183 -static const struct pci_device_id __devinitdata pci_ids [] = { {
31184 +static const struct pci_device_id __devinitconst pci_ids [] = { {
31185 .vendor = PCI_VENDOR_ID_APPLE,
31186 .device = PCI_ANY_ID,
31187 .subvendor = PCI_ANY_ID,
31188 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
31189 index 31c2dc2..a2de7a6 100644
31190 --- a/drivers/md/dm-ioctl.c
31191 +++ b/drivers/md/dm-ioctl.c
31192 @@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
31193 cmd == DM_LIST_VERSIONS_CMD)
31194 return 0;
31195
31196 - if ((cmd == DM_DEV_CREATE_CMD)) {
31197 + if (cmd == DM_DEV_CREATE_CMD) {
31198 if (!*param->name) {
31199 DMWARN("name not supplied when creating device");
31200 return -EINVAL;
31201 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
31202 index 9bfd057..01180bc 100644
31203 --- a/drivers/md/dm-raid1.c
31204 +++ b/drivers/md/dm-raid1.c
31205 @@ -40,7 +40,7 @@ enum dm_raid1_error {
31206
31207 struct mirror {
31208 struct mirror_set *ms;
31209 - atomic_t error_count;
31210 + atomic_unchecked_t error_count;
31211 unsigned long error_type;
31212 struct dm_dev *dev;
31213 sector_t offset;
31214 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
31215 struct mirror *m;
31216
31217 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
31218 - if (!atomic_read(&m->error_count))
31219 + if (!atomic_read_unchecked(&m->error_count))
31220 return m;
31221
31222 return NULL;
31223 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
31224 * simple way to tell if a device has encountered
31225 * errors.
31226 */
31227 - atomic_inc(&m->error_count);
31228 + atomic_inc_unchecked(&m->error_count);
31229
31230 if (test_and_set_bit(error_type, &m->error_type))
31231 return;
31232 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
31233 struct mirror *m = get_default_mirror(ms);
31234
31235 do {
31236 - if (likely(!atomic_read(&m->error_count)))
31237 + if (likely(!atomic_read_unchecked(&m->error_count)))
31238 return m;
31239
31240 if (m-- == ms->mirror)
31241 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
31242 {
31243 struct mirror *default_mirror = get_default_mirror(m->ms);
31244
31245 - return !atomic_read(&default_mirror->error_count);
31246 + return !atomic_read_unchecked(&default_mirror->error_count);
31247 }
31248
31249 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31250 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
31251 */
31252 if (likely(region_in_sync(ms, region, 1)))
31253 m = choose_mirror(ms, bio->bi_sector);
31254 - else if (m && atomic_read(&m->error_count))
31255 + else if (m && atomic_read_unchecked(&m->error_count))
31256 m = NULL;
31257
31258 if (likely(m))
31259 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
31260 }
31261
31262 ms->mirror[mirror].ms = ms;
31263 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31264 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31265 ms->mirror[mirror].error_type = 0;
31266 ms->mirror[mirror].offset = offset;
31267
31268 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
31269 */
31270 static char device_status_char(struct mirror *m)
31271 {
31272 - if (!atomic_read(&(m->error_count)))
31273 + if (!atomic_read_unchecked(&(m->error_count)))
31274 return 'A';
31275
31276 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
31277 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31278 index 3d80cf0..b77cc47 100644
31279 --- a/drivers/md/dm-stripe.c
31280 +++ b/drivers/md/dm-stripe.c
31281 @@ -20,7 +20,7 @@ struct stripe {
31282 struct dm_dev *dev;
31283 sector_t physical_start;
31284
31285 - atomic_t error_count;
31286 + atomic_unchecked_t error_count;
31287 };
31288
31289 struct stripe_c {
31290 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31291 kfree(sc);
31292 return r;
31293 }
31294 - atomic_set(&(sc->stripe[i].error_count), 0);
31295 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31296 }
31297
31298 ti->private = sc;
31299 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31300 DMEMIT("%d ", sc->stripes);
31301 for (i = 0; i < sc->stripes; i++) {
31302 DMEMIT("%s ", sc->stripe[i].dev->name);
31303 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31304 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31305 'D' : 'A';
31306 }
31307 buffer[i] = '\0';
31308 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31309 */
31310 for (i = 0; i < sc->stripes; i++)
31311 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31312 - atomic_inc(&(sc->stripe[i].error_count));
31313 - if (atomic_read(&(sc->stripe[i].error_count)) <
31314 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31315 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31316 DM_IO_ERROR_THRESHOLD)
31317 schedule_work(&sc->trigger_event);
31318 }
31319 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31320 index 8e91321..fd17aef 100644
31321 --- a/drivers/md/dm-table.c
31322 +++ b/drivers/md/dm-table.c
31323 @@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31324 if (!dev_size)
31325 return 0;
31326
31327 - if ((start >= dev_size) || (start + len > dev_size)) {
31328 + if ((start >= dev_size) || (len > dev_size - start)) {
31329 DMWARN("%s: %s too small for target: "
31330 "start=%llu, len=%llu, dev_size=%llu",
31331 dm_device_name(ti->table->md), bdevname(bdev, b),
31332 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
31333 index 59c4f04..4c7b661 100644
31334 --- a/drivers/md/dm-thin-metadata.c
31335 +++ b/drivers/md/dm-thin-metadata.c
31336 @@ -431,7 +431,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31337
31338 pmd->info.tm = tm;
31339 pmd->info.levels = 2;
31340 - pmd->info.value_type.context = pmd->data_sm;
31341 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31342 pmd->info.value_type.size = sizeof(__le64);
31343 pmd->info.value_type.inc = data_block_inc;
31344 pmd->info.value_type.dec = data_block_dec;
31345 @@ -450,7 +450,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31346
31347 pmd->bl_info.tm = tm;
31348 pmd->bl_info.levels = 1;
31349 - pmd->bl_info.value_type.context = pmd->data_sm;
31350 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31351 pmd->bl_info.value_type.size = sizeof(__le64);
31352 pmd->bl_info.value_type.inc = data_block_inc;
31353 pmd->bl_info.value_type.dec = data_block_dec;
31354 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31355 index 4720f68..78d1df7 100644
31356 --- a/drivers/md/dm.c
31357 +++ b/drivers/md/dm.c
31358 @@ -177,9 +177,9 @@ struct mapped_device {
31359 /*
31360 * Event handling.
31361 */
31362 - atomic_t event_nr;
31363 + atomic_unchecked_t event_nr;
31364 wait_queue_head_t eventq;
31365 - atomic_t uevent_seq;
31366 + atomic_unchecked_t uevent_seq;
31367 struct list_head uevent_list;
31368 spinlock_t uevent_lock; /* Protect access to uevent_list */
31369
31370 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
31371 rwlock_init(&md->map_lock);
31372 atomic_set(&md->holders, 1);
31373 atomic_set(&md->open_count, 0);
31374 - atomic_set(&md->event_nr, 0);
31375 - atomic_set(&md->uevent_seq, 0);
31376 + atomic_set_unchecked(&md->event_nr, 0);
31377 + atomic_set_unchecked(&md->uevent_seq, 0);
31378 INIT_LIST_HEAD(&md->uevent_list);
31379 spin_lock_init(&md->uevent_lock);
31380
31381 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
31382
31383 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31384
31385 - atomic_inc(&md->event_nr);
31386 + atomic_inc_unchecked(&md->event_nr);
31387 wake_up(&md->eventq);
31388 }
31389
31390 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31391
31392 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31393 {
31394 - return atomic_add_return(1, &md->uevent_seq);
31395 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31396 }
31397
31398 uint32_t dm_get_event_nr(struct mapped_device *md)
31399 {
31400 - return atomic_read(&md->event_nr);
31401 + return atomic_read_unchecked(&md->event_nr);
31402 }
31403
31404 int dm_wait_event(struct mapped_device *md, int event_nr)
31405 {
31406 return wait_event_interruptible(md->eventq,
31407 - (event_nr != atomic_read(&md->event_nr)));
31408 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31409 }
31410
31411 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31412 diff --git a/drivers/md/md.c b/drivers/md/md.c
31413 index f47f1f8..b7f559e 100644
31414 --- a/drivers/md/md.c
31415 +++ b/drivers/md/md.c
31416 @@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31417 * start build, activate spare
31418 */
31419 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31420 -static atomic_t md_event_count;
31421 +static atomic_unchecked_t md_event_count;
31422 void md_new_event(struct mddev *mddev)
31423 {
31424 - atomic_inc(&md_event_count);
31425 + atomic_inc_unchecked(&md_event_count);
31426 wake_up(&md_event_waiters);
31427 }
31428 EXPORT_SYMBOL_GPL(md_new_event);
31429 @@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31430 */
31431 static void md_new_event_inintr(struct mddev *mddev)
31432 {
31433 - atomic_inc(&md_event_count);
31434 + atomic_inc_unchecked(&md_event_count);
31435 wake_up(&md_event_waiters);
31436 }
31437
31438 @@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
31439
31440 rdev->preferred_minor = 0xffff;
31441 rdev->data_offset = le64_to_cpu(sb->data_offset);
31442 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31443 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31444
31445 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31446 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31447 @@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
31448 else
31449 sb->resync_offset = cpu_to_le64(0);
31450
31451 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31452 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31453
31454 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31455 sb->size = cpu_to_le64(mddev->dev_sectors);
31456 @@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31457 static ssize_t
31458 errors_show(struct md_rdev *rdev, char *page)
31459 {
31460 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31461 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31462 }
31463
31464 static ssize_t
31465 @@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
31466 char *e;
31467 unsigned long n = simple_strtoul(buf, &e, 10);
31468 if (*buf && (*e == 0 || *e == '\n')) {
31469 - atomic_set(&rdev->corrected_errors, n);
31470 + atomic_set_unchecked(&rdev->corrected_errors, n);
31471 return len;
31472 }
31473 return -EINVAL;
31474 @@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
31475 rdev->sb_loaded = 0;
31476 rdev->bb_page = NULL;
31477 atomic_set(&rdev->nr_pending, 0);
31478 - atomic_set(&rdev->read_errors, 0);
31479 - atomic_set(&rdev->corrected_errors, 0);
31480 + atomic_set_unchecked(&rdev->read_errors, 0);
31481 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31482
31483 INIT_LIST_HEAD(&rdev->same_set);
31484 init_waitqueue_head(&rdev->blocked_wait);
31485 @@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31486
31487 spin_unlock(&pers_lock);
31488 seq_printf(seq, "\n");
31489 - seq->poll_event = atomic_read(&md_event_count);
31490 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31491 return 0;
31492 }
31493 if (v == (void*)2) {
31494 @@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31495 chunk_kb ? "KB" : "B");
31496 if (bitmap->file) {
31497 seq_printf(seq, ", file: ");
31498 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31499 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31500 }
31501
31502 seq_printf(seq, "\n");
31503 @@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31504 return error;
31505
31506 seq = file->private_data;
31507 - seq->poll_event = atomic_read(&md_event_count);
31508 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31509 return error;
31510 }
31511
31512 @@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31513 /* always allow read */
31514 mask = POLLIN | POLLRDNORM;
31515
31516 - if (seq->poll_event != atomic_read(&md_event_count))
31517 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31518 mask |= POLLERR | POLLPRI;
31519 return mask;
31520 }
31521 @@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
31522 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31523 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31524 (int)part_stat_read(&disk->part0, sectors[1]) -
31525 - atomic_read(&disk->sync_io);
31526 + atomic_read_unchecked(&disk->sync_io);
31527 /* sync IO will cause sync_io to increase before the disk_stats
31528 * as sync_io is counted when a request starts, and
31529 * disk_stats is counted when it completes.
31530 diff --git a/drivers/md/md.h b/drivers/md/md.h
31531 index cf742d9..7c7c745 100644
31532 --- a/drivers/md/md.h
31533 +++ b/drivers/md/md.h
31534 @@ -120,13 +120,13 @@ struct md_rdev {
31535 * only maintained for arrays that
31536 * support hot removal
31537 */
31538 - atomic_t read_errors; /* number of consecutive read errors that
31539 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31540 * we have tried to ignore.
31541 */
31542 struct timespec last_read_error; /* monotonic time since our
31543 * last read error
31544 */
31545 - atomic_t corrected_errors; /* number of corrected read errors,
31546 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31547 * for reporting to userspace and storing
31548 * in superblock.
31549 */
31550 @@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
31551
31552 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31553 {
31554 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31555 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31556 }
31557
31558 struct md_personality
31559 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
31560 index 50ed53b..4f29d7d 100644
31561 --- a/drivers/md/persistent-data/dm-space-map-checker.c
31562 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
31563 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
31564 /*----------------------------------------------------------------*/
31565
31566 struct sm_checker {
31567 - struct dm_space_map sm;
31568 + dm_space_map_no_const sm;
31569
31570 struct count_array old_counts;
31571 struct count_array counts;
31572 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
31573 index fc469ba..2d91555 100644
31574 --- a/drivers/md/persistent-data/dm-space-map-disk.c
31575 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
31576 @@ -23,7 +23,7 @@
31577 * Space map interface.
31578 */
31579 struct sm_disk {
31580 - struct dm_space_map sm;
31581 + dm_space_map_no_const sm;
31582
31583 struct ll_disk ll;
31584 struct ll_disk old_ll;
31585 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
31586 index e89ae5e..062e4c2 100644
31587 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
31588 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
31589 @@ -43,7 +43,7 @@ struct block_op {
31590 };
31591
31592 struct sm_metadata {
31593 - struct dm_space_map sm;
31594 + dm_space_map_no_const sm;
31595
31596 struct ll_disk ll;
31597 struct ll_disk old_ll;
31598 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
31599 index 1cbfc6b..56e1dbb 100644
31600 --- a/drivers/md/persistent-data/dm-space-map.h
31601 +++ b/drivers/md/persistent-data/dm-space-map.h
31602 @@ -60,6 +60,7 @@ struct dm_space_map {
31603 int (*root_size)(struct dm_space_map *sm, size_t *result);
31604 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
31605 };
31606 +typedef struct dm_space_map __no_const dm_space_map_no_const;
31607
31608 /*----------------------------------------------------------------*/
31609
31610 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31611 index 7d9e071..015b1d5 100644
31612 --- a/drivers/md/raid1.c
31613 +++ b/drivers/md/raid1.c
31614 @@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
31615 if (r1_sync_page_io(rdev, sect, s,
31616 bio->bi_io_vec[idx].bv_page,
31617 READ) != 0)
31618 - atomic_add(s, &rdev->corrected_errors);
31619 + atomic_add_unchecked(s, &rdev->corrected_errors);
31620 }
31621 sectors -= s;
31622 sect += s;
31623 @@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
31624 test_bit(In_sync, &rdev->flags)) {
31625 if (r1_sync_page_io(rdev, sect, s,
31626 conf->tmppage, READ)) {
31627 - atomic_add(s, &rdev->corrected_errors);
31628 + atomic_add_unchecked(s, &rdev->corrected_errors);
31629 printk(KERN_INFO
31630 "md/raid1:%s: read error corrected "
31631 "(%d sectors at %llu on %s)\n",
31632 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31633 index 685ddf3..955b087 100644
31634 --- a/drivers/md/raid10.c
31635 +++ b/drivers/md/raid10.c
31636 @@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
31637 /* The write handler will notice the lack of
31638 * R10BIO_Uptodate and record any errors etc
31639 */
31640 - atomic_add(r10_bio->sectors,
31641 + atomic_add_unchecked(r10_bio->sectors,
31642 &conf->mirrors[d].rdev->corrected_errors);
31643
31644 /* for reconstruct, we always reschedule after a read.
31645 @@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31646 {
31647 struct timespec cur_time_mon;
31648 unsigned long hours_since_last;
31649 - unsigned int read_errors = atomic_read(&rdev->read_errors);
31650 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31651
31652 ktime_get_ts(&cur_time_mon);
31653
31654 @@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31655 * overflowing the shift of read_errors by hours_since_last.
31656 */
31657 if (hours_since_last >= 8 * sizeof(read_errors))
31658 - atomic_set(&rdev->read_errors, 0);
31659 + atomic_set_unchecked(&rdev->read_errors, 0);
31660 else
31661 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31662 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31663 }
31664
31665 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
31666 @@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31667 return;
31668
31669 check_decay_read_errors(mddev, rdev);
31670 - atomic_inc(&rdev->read_errors);
31671 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
31672 + atomic_inc_unchecked(&rdev->read_errors);
31673 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31674 char b[BDEVNAME_SIZE];
31675 bdevname(rdev->bdev, b);
31676
31677 @@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31678 "md/raid10:%s: %s: Raid device exceeded "
31679 "read_error threshold [cur %d:max %d]\n",
31680 mdname(mddev), b,
31681 - atomic_read(&rdev->read_errors), max_read_errors);
31682 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31683 printk(KERN_NOTICE
31684 "md/raid10:%s: %s: Failing raid device\n",
31685 mdname(mddev), b);
31686 @@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31687 (unsigned long long)(
31688 sect + rdev->data_offset),
31689 bdevname(rdev->bdev, b));
31690 - atomic_add(s, &rdev->corrected_errors);
31691 + atomic_add_unchecked(s, &rdev->corrected_errors);
31692 }
31693
31694 rdev_dec_pending(rdev, mddev);
31695 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31696 index 858fdbb..b2dac95 100644
31697 --- a/drivers/md/raid5.c
31698 +++ b/drivers/md/raid5.c
31699 @@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31700 (unsigned long long)(sh->sector
31701 + rdev->data_offset),
31702 bdevname(rdev->bdev, b));
31703 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31704 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31705 clear_bit(R5_ReadError, &sh->dev[i].flags);
31706 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31707 }
31708 - if (atomic_read(&conf->disks[i].rdev->read_errors))
31709 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
31710 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31711 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31712 } else {
31713 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31714 int retry = 0;
31715 rdev = conf->disks[i].rdev;
31716
31717 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31718 - atomic_inc(&rdev->read_errors);
31719 + atomic_inc_unchecked(&rdev->read_errors);
31720 if (conf->mddev->degraded >= conf->max_degraded)
31721 printk_ratelimited(
31722 KERN_WARNING
31723 @@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31724 (unsigned long long)(sh->sector
31725 + rdev->data_offset),
31726 bdn);
31727 - else if (atomic_read(&rdev->read_errors)
31728 + else if (atomic_read_unchecked(&rdev->read_errors)
31729 > conf->max_nr_stripes)
31730 printk(KERN_WARNING
31731 "md/raid:%s: Too many read errors, failing device %s.\n",
31732 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31733 index ba9a643..e474ab5 100644
31734 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31735 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31736 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
31737 .subvendor = _subvend, .subdevice = _subdev, \
31738 .driver_data = (unsigned long)&_driverdata }
31739
31740 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31741 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31742 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31743 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31744 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31745 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31746 index a7d876f..8c21b61 100644
31747 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
31748 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31749 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
31750 union {
31751 dmx_ts_cb ts;
31752 dmx_section_cb sec;
31753 - } cb;
31754 + } __no_const cb;
31755
31756 struct dvb_demux *demux;
31757 void *priv;
31758 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31759 index f732877..d38c35a 100644
31760 --- a/drivers/media/dvb/dvb-core/dvbdev.c
31761 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
31762 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31763 const struct dvb_device *template, void *priv, int type)
31764 {
31765 struct dvb_device *dvbdev;
31766 - struct file_operations *dvbdevfops;
31767 + file_operations_no_const *dvbdevfops;
31768 struct device *clsdev;
31769 int minor;
31770 int id;
31771 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31772 index 9f2a02c..5920f88 100644
31773 --- a/drivers/media/dvb/dvb-usb/cxusb.c
31774 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
31775 @@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31776 struct dib0700_adapter_state {
31777 int (*set_param_save) (struct dvb_frontend *,
31778 struct dvb_frontend_parameters *);
31779 -};
31780 +} __no_const;
31781
31782 static int dib7070_set_param_override(struct dvb_frontend *fe,
31783 struct dvb_frontend_parameters *fep)
31784 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31785 index f103ec1..5e8968b 100644
31786 --- a/drivers/media/dvb/dvb-usb/dw2102.c
31787 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
31788 @@ -95,7 +95,7 @@ struct su3000_state {
31789
31790 struct s6x0_state {
31791 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31792 -};
31793 +} __no_const;
31794
31795 /* debug */
31796 static int dvb_usb_dw2102_debug;
31797 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31798 index 404f63a..4796533 100644
31799 --- a/drivers/media/dvb/frontends/dib3000.h
31800 +++ b/drivers/media/dvb/frontends/dib3000.h
31801 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31802 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31803 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31804 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31805 -};
31806 +} __no_const;
31807
31808 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31809 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31810 diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
31811 index 90bf573..e8463da 100644
31812 --- a/drivers/media/dvb/frontends/ds3000.c
31813 +++ b/drivers/media/dvb/frontends/ds3000.c
31814 @@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
31815
31816 for (i = 0; i < 30 ; i++) {
31817 ds3000_read_status(fe, &status);
31818 - if (status && FE_HAS_LOCK)
31819 + if (status & FE_HAS_LOCK)
31820 break;
31821
31822 msleep(10);
31823 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31824 index 0564192..75b16f5 100644
31825 --- a/drivers/media/dvb/ngene/ngene-cards.c
31826 +++ b/drivers/media/dvb/ngene/ngene-cards.c
31827 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31828
31829 /****************************************************************************/
31830
31831 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31832 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31833 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31834 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31835 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31836 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31837 index 16a089f..ab1667d 100644
31838 --- a/drivers/media/radio/radio-cadet.c
31839 +++ b/drivers/media/radio/radio-cadet.c
31840 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31841 unsigned char readbuf[RDS_BUFFER];
31842 int i = 0;
31843
31844 + if (count > RDS_BUFFER)
31845 + return -EFAULT;
31846 mutex_lock(&dev->lock);
31847 if (dev->rdsstat == 0) {
31848 dev->rdsstat = 1;
31849 diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
31850 index 61287fc..8b08712 100644
31851 --- a/drivers/media/rc/redrat3.c
31852 +++ b/drivers/media/rc/redrat3.c
31853 @@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
31854 return carrier;
31855 }
31856
31857 -static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
31858 +static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
31859 {
31860 struct redrat3_dev *rr3 = rcdev->priv;
31861 struct device *dev = rr3->dev;
31862 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31863 index 9cde353..8c6a1c3 100644
31864 --- a/drivers/media/video/au0828/au0828.h
31865 +++ b/drivers/media/video/au0828/au0828.h
31866 @@ -191,7 +191,7 @@ struct au0828_dev {
31867
31868 /* I2C */
31869 struct i2c_adapter i2c_adap;
31870 - struct i2c_algorithm i2c_algo;
31871 + i2c_algorithm_no_const i2c_algo;
31872 struct i2c_client i2c_client;
31873 u32 i2c_rc;
31874
31875 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31876 index 68d1240..46b32eb 100644
31877 --- a/drivers/media/video/cx88/cx88-alsa.c
31878 +++ b/drivers/media/video/cx88/cx88-alsa.c
31879 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31880 * Only boards with eeprom and byte 1 at eeprom=1 have it
31881 */
31882
31883 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31884 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31885 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31886 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31887 {0, }
31888 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31889 index 305e6aa..0143317 100644
31890 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31891 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31892 @@ -196,7 +196,7 @@ struct pvr2_hdw {
31893
31894 /* I2C stuff */
31895 struct i2c_adapter i2c_adap;
31896 - struct i2c_algorithm i2c_algo;
31897 + i2c_algorithm_no_const i2c_algo;
31898 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31899 int i2c_cx25840_hack_state;
31900 int i2c_linked;
31901 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31902 index a0895bf..b7ebb1b 100644
31903 --- a/drivers/media/video/timblogiw.c
31904 +++ b/drivers/media/video/timblogiw.c
31905 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31906
31907 /* Platform device functions */
31908
31909 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31910 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31911 .vidioc_querycap = timblogiw_querycap,
31912 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31913 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31914 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31915 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31916 };
31917
31918 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31919 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31920 .owner = THIS_MODULE,
31921 .open = timblogiw_open,
31922 .release = timblogiw_close,
31923 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31924 index e9c6a60..daf6a33 100644
31925 --- a/drivers/message/fusion/mptbase.c
31926 +++ b/drivers/message/fusion/mptbase.c
31927 @@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31928 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31929 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31930
31931 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31932 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31933 +#else
31934 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31935 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31936 +#endif
31937 +
31938 /*
31939 * Rounding UP to nearest 4-kB boundary here...
31940 */
31941 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31942 index 9d95042..b808101 100644
31943 --- a/drivers/message/fusion/mptsas.c
31944 +++ b/drivers/message/fusion/mptsas.c
31945 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31946 return 0;
31947 }
31948
31949 +static inline void
31950 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31951 +{
31952 + if (phy_info->port_details) {
31953 + phy_info->port_details->rphy = rphy;
31954 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31955 + ioc->name, rphy));
31956 + }
31957 +
31958 + if (rphy) {
31959 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31960 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31961 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31962 + ioc->name, rphy, rphy->dev.release));
31963 + }
31964 +}
31965 +
31966 /* no mutex */
31967 static void
31968 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31969 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31970 return NULL;
31971 }
31972
31973 -static inline void
31974 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31975 -{
31976 - if (phy_info->port_details) {
31977 - phy_info->port_details->rphy = rphy;
31978 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31979 - ioc->name, rphy));
31980 - }
31981 -
31982 - if (rphy) {
31983 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31984 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31985 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31986 - ioc->name, rphy, rphy->dev.release));
31987 - }
31988 -}
31989 -
31990 static inline struct sas_port *
31991 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31992 {
31993 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
31994 index 0c3ced7..1fe34ec 100644
31995 --- a/drivers/message/fusion/mptscsih.c
31996 +++ b/drivers/message/fusion/mptscsih.c
31997 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31998
31999 h = shost_priv(SChost);
32000
32001 - if (h) {
32002 - if (h->info_kbuf == NULL)
32003 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32004 - return h->info_kbuf;
32005 - h->info_kbuf[0] = '\0';
32006 + if (!h)
32007 + return NULL;
32008
32009 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32010 - h->info_kbuf[size-1] = '\0';
32011 - }
32012 + if (h->info_kbuf == NULL)
32013 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32014 + return h->info_kbuf;
32015 + h->info_kbuf[0] = '\0';
32016 +
32017 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32018 + h->info_kbuf[size-1] = '\0';
32019
32020 return h->info_kbuf;
32021 }
32022 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
32023 index 07dbeaf..5533142 100644
32024 --- a/drivers/message/i2o/i2o_proc.c
32025 +++ b/drivers/message/i2o/i2o_proc.c
32026 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
32027 "Array Controller Device"
32028 };
32029
32030 -static char *chtostr(u8 * chars, int n)
32031 -{
32032 - char tmp[256];
32033 - tmp[0] = 0;
32034 - return strncat(tmp, (char *)chars, n);
32035 -}
32036 -
32037 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32038 char *group)
32039 {
32040 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
32041
32042 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32043 seq_printf(seq, "%-#8x", ddm_table.module_id);
32044 - seq_printf(seq, "%-29s",
32045 - chtostr(ddm_table.module_name_version, 28));
32046 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32047 seq_printf(seq, "%9d ", ddm_table.data_size);
32048 seq_printf(seq, "%8d", ddm_table.code_size);
32049
32050 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
32051
32052 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32053 seq_printf(seq, "%-#8x", dst->module_id);
32054 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32055 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32056 + seq_printf(seq, "%-.28s", dst->module_name_version);
32057 + seq_printf(seq, "%-.8s", dst->date);
32058 seq_printf(seq, "%8d ", dst->module_size);
32059 seq_printf(seq, "%8d ", dst->mpb_size);
32060 seq_printf(seq, "0x%04x", dst->module_flags);
32061 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
32062 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32063 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32064 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32065 - seq_printf(seq, "Vendor info : %s\n",
32066 - chtostr((u8 *) (work32 + 2), 16));
32067 - seq_printf(seq, "Product info : %s\n",
32068 - chtostr((u8 *) (work32 + 6), 16));
32069 - seq_printf(seq, "Description : %s\n",
32070 - chtostr((u8 *) (work32 + 10), 16));
32071 - seq_printf(seq, "Product rev. : %s\n",
32072 - chtostr((u8 *) (work32 + 14), 8));
32073 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32074 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32075 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32076 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32077
32078 seq_printf(seq, "Serial number : ");
32079 print_serial_number(seq, (u8 *) (work32 + 16),
32080 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
32081 }
32082
32083 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32084 - seq_printf(seq, "Module name : %s\n",
32085 - chtostr(result.module_name, 24));
32086 - seq_printf(seq, "Module revision : %s\n",
32087 - chtostr(result.module_rev, 8));
32088 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
32089 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32090
32091 seq_printf(seq, "Serial number : ");
32092 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32093 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
32094 return 0;
32095 }
32096
32097 - seq_printf(seq, "Device name : %s\n",
32098 - chtostr(result.device_name, 64));
32099 - seq_printf(seq, "Service name : %s\n",
32100 - chtostr(result.service_name, 64));
32101 - seq_printf(seq, "Physical name : %s\n",
32102 - chtostr(result.physical_location, 64));
32103 - seq_printf(seq, "Instance number : %s\n",
32104 - chtostr(result.instance_number, 4));
32105 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
32106 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
32107 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32108 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32109
32110 return 0;
32111 }
32112 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
32113 index a8c08f3..155fe3d 100644
32114 --- a/drivers/message/i2o/iop.c
32115 +++ b/drivers/message/i2o/iop.c
32116 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
32117
32118 spin_lock_irqsave(&c->context_list_lock, flags);
32119
32120 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32121 - atomic_inc(&c->context_list_counter);
32122 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32123 + atomic_inc_unchecked(&c->context_list_counter);
32124
32125 - entry->context = atomic_read(&c->context_list_counter);
32126 + entry->context = atomic_read_unchecked(&c->context_list_counter);
32127
32128 list_add(&entry->list, &c->context_list);
32129
32130 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
32131
32132 #if BITS_PER_LONG == 64
32133 spin_lock_init(&c->context_list_lock);
32134 - atomic_set(&c->context_list_counter, 0);
32135 + atomic_set_unchecked(&c->context_list_counter, 0);
32136 INIT_LIST_HEAD(&c->context_list);
32137 #endif
32138
32139 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
32140 index 7ce65f4..e66e9bc 100644
32141 --- a/drivers/mfd/abx500-core.c
32142 +++ b/drivers/mfd/abx500-core.c
32143 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
32144
32145 struct abx500_device_entry {
32146 struct list_head list;
32147 - struct abx500_ops ops;
32148 + abx500_ops_no_const ops;
32149 struct device *dev;
32150 };
32151
32152 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
32153 index 5c2a06a..8fa077c 100644
32154 --- a/drivers/mfd/janz-cmodio.c
32155 +++ b/drivers/mfd/janz-cmodio.c
32156 @@ -13,6 +13,7 @@
32157
32158 #include <linux/kernel.h>
32159 #include <linux/module.h>
32160 +#include <linux/slab.h>
32161 #include <linux/init.h>
32162 #include <linux/pci.h>
32163 #include <linux/interrupt.h>
32164 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
32165 index 29d12a7..f900ba4 100644
32166 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
32167 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
32168 @@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
32169 * the lid is closed. This leads to interrupts as soon as a little move
32170 * is done.
32171 */
32172 - atomic_inc(&lis3->count);
32173 + atomic_inc_unchecked(&lis3->count);
32174
32175 wake_up_interruptible(&lis3->misc_wait);
32176 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
32177 @@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
32178 if (lis3->pm_dev)
32179 pm_runtime_get_sync(lis3->pm_dev);
32180
32181 - atomic_set(&lis3->count, 0);
32182 + atomic_set_unchecked(&lis3->count, 0);
32183 return 0;
32184 }
32185
32186 @@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
32187 add_wait_queue(&lis3->misc_wait, &wait);
32188 while (true) {
32189 set_current_state(TASK_INTERRUPTIBLE);
32190 - data = atomic_xchg(&lis3->count, 0);
32191 + data = atomic_xchg_unchecked(&lis3->count, 0);
32192 if (data)
32193 break;
32194
32195 @@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
32196 struct lis3lv02d, miscdev);
32197
32198 poll_wait(file, &lis3->misc_wait, wait);
32199 - if (atomic_read(&lis3->count))
32200 + if (atomic_read_unchecked(&lis3->count))
32201 return POLLIN | POLLRDNORM;
32202 return 0;
32203 }
32204 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
32205 index 2b1482a..5d33616 100644
32206 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
32207 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
32208 @@ -266,7 +266,7 @@ struct lis3lv02d {
32209 struct input_polled_dev *idev; /* input device */
32210 struct platform_device *pdev; /* platform device */
32211 struct regulator_bulk_data regulators[2];
32212 - atomic_t count; /* interrupt count after last read */
32213 + atomic_unchecked_t count; /* interrupt count after last read */
32214 union axis_conversion ac; /* hw -> logical axis */
32215 int mapped_btns[3];
32216
32217 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
32218 index 2f30bad..c4c13d0 100644
32219 --- a/drivers/misc/sgi-gru/gruhandles.c
32220 +++ b/drivers/misc/sgi-gru/gruhandles.c
32221 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32222 unsigned long nsec;
32223
32224 nsec = CLKS2NSEC(clks);
32225 - atomic_long_inc(&mcs_op_statistics[op].count);
32226 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
32227 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32228 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32229 if (mcs_op_statistics[op].max < nsec)
32230 mcs_op_statistics[op].max = nsec;
32231 }
32232 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32233 index 7768b87..f8aac38 100644
32234 --- a/drivers/misc/sgi-gru/gruprocfs.c
32235 +++ b/drivers/misc/sgi-gru/gruprocfs.c
32236 @@ -32,9 +32,9 @@
32237
32238 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32239
32240 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32241 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32242 {
32243 - unsigned long val = atomic_long_read(v);
32244 + unsigned long val = atomic_long_read_unchecked(v);
32245
32246 seq_printf(s, "%16lu %s\n", val, id);
32247 }
32248 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
32249
32250 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32251 for (op = 0; op < mcsop_last; op++) {
32252 - count = atomic_long_read(&mcs_op_statistics[op].count);
32253 - total = atomic_long_read(&mcs_op_statistics[op].total);
32254 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32255 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32256 max = mcs_op_statistics[op].max;
32257 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32258 count ? total / count : 0, max);
32259 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32260 index 5c3ce24..4915ccb 100644
32261 --- a/drivers/misc/sgi-gru/grutables.h
32262 +++ b/drivers/misc/sgi-gru/grutables.h
32263 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32264 * GRU statistics.
32265 */
32266 struct gru_stats_s {
32267 - atomic_long_t vdata_alloc;
32268 - atomic_long_t vdata_free;
32269 - atomic_long_t gts_alloc;
32270 - atomic_long_t gts_free;
32271 - atomic_long_t gms_alloc;
32272 - atomic_long_t gms_free;
32273 - atomic_long_t gts_double_allocate;
32274 - atomic_long_t assign_context;
32275 - atomic_long_t assign_context_failed;
32276 - atomic_long_t free_context;
32277 - atomic_long_t load_user_context;
32278 - atomic_long_t load_kernel_context;
32279 - atomic_long_t lock_kernel_context;
32280 - atomic_long_t unlock_kernel_context;
32281 - atomic_long_t steal_user_context;
32282 - atomic_long_t steal_kernel_context;
32283 - atomic_long_t steal_context_failed;
32284 - atomic_long_t nopfn;
32285 - atomic_long_t asid_new;
32286 - atomic_long_t asid_next;
32287 - atomic_long_t asid_wrap;
32288 - atomic_long_t asid_reuse;
32289 - atomic_long_t intr;
32290 - atomic_long_t intr_cbr;
32291 - atomic_long_t intr_tfh;
32292 - atomic_long_t intr_spurious;
32293 - atomic_long_t intr_mm_lock_failed;
32294 - atomic_long_t call_os;
32295 - atomic_long_t call_os_wait_queue;
32296 - atomic_long_t user_flush_tlb;
32297 - atomic_long_t user_unload_context;
32298 - atomic_long_t user_exception;
32299 - atomic_long_t set_context_option;
32300 - atomic_long_t check_context_retarget_intr;
32301 - atomic_long_t check_context_unload;
32302 - atomic_long_t tlb_dropin;
32303 - atomic_long_t tlb_preload_page;
32304 - atomic_long_t tlb_dropin_fail_no_asid;
32305 - atomic_long_t tlb_dropin_fail_upm;
32306 - atomic_long_t tlb_dropin_fail_invalid;
32307 - atomic_long_t tlb_dropin_fail_range_active;
32308 - atomic_long_t tlb_dropin_fail_idle;
32309 - atomic_long_t tlb_dropin_fail_fmm;
32310 - atomic_long_t tlb_dropin_fail_no_exception;
32311 - atomic_long_t tfh_stale_on_fault;
32312 - atomic_long_t mmu_invalidate_range;
32313 - atomic_long_t mmu_invalidate_page;
32314 - atomic_long_t flush_tlb;
32315 - atomic_long_t flush_tlb_gru;
32316 - atomic_long_t flush_tlb_gru_tgh;
32317 - atomic_long_t flush_tlb_gru_zero_asid;
32318 + atomic_long_unchecked_t vdata_alloc;
32319 + atomic_long_unchecked_t vdata_free;
32320 + atomic_long_unchecked_t gts_alloc;
32321 + atomic_long_unchecked_t gts_free;
32322 + atomic_long_unchecked_t gms_alloc;
32323 + atomic_long_unchecked_t gms_free;
32324 + atomic_long_unchecked_t gts_double_allocate;
32325 + atomic_long_unchecked_t assign_context;
32326 + atomic_long_unchecked_t assign_context_failed;
32327 + atomic_long_unchecked_t free_context;
32328 + atomic_long_unchecked_t load_user_context;
32329 + atomic_long_unchecked_t load_kernel_context;
32330 + atomic_long_unchecked_t lock_kernel_context;
32331 + atomic_long_unchecked_t unlock_kernel_context;
32332 + atomic_long_unchecked_t steal_user_context;
32333 + atomic_long_unchecked_t steal_kernel_context;
32334 + atomic_long_unchecked_t steal_context_failed;
32335 + atomic_long_unchecked_t nopfn;
32336 + atomic_long_unchecked_t asid_new;
32337 + atomic_long_unchecked_t asid_next;
32338 + atomic_long_unchecked_t asid_wrap;
32339 + atomic_long_unchecked_t asid_reuse;
32340 + atomic_long_unchecked_t intr;
32341 + atomic_long_unchecked_t intr_cbr;
32342 + atomic_long_unchecked_t intr_tfh;
32343 + atomic_long_unchecked_t intr_spurious;
32344 + atomic_long_unchecked_t intr_mm_lock_failed;
32345 + atomic_long_unchecked_t call_os;
32346 + atomic_long_unchecked_t call_os_wait_queue;
32347 + atomic_long_unchecked_t user_flush_tlb;
32348 + atomic_long_unchecked_t user_unload_context;
32349 + atomic_long_unchecked_t user_exception;
32350 + atomic_long_unchecked_t set_context_option;
32351 + atomic_long_unchecked_t check_context_retarget_intr;
32352 + atomic_long_unchecked_t check_context_unload;
32353 + atomic_long_unchecked_t tlb_dropin;
32354 + atomic_long_unchecked_t tlb_preload_page;
32355 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32356 + atomic_long_unchecked_t tlb_dropin_fail_upm;
32357 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
32358 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
32359 + atomic_long_unchecked_t tlb_dropin_fail_idle;
32360 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
32361 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32362 + atomic_long_unchecked_t tfh_stale_on_fault;
32363 + atomic_long_unchecked_t mmu_invalidate_range;
32364 + atomic_long_unchecked_t mmu_invalidate_page;
32365 + atomic_long_unchecked_t flush_tlb;
32366 + atomic_long_unchecked_t flush_tlb_gru;
32367 + atomic_long_unchecked_t flush_tlb_gru_tgh;
32368 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32369
32370 - atomic_long_t copy_gpa;
32371 - atomic_long_t read_gpa;
32372 + atomic_long_unchecked_t copy_gpa;
32373 + atomic_long_unchecked_t read_gpa;
32374
32375 - atomic_long_t mesq_receive;
32376 - atomic_long_t mesq_receive_none;
32377 - atomic_long_t mesq_send;
32378 - atomic_long_t mesq_send_failed;
32379 - atomic_long_t mesq_noop;
32380 - atomic_long_t mesq_send_unexpected_error;
32381 - atomic_long_t mesq_send_lb_overflow;
32382 - atomic_long_t mesq_send_qlimit_reached;
32383 - atomic_long_t mesq_send_amo_nacked;
32384 - atomic_long_t mesq_send_put_nacked;
32385 - atomic_long_t mesq_page_overflow;
32386 - atomic_long_t mesq_qf_locked;
32387 - atomic_long_t mesq_qf_noop_not_full;
32388 - atomic_long_t mesq_qf_switch_head_failed;
32389 - atomic_long_t mesq_qf_unexpected_error;
32390 - atomic_long_t mesq_noop_unexpected_error;
32391 - atomic_long_t mesq_noop_lb_overflow;
32392 - atomic_long_t mesq_noop_qlimit_reached;
32393 - atomic_long_t mesq_noop_amo_nacked;
32394 - atomic_long_t mesq_noop_put_nacked;
32395 - atomic_long_t mesq_noop_page_overflow;
32396 + atomic_long_unchecked_t mesq_receive;
32397 + atomic_long_unchecked_t mesq_receive_none;
32398 + atomic_long_unchecked_t mesq_send;
32399 + atomic_long_unchecked_t mesq_send_failed;
32400 + atomic_long_unchecked_t mesq_noop;
32401 + atomic_long_unchecked_t mesq_send_unexpected_error;
32402 + atomic_long_unchecked_t mesq_send_lb_overflow;
32403 + atomic_long_unchecked_t mesq_send_qlimit_reached;
32404 + atomic_long_unchecked_t mesq_send_amo_nacked;
32405 + atomic_long_unchecked_t mesq_send_put_nacked;
32406 + atomic_long_unchecked_t mesq_page_overflow;
32407 + atomic_long_unchecked_t mesq_qf_locked;
32408 + atomic_long_unchecked_t mesq_qf_noop_not_full;
32409 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
32410 + atomic_long_unchecked_t mesq_qf_unexpected_error;
32411 + atomic_long_unchecked_t mesq_noop_unexpected_error;
32412 + atomic_long_unchecked_t mesq_noop_lb_overflow;
32413 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
32414 + atomic_long_unchecked_t mesq_noop_amo_nacked;
32415 + atomic_long_unchecked_t mesq_noop_put_nacked;
32416 + atomic_long_unchecked_t mesq_noop_page_overflow;
32417
32418 };
32419
32420 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32421 tghop_invalidate, mcsop_last};
32422
32423 struct mcs_op_statistic {
32424 - atomic_long_t count;
32425 - atomic_long_t total;
32426 + atomic_long_unchecked_t count;
32427 + atomic_long_unchecked_t total;
32428 unsigned long max;
32429 };
32430
32431 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32432
32433 #define STAT(id) do { \
32434 if (gru_options & OPT_STATS) \
32435 - atomic_long_inc(&gru_stats.id); \
32436 + atomic_long_inc_unchecked(&gru_stats.id); \
32437 } while (0)
32438
32439 #ifdef CONFIG_SGI_GRU_DEBUG
32440 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32441 index 851b2f2..a4ec097 100644
32442 --- a/drivers/misc/sgi-xp/xp.h
32443 +++ b/drivers/misc/sgi-xp/xp.h
32444 @@ -289,7 +289,7 @@ struct xpc_interface {
32445 xpc_notify_func, void *);
32446 void (*received) (short, int, void *);
32447 enum xp_retval (*partid_to_nasids) (short, void *);
32448 -};
32449 +} __no_const;
32450
32451 extern struct xpc_interface xpc_interface;
32452
32453 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32454 index b94d5f7..7f494c5 100644
32455 --- a/drivers/misc/sgi-xp/xpc.h
32456 +++ b/drivers/misc/sgi-xp/xpc.h
32457 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
32458 void (*received_payload) (struct xpc_channel *, void *);
32459 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32460 };
32461 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32462
32463 /* struct xpc_partition act_state values (for XPC HB) */
32464
32465 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32466 /* found in xpc_main.c */
32467 extern struct device *xpc_part;
32468 extern struct device *xpc_chan;
32469 -extern struct xpc_arch_operations xpc_arch_ops;
32470 +extern xpc_arch_operations_no_const xpc_arch_ops;
32471 extern int xpc_disengage_timelimit;
32472 extern int xpc_disengage_timedout;
32473 extern int xpc_activate_IRQ_rcvd;
32474 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32475 index 8d082b4..aa749ae 100644
32476 --- a/drivers/misc/sgi-xp/xpc_main.c
32477 +++ b/drivers/misc/sgi-xp/xpc_main.c
32478 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32479 .notifier_call = xpc_system_die,
32480 };
32481
32482 -struct xpc_arch_operations xpc_arch_ops;
32483 +xpc_arch_operations_no_const xpc_arch_ops;
32484
32485 /*
32486 * Timer function to enforce the timelimit on the partition disengage.
32487 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32488 index 6878a94..fe5c5f1 100644
32489 --- a/drivers/mmc/host/sdhci-pci.c
32490 +++ b/drivers/mmc/host/sdhci-pci.c
32491 @@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32492 .probe = via_probe,
32493 };
32494
32495 -static const struct pci_device_id pci_ids[] __devinitdata = {
32496 +static const struct pci_device_id pci_ids[] __devinitconst = {
32497 {
32498 .vendor = PCI_VENDOR_ID_RICOH,
32499 .device = PCI_DEVICE_ID_RICOH_R5C822,
32500 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32501 index e9fad91..0a7a16a 100644
32502 --- a/drivers/mtd/devices/doc2000.c
32503 +++ b/drivers/mtd/devices/doc2000.c
32504 @@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32505
32506 /* The ECC will not be calculated correctly if less than 512 is written */
32507 /* DBB-
32508 - if (len != 0x200 && eccbuf)
32509 + if (len != 0x200)
32510 printk(KERN_WARNING
32511 "ECC needs a full sector write (adr: %lx size %lx)\n",
32512 (long) to, (long) len);
32513 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32514 index a3f7a27..234016e 100644
32515 --- a/drivers/mtd/devices/doc2001.c
32516 +++ b/drivers/mtd/devices/doc2001.c
32517 @@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32518 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32519
32520 /* Don't allow read past end of device */
32521 - if (from >= this->totlen)
32522 + if (from >= this->totlen || !len)
32523 return -EINVAL;
32524
32525 /* Don't allow a single read to cross a 512-byte block boundary */
32526 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32527 index 3984d48..28aa897 100644
32528 --- a/drivers/mtd/nand/denali.c
32529 +++ b/drivers/mtd/nand/denali.c
32530 @@ -26,6 +26,7 @@
32531 #include <linux/pci.h>
32532 #include <linux/mtd/mtd.h>
32533 #include <linux/module.h>
32534 +#include <linux/slab.h>
32535
32536 #include "denali.h"
32537
32538 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32539 index ac40925..483b753 100644
32540 --- a/drivers/mtd/nftlmount.c
32541 +++ b/drivers/mtd/nftlmount.c
32542 @@ -24,6 +24,7 @@
32543 #include <asm/errno.h>
32544 #include <linux/delay.h>
32545 #include <linux/slab.h>
32546 +#include <linux/sched.h>
32547 #include <linux/mtd/mtd.h>
32548 #include <linux/mtd/nand.h>
32549 #include <linux/mtd/nftl.h>
32550 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32551 index 6c3fb5a..c542a81 100644
32552 --- a/drivers/mtd/ubi/build.c
32553 +++ b/drivers/mtd/ubi/build.c
32554 @@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32555 static int __init bytes_str_to_int(const char *str)
32556 {
32557 char *endp;
32558 - unsigned long result;
32559 + unsigned long result, scale = 1;
32560
32561 result = simple_strtoul(str, &endp, 0);
32562 if (str == endp || result >= INT_MAX) {
32563 @@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32564
32565 switch (*endp) {
32566 case 'G':
32567 - result *= 1024;
32568 + scale *= 1024;
32569 case 'M':
32570 - result *= 1024;
32571 + scale *= 1024;
32572 case 'K':
32573 - result *= 1024;
32574 + scale *= 1024;
32575 if (endp[1] == 'i' && endp[2] == 'B')
32576 endp += 2;
32577 case '\0':
32578 @@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32579 return -EINVAL;
32580 }
32581
32582 - return result;
32583 + if ((intoverflow_t)result*scale >= INT_MAX) {
32584 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32585 + str);
32586 + return -EINVAL;
32587 + }
32588 +
32589 + return result*scale;
32590 }
32591
32592 /**
32593 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
32594 index 1feae59..c2a61d2 100644
32595 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
32596 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
32597 @@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32598 */
32599
32600 #define ATL2_PARAM(X, desc) \
32601 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32602 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32603 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32604 MODULE_PARM_DESC(X, desc);
32605 #else
32606 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32607 index 9a517c2..a50cfcb 100644
32608 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32609 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32610 @@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32611
32612 int (*wait_comp)(struct bnx2x *bp,
32613 struct bnx2x_rx_mode_ramrod_params *p);
32614 -};
32615 +} __no_const;
32616
32617 /********************** Set multicast group ***********************************/
32618
32619 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
32620 index 94b4bd0..73c02de 100644
32621 --- a/drivers/net/ethernet/broadcom/tg3.h
32622 +++ b/drivers/net/ethernet/broadcom/tg3.h
32623 @@ -134,6 +134,7 @@
32624 #define CHIPREV_ID_5750_A0 0x4000
32625 #define CHIPREV_ID_5750_A1 0x4001
32626 #define CHIPREV_ID_5750_A3 0x4003
32627 +#define CHIPREV_ID_5750_C1 0x4201
32628 #define CHIPREV_ID_5750_C2 0x4202
32629 #define CHIPREV_ID_5752_A0_HW 0x5000
32630 #define CHIPREV_ID_5752_A0 0x6000
32631 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32632 index c5f5479..2e8c260 100644
32633 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32634 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32635 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32636 */
32637 struct l2t_skb_cb {
32638 arp_failure_handler_func arp_failure_handler;
32639 -};
32640 +} __no_const;
32641
32642 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32643
32644 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
32645 index 871bcaa..4043505 100644
32646 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
32647 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
32648 @@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32649 for (i=0; i<ETH_ALEN; i++) {
32650 tmp.addr[i] = dev->dev_addr[i];
32651 }
32652 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32653 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32654 break;
32655
32656 case DE4X5_SET_HWADDR: /* Set the hardware address */
32657 @@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32658 spin_lock_irqsave(&lp->lock, flags);
32659 memcpy(&statbuf, &lp->pktStats, ioc->len);
32660 spin_unlock_irqrestore(&lp->lock, flags);
32661 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32662 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32663 return -EFAULT;
32664 break;
32665 }
32666 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
32667 index 14d5b61..1398636 100644
32668 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
32669 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
32670 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
32671 {NULL}};
32672
32673
32674 -static const char *block_name[] __devinitdata = {
32675 +static const char *block_name[] __devinitconst = {
32676 "21140 non-MII",
32677 "21140 MII PHY",
32678 "21142 Serial PHY",
32679 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
32680 index 4d01219..b58d26d 100644
32681 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
32682 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
32683 @@ -236,7 +236,7 @@ struct pci_id_info {
32684 int drv_flags; /* Driver use, intended as capability flags. */
32685 };
32686
32687 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32688 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32689 { /* Sometime a Level-One switch card. */
32690 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32691 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32692 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
32693 index dcd7f7a..ecb7fb3 100644
32694 --- a/drivers/net/ethernet/dlink/sundance.c
32695 +++ b/drivers/net/ethernet/dlink/sundance.c
32696 @@ -218,7 +218,7 @@ enum {
32697 struct pci_id_info {
32698 const char *name;
32699 };
32700 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32701 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32702 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32703 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32704 {"D-Link DFE-580TX 4 port Server Adapter"},
32705 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
32706 index bf266a0..e024af7 100644
32707 --- a/drivers/net/ethernet/emulex/benet/be_main.c
32708 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
32709 @@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
32710
32711 if (wrapped)
32712 newacc += 65536;
32713 - ACCESS_ONCE(*acc) = newacc;
32714 + ACCESS_ONCE_RW(*acc) = newacc;
32715 }
32716
32717 void be_parse_stats(struct be_adapter *adapter)
32718 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
32719 index 61d2bdd..7f1154a 100644
32720 --- a/drivers/net/ethernet/fealnx.c
32721 +++ b/drivers/net/ethernet/fealnx.c
32722 @@ -150,7 +150,7 @@ struct chip_info {
32723 int flags;
32724 };
32725
32726 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32727 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32728 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32729 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32730 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32731 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32732 index e1159e5..e18684d 100644
32733 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32734 +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32735 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
32736 {
32737 struct e1000_hw *hw = &adapter->hw;
32738 struct e1000_mac_info *mac = &hw->mac;
32739 - struct e1000_mac_operations *func = &mac->ops;
32740 + e1000_mac_operations_no_const *func = &mac->ops;
32741
32742 /* Set media type */
32743 switch (adapter->pdev->device) {
32744 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
32745 index a3e65fd..f451444 100644
32746 --- a/drivers/net/ethernet/intel/e1000e/82571.c
32747 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
32748 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
32749 {
32750 struct e1000_hw *hw = &adapter->hw;
32751 struct e1000_mac_info *mac = &hw->mac;
32752 - struct e1000_mac_operations *func = &mac->ops;
32753 + e1000_mac_operations_no_const *func = &mac->ops;
32754 u32 swsm = 0;
32755 u32 swsm2 = 0;
32756 bool force_clear_smbi = false;
32757 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
32758 index 2967039..ca8c40c 100644
32759 --- a/drivers/net/ethernet/intel/e1000e/hw.h
32760 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
32761 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
32762 void (*write_vfta)(struct e1000_hw *, u32, u32);
32763 s32 (*read_mac_addr)(struct e1000_hw *);
32764 };
32765 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32766
32767 /*
32768 * When to use various PHY register access functions:
32769 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
32770 void (*power_up)(struct e1000_hw *);
32771 void (*power_down)(struct e1000_hw *);
32772 };
32773 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32774
32775 /* Function pointers for the NVM. */
32776 struct e1000_nvm_operations {
32777 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32778 s32 (*validate)(struct e1000_hw *);
32779 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32780 };
32781 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32782
32783 struct e1000_mac_info {
32784 - struct e1000_mac_operations ops;
32785 + e1000_mac_operations_no_const ops;
32786 u8 addr[ETH_ALEN];
32787 u8 perm_addr[ETH_ALEN];
32788
32789 @@ -872,7 +875,7 @@ struct e1000_mac_info {
32790 };
32791
32792 struct e1000_phy_info {
32793 - struct e1000_phy_operations ops;
32794 + e1000_phy_operations_no_const ops;
32795
32796 enum e1000_phy_type type;
32797
32798 @@ -906,7 +909,7 @@ struct e1000_phy_info {
32799 };
32800
32801 struct e1000_nvm_info {
32802 - struct e1000_nvm_operations ops;
32803 + e1000_nvm_operations_no_const ops;
32804
32805 enum e1000_nvm_type type;
32806 enum e1000_nvm_override override;
32807 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
32808 index 4519a13..f97fcd0 100644
32809 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
32810 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
32811 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
32812 s32 (*read_mac_addr)(struct e1000_hw *);
32813 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32814 };
32815 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32816
32817 struct e1000_phy_operations {
32818 s32 (*acquire)(struct e1000_hw *);
32819 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
32820 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32821 s32 (*write_reg)(struct e1000_hw *, u32, u16);
32822 };
32823 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32824
32825 struct e1000_nvm_operations {
32826 s32 (*acquire)(struct e1000_hw *);
32827 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
32828 s32 (*update)(struct e1000_hw *);
32829 s32 (*validate)(struct e1000_hw *);
32830 };
32831 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32832
32833 struct e1000_info {
32834 s32 (*get_invariants)(struct e1000_hw *);
32835 @@ -350,7 +353,7 @@ struct e1000_info {
32836 extern const struct e1000_info e1000_82575_info;
32837
32838 struct e1000_mac_info {
32839 - struct e1000_mac_operations ops;
32840 + e1000_mac_operations_no_const ops;
32841
32842 u8 addr[6];
32843 u8 perm_addr[6];
32844 @@ -388,7 +391,7 @@ struct e1000_mac_info {
32845 };
32846
32847 struct e1000_phy_info {
32848 - struct e1000_phy_operations ops;
32849 + e1000_phy_operations_no_const ops;
32850
32851 enum e1000_phy_type type;
32852
32853 @@ -423,7 +426,7 @@ struct e1000_phy_info {
32854 };
32855
32856 struct e1000_nvm_info {
32857 - struct e1000_nvm_operations ops;
32858 + e1000_nvm_operations_no_const ops;
32859 enum e1000_nvm_type type;
32860 enum e1000_nvm_override override;
32861
32862 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
32863 s32 (*check_for_ack)(struct e1000_hw *, u16);
32864 s32 (*check_for_rst)(struct e1000_hw *, u16);
32865 };
32866 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32867
32868 struct e1000_mbx_stats {
32869 u32 msgs_tx;
32870 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
32871 };
32872
32873 struct e1000_mbx_info {
32874 - struct e1000_mbx_operations ops;
32875 + e1000_mbx_operations_no_const ops;
32876 struct e1000_mbx_stats stats;
32877 u32 timeout;
32878 u32 usec_delay;
32879 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
32880 index d7ed58f..64cde36 100644
32881 --- a/drivers/net/ethernet/intel/igbvf/vf.h
32882 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
32883 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
32884 s32 (*read_mac_addr)(struct e1000_hw *);
32885 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
32886 };
32887 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32888
32889 struct e1000_mac_info {
32890 - struct e1000_mac_operations ops;
32891 + e1000_mac_operations_no_const ops;
32892 u8 addr[6];
32893 u8 perm_addr[6];
32894
32895 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
32896 s32 (*check_for_ack)(struct e1000_hw *);
32897 s32 (*check_for_rst)(struct e1000_hw *);
32898 };
32899 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32900
32901 struct e1000_mbx_stats {
32902 u32 msgs_tx;
32903 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
32904 };
32905
32906 struct e1000_mbx_info {
32907 - struct e1000_mbx_operations ops;
32908 + e1000_mbx_operations_no_const ops;
32909 struct e1000_mbx_stats stats;
32910 u32 timeout;
32911 u32 usec_delay;
32912 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32913 index 6c5cca8..de8ef63 100644
32914 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32915 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32916 @@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
32917 s32 (*update_checksum)(struct ixgbe_hw *);
32918 u16 (*calc_checksum)(struct ixgbe_hw *);
32919 };
32920 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
32921
32922 struct ixgbe_mac_operations {
32923 s32 (*init_hw)(struct ixgbe_hw *);
32924 @@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
32925 /* Manageability interface */
32926 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
32927 };
32928 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32929
32930 struct ixgbe_phy_operations {
32931 s32 (*identify)(struct ixgbe_hw *);
32932 @@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
32933 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
32934 s32 (*check_overtemp)(struct ixgbe_hw *);
32935 };
32936 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
32937
32938 struct ixgbe_eeprom_info {
32939 - struct ixgbe_eeprom_operations ops;
32940 + ixgbe_eeprom_operations_no_const ops;
32941 enum ixgbe_eeprom_type type;
32942 u32 semaphore_delay;
32943 u16 word_size;
32944 @@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
32945
32946 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
32947 struct ixgbe_mac_info {
32948 - struct ixgbe_mac_operations ops;
32949 + ixgbe_mac_operations_no_const ops;
32950 enum ixgbe_mac_type type;
32951 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32952 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32953 @@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
32954 };
32955
32956 struct ixgbe_phy_info {
32957 - struct ixgbe_phy_operations ops;
32958 + ixgbe_phy_operations_no_const ops;
32959 struct mdio_if_info mdio;
32960 enum ixgbe_phy_type type;
32961 u32 id;
32962 @@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
32963 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
32964 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
32965 };
32966 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32967
32968 struct ixgbe_mbx_stats {
32969 u32 msgs_tx;
32970 @@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
32971 };
32972
32973 struct ixgbe_mbx_info {
32974 - struct ixgbe_mbx_operations ops;
32975 + ixgbe_mbx_operations_no_const ops;
32976 struct ixgbe_mbx_stats stats;
32977 u32 timeout;
32978 u32 usec_delay;
32979 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
32980 index 10306b4..28df758 100644
32981 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
32982 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
32983 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
32984 s32 (*clear_vfta)(struct ixgbe_hw *);
32985 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
32986 };
32987 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32988
32989 enum ixgbe_mac_type {
32990 ixgbe_mac_unknown = 0,
32991 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
32992 };
32993
32994 struct ixgbe_mac_info {
32995 - struct ixgbe_mac_operations ops;
32996 + ixgbe_mac_operations_no_const ops;
32997 u8 addr[6];
32998 u8 perm_addr[6];
32999
33000 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
33001 s32 (*check_for_ack)(struct ixgbe_hw *);
33002 s32 (*check_for_rst)(struct ixgbe_hw *);
33003 };
33004 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
33005
33006 struct ixgbe_mbx_stats {
33007 u32 msgs_tx;
33008 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
33009 };
33010
33011 struct ixgbe_mbx_info {
33012 - struct ixgbe_mbx_operations ops;
33013 + ixgbe_mbx_operations_no_const ops;
33014 struct ixgbe_mbx_stats stats;
33015 u32 timeout;
33016 u32 udelay;
33017 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
33018 index 94bbc85..78c12e6 100644
33019 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
33020 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
33021 @@ -40,6 +40,7 @@
33022 #include <linux/dma-mapping.h>
33023 #include <linux/slab.h>
33024 #include <linux/io-mapping.h>
33025 +#include <linux/sched.h>
33026
33027 #include <linux/mlx4/device.h>
33028 #include <linux/mlx4/doorbell.h>
33029 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
33030 index 5046a64..71ca936 100644
33031 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
33032 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
33033 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
33034 void (*link_down)(struct __vxge_hw_device *devh);
33035 void (*crit_err)(struct __vxge_hw_device *devh,
33036 enum vxge_hw_event type, u64 ext_data);
33037 -};
33038 +} __no_const;
33039
33040 /*
33041 * struct __vxge_hw_blockpool_entry - Block private data structure
33042 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33043 index 4a518a3..936b334 100644
33044 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33045 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
33046 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
33047 struct vxge_hw_mempool_dma *dma_object,
33048 u32 index,
33049 u32 is_last);
33050 -};
33051 +} __no_const;
33052
33053 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
33054 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
33055 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
33056 index c8f47f1..5da9840 100644
33057 --- a/drivers/net/ethernet/realtek/r8169.c
33058 +++ b/drivers/net/ethernet/realtek/r8169.c
33059 @@ -698,17 +698,17 @@ struct rtl8169_private {
33060 struct mdio_ops {
33061 void (*write)(void __iomem *, int, int);
33062 int (*read)(void __iomem *, int);
33063 - } mdio_ops;
33064 + } __no_const mdio_ops;
33065
33066 struct pll_power_ops {
33067 void (*down)(struct rtl8169_private *);
33068 void (*up)(struct rtl8169_private *);
33069 - } pll_power_ops;
33070 + } __no_const pll_power_ops;
33071
33072 struct jumbo_ops {
33073 void (*enable)(struct rtl8169_private *);
33074 void (*disable)(struct rtl8169_private *);
33075 - } jumbo_ops;
33076 + } __no_const jumbo_ops;
33077
33078 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
33079 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
33080 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
33081 index 1b4658c..a30dabb 100644
33082 --- a/drivers/net/ethernet/sis/sis190.c
33083 +++ b/drivers/net/ethernet/sis/sis190.c
33084 @@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
33085 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
33086 struct net_device *dev)
33087 {
33088 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
33089 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
33090 struct sis190_private *tp = netdev_priv(dev);
33091 struct pci_dev *isa_bridge;
33092 u8 reg, tmp8;
33093 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
33094 index edfa15d..002bfa9 100644
33095 --- a/drivers/net/ppp/ppp_generic.c
33096 +++ b/drivers/net/ppp/ppp_generic.c
33097 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33098 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
33099 struct ppp_stats stats;
33100 struct ppp_comp_stats cstats;
33101 - char *vers;
33102
33103 switch (cmd) {
33104 case SIOCGPPPSTATS:
33105 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33106 break;
33107
33108 case SIOCGPPPVER:
33109 - vers = PPP_VERSION;
33110 - if (copy_to_user(addr, vers, strlen(vers) + 1))
33111 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
33112 break;
33113 err = 0;
33114 break;
33115 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
33116 index 515f122..41dd273 100644
33117 --- a/drivers/net/tokenring/abyss.c
33118 +++ b/drivers/net/tokenring/abyss.c
33119 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
33120
33121 static int __init abyss_init (void)
33122 {
33123 - abyss_netdev_ops = tms380tr_netdev_ops;
33124 + pax_open_kernel();
33125 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33126
33127 - abyss_netdev_ops.ndo_open = abyss_open;
33128 - abyss_netdev_ops.ndo_stop = abyss_close;
33129 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
33130 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
33131 + pax_close_kernel();
33132
33133 return pci_register_driver(&abyss_driver);
33134 }
33135 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
33136 index 6153cfd..cf69c1c 100644
33137 --- a/drivers/net/tokenring/madgemc.c
33138 +++ b/drivers/net/tokenring/madgemc.c
33139 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
33140
33141 static int __init madgemc_init (void)
33142 {
33143 - madgemc_netdev_ops = tms380tr_netdev_ops;
33144 - madgemc_netdev_ops.ndo_open = madgemc_open;
33145 - madgemc_netdev_ops.ndo_stop = madgemc_close;
33146 + pax_open_kernel();
33147 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33148 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
33149 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
33150 + pax_close_kernel();
33151
33152 return mca_register_driver (&madgemc_driver);
33153 }
33154 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
33155 index 8d362e6..f91cc52 100644
33156 --- a/drivers/net/tokenring/proteon.c
33157 +++ b/drivers/net/tokenring/proteon.c
33158 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
33159 struct platform_device *pdev;
33160 int i, num = 0, err = 0;
33161
33162 - proteon_netdev_ops = tms380tr_netdev_ops;
33163 - proteon_netdev_ops.ndo_open = proteon_open;
33164 - proteon_netdev_ops.ndo_stop = tms380tr_close;
33165 + pax_open_kernel();
33166 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33167 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
33168 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
33169 + pax_close_kernel();
33170
33171 err = platform_driver_register(&proteon_driver);
33172 if (err)
33173 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
33174 index 46db5c5..37c1536 100644
33175 --- a/drivers/net/tokenring/skisa.c
33176 +++ b/drivers/net/tokenring/skisa.c
33177 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
33178 struct platform_device *pdev;
33179 int i, num = 0, err = 0;
33180
33181 - sk_isa_netdev_ops = tms380tr_netdev_ops;
33182 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
33183 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33184 + pax_open_kernel();
33185 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33186 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
33187 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33188 + pax_close_kernel();
33189
33190 err = platform_driver_register(&sk_isa_driver);
33191 if (err)
33192 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
33193 index 304fe78..db112fa 100644
33194 --- a/drivers/net/usb/hso.c
33195 +++ b/drivers/net/usb/hso.c
33196 @@ -71,7 +71,7 @@
33197 #include <asm/byteorder.h>
33198 #include <linux/serial_core.h>
33199 #include <linux/serial.h>
33200 -
33201 +#include <asm/local.h>
33202
33203 #define MOD_AUTHOR "Option Wireless"
33204 #define MOD_DESCRIPTION "USB High Speed Option driver"
33205 @@ -257,7 +257,7 @@ struct hso_serial {
33206
33207 /* from usb_serial_port */
33208 struct tty_struct *tty;
33209 - int open_count;
33210 + local_t open_count;
33211 spinlock_t serial_lock;
33212
33213 int (*write_data) (struct hso_serial *serial);
33214 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
33215 struct urb *urb;
33216
33217 urb = serial->rx_urb[0];
33218 - if (serial->open_count > 0) {
33219 + if (local_read(&serial->open_count) > 0) {
33220 count = put_rxbuf_data(urb, serial);
33221 if (count == -1)
33222 return;
33223 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
33224 DUMP1(urb->transfer_buffer, urb->actual_length);
33225
33226 /* Anyone listening? */
33227 - if (serial->open_count == 0)
33228 + if (local_read(&serial->open_count) == 0)
33229 return;
33230
33231 if (status == 0) {
33232 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33233 spin_unlock_irq(&serial->serial_lock);
33234
33235 /* check for port already opened, if not set the termios */
33236 - serial->open_count++;
33237 - if (serial->open_count == 1) {
33238 + if (local_inc_return(&serial->open_count) == 1) {
33239 serial->rx_state = RX_IDLE;
33240 /* Force default termio settings */
33241 _hso_serial_set_termios(tty, NULL);
33242 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33243 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
33244 if (result) {
33245 hso_stop_serial_device(serial->parent);
33246 - serial->open_count--;
33247 + local_dec(&serial->open_count);
33248 kref_put(&serial->parent->ref, hso_serial_ref_free);
33249 }
33250 } else {
33251 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
33252
33253 /* reset the rts and dtr */
33254 /* do the actual close */
33255 - serial->open_count--;
33256 + local_dec(&serial->open_count);
33257
33258 - if (serial->open_count <= 0) {
33259 - serial->open_count = 0;
33260 + if (local_read(&serial->open_count) <= 0) {
33261 + local_set(&serial->open_count, 0);
33262 spin_lock_irq(&serial->serial_lock);
33263 if (serial->tty == tty) {
33264 serial->tty->driver_data = NULL;
33265 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
33266
33267 /* the actual setup */
33268 spin_lock_irqsave(&serial->serial_lock, flags);
33269 - if (serial->open_count)
33270 + if (local_read(&serial->open_count))
33271 _hso_serial_set_termios(tty, old);
33272 else
33273 tty->termios = old;
33274 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
33275 D1("Pending read interrupt on port %d\n", i);
33276 spin_lock(&serial->serial_lock);
33277 if (serial->rx_state == RX_IDLE &&
33278 - serial->open_count > 0) {
33279 + local_read(&serial->open_count) > 0) {
33280 /* Setup and send a ctrl req read on
33281 * port i */
33282 if (!serial->rx_urb_filled[0]) {
33283 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
33284 /* Start all serial ports */
33285 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
33286 if (serial_table[i] && (serial_table[i]->interface == iface)) {
33287 - if (dev2ser(serial_table[i])->open_count) {
33288 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
33289 result =
33290 hso_start_serial_device(serial_table[i], GFP_NOIO);
33291 hso_kick_transmit(dev2ser(serial_table[i]));
33292 diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33293 index e662cbc..8d4a102 100644
33294 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
33295 +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33296 @@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
33297 * Return with error code if any of the queue indices
33298 * is out of range
33299 */
33300 - if (p->ring_index[i] < 0 ||
33301 - p->ring_index[i] >= adapter->num_rx_queues)
33302 + if (p->ring_index[i] >= adapter->num_rx_queues)
33303 return -EINVAL;
33304 }
33305
33306 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
33307 index 0f9ee46..e2d6e65 100644
33308 --- a/drivers/net/wireless/ath/ath.h
33309 +++ b/drivers/net/wireless/ath/ath.h
33310 @@ -119,6 +119,7 @@ struct ath_ops {
33311 void (*write_flush) (void *);
33312 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
33313 };
33314 +typedef struct ath_ops __no_const ath_ops_no_const;
33315
33316 struct ath_common;
33317 struct ath_bus_ops;
33318 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33319 index b592016..fe47870 100644
33320 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33321 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33322 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33323 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
33324 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
33325
33326 - ACCESS_ONCE(ads->ds_link) = i->link;
33327 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
33328 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
33329 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
33330
33331 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
33332 ctl6 = SM(i->keytype, AR_EncrType);
33333 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33334
33335 if ((i->is_first || i->is_last) &&
33336 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
33337 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
33338 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
33339 | set11nTries(i->rates, 1)
33340 | set11nTries(i->rates, 2)
33341 | set11nTries(i->rates, 3)
33342 | (i->dur_update ? AR_DurUpdateEna : 0)
33343 | SM(0, AR_BurstDur);
33344
33345 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
33346 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
33347 | set11nRate(i->rates, 1)
33348 | set11nRate(i->rates, 2)
33349 | set11nRate(i->rates, 3);
33350 } else {
33351 - ACCESS_ONCE(ads->ds_ctl2) = 0;
33352 - ACCESS_ONCE(ads->ds_ctl3) = 0;
33353 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
33354 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
33355 }
33356
33357 if (!i->is_first) {
33358 - ACCESS_ONCE(ads->ds_ctl0) = 0;
33359 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33360 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33361 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
33362 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33363 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33364 return;
33365 }
33366
33367 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33368 break;
33369 }
33370
33371 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33372 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33373 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33374 | SM(i->txpower, AR_XmitPower)
33375 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33376 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33377 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
33378 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
33379
33380 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33381 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33382 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33383 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33384
33385 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
33386 return;
33387
33388 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33389 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33390 | set11nPktDurRTSCTS(i->rates, 1);
33391
33392 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33393 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33394 | set11nPktDurRTSCTS(i->rates, 3);
33395
33396 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33397 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33398 | set11nRateFlags(i->rates, 1)
33399 | set11nRateFlags(i->rates, 2)
33400 | set11nRateFlags(i->rates, 3)
33401 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33402 index f5ae3c6..7936af3 100644
33403 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33404 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33405 @@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33406 (i->qcu << AR_TxQcuNum_S) | 0x17;
33407
33408 checksum += val;
33409 - ACCESS_ONCE(ads->info) = val;
33410 + ACCESS_ONCE_RW(ads->info) = val;
33411
33412 checksum += i->link;
33413 - ACCESS_ONCE(ads->link) = i->link;
33414 + ACCESS_ONCE_RW(ads->link) = i->link;
33415
33416 checksum += i->buf_addr[0];
33417 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
33418 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
33419 checksum += i->buf_addr[1];
33420 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
33421 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
33422 checksum += i->buf_addr[2];
33423 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
33424 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
33425 checksum += i->buf_addr[3];
33426 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
33427 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
33428
33429 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
33430 - ACCESS_ONCE(ads->ctl3) = val;
33431 + ACCESS_ONCE_RW(ads->ctl3) = val;
33432 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
33433 - ACCESS_ONCE(ads->ctl5) = val;
33434 + ACCESS_ONCE_RW(ads->ctl5) = val;
33435 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
33436 - ACCESS_ONCE(ads->ctl7) = val;
33437 + ACCESS_ONCE_RW(ads->ctl7) = val;
33438 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
33439 - ACCESS_ONCE(ads->ctl9) = val;
33440 + ACCESS_ONCE_RW(ads->ctl9) = val;
33441
33442 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
33443 - ACCESS_ONCE(ads->ctl10) = checksum;
33444 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
33445
33446 if (i->is_first || i->is_last) {
33447 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
33448 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
33449 | set11nTries(i->rates, 1)
33450 | set11nTries(i->rates, 2)
33451 | set11nTries(i->rates, 3)
33452 | (i->dur_update ? AR_DurUpdateEna : 0)
33453 | SM(0, AR_BurstDur);
33454
33455 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
33456 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
33457 | set11nRate(i->rates, 1)
33458 | set11nRate(i->rates, 2)
33459 | set11nRate(i->rates, 3);
33460 } else {
33461 - ACCESS_ONCE(ads->ctl13) = 0;
33462 - ACCESS_ONCE(ads->ctl14) = 0;
33463 + ACCESS_ONCE_RW(ads->ctl13) = 0;
33464 + ACCESS_ONCE_RW(ads->ctl14) = 0;
33465 }
33466
33467 ads->ctl20 = 0;
33468 @@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33469
33470 ctl17 = SM(i->keytype, AR_EncrType);
33471 if (!i->is_first) {
33472 - ACCESS_ONCE(ads->ctl11) = 0;
33473 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33474 - ACCESS_ONCE(ads->ctl15) = 0;
33475 - ACCESS_ONCE(ads->ctl16) = 0;
33476 - ACCESS_ONCE(ads->ctl17) = ctl17;
33477 - ACCESS_ONCE(ads->ctl18) = 0;
33478 - ACCESS_ONCE(ads->ctl19) = 0;
33479 + ACCESS_ONCE_RW(ads->ctl11) = 0;
33480 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33481 + ACCESS_ONCE_RW(ads->ctl15) = 0;
33482 + ACCESS_ONCE_RW(ads->ctl16) = 0;
33483 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33484 + ACCESS_ONCE_RW(ads->ctl18) = 0;
33485 + ACCESS_ONCE_RW(ads->ctl19) = 0;
33486 return;
33487 }
33488
33489 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33490 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33491 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33492 | SM(i->txpower, AR_XmitPower)
33493 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33494 @@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33495 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
33496 ctl12 |= SM(val, AR_PAPRDChainMask);
33497
33498 - ACCESS_ONCE(ads->ctl12) = ctl12;
33499 - ACCESS_ONCE(ads->ctl17) = ctl17;
33500 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
33501 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33502
33503 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33504 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33505 | set11nPktDurRTSCTS(i->rates, 1);
33506
33507 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33508 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33509 | set11nPktDurRTSCTS(i->rates, 3);
33510
33511 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
33512 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
33513 | set11nRateFlags(i->rates, 1)
33514 | set11nRateFlags(i->rates, 2)
33515 | set11nRateFlags(i->rates, 3)
33516 | SM(i->rtscts_rate, AR_RTSCTSRate);
33517
33518 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
33519 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
33520 }
33521
33522 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
33523 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
33524 index f389b3c..7359e18 100644
33525 --- a/drivers/net/wireless/ath/ath9k/hw.h
33526 +++ b/drivers/net/wireless/ath/ath9k/hw.h
33527 @@ -605,7 +605,7 @@ struct ath_hw_private_ops {
33528
33529 /* ANI */
33530 void (*ani_cache_ini_regs)(struct ath_hw *ah);
33531 -};
33532 +} __no_const;
33533
33534 /**
33535 * struct ath_hw_ops - callbacks used by hardware code and driver code
33536 @@ -635,7 +635,7 @@ struct ath_hw_ops {
33537 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
33538 struct ath_hw_antcomb_conf *antconf);
33539
33540 -};
33541 +} __no_const;
33542
33543 struct ath_nf_limits {
33544 s16 max;
33545 @@ -655,7 +655,7 @@ enum ath_cal_list {
33546 #define AH_FASTCC 0x4
33547
33548 struct ath_hw {
33549 - struct ath_ops reg_ops;
33550 + ath_ops_no_const reg_ops;
33551
33552 struct ieee80211_hw *hw;
33553 struct ath_common common;
33554 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33555 index bea8524..c677c06 100644
33556 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33557 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33558 @@ -547,7 +547,7 @@ struct phy_func_ptr {
33559 void (*carrsuppr)(struct brcms_phy *);
33560 s32 (*rxsigpwr)(struct brcms_phy *, s32);
33561 void (*detach)(struct brcms_phy *);
33562 -};
33563 +} __no_const;
33564
33565 struct brcms_phy {
33566 struct brcms_phy_pub pubpi_ro;
33567 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33568 index 05f2ad1..ae00eea 100644
33569 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
33570 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33571 @@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
33572 */
33573 if (iwl3945_mod_params.disable_hw_scan) {
33574 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33575 - iwl3945_hw_ops.hw_scan = NULL;
33576 + pax_open_kernel();
33577 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33578 + pax_close_kernel();
33579 }
33580
33581 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33582 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
33583 index 69a77e2..552b42c 100644
33584 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
33585 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
33586 @@ -71,8 +71,8 @@ do { \
33587 } while (0)
33588
33589 #else
33590 -#define IWL_DEBUG(m, level, fmt, args...)
33591 -#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
33592 +#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
33593 +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
33594 #define iwl_print_hex_dump(m, level, p, len)
33595 #endif /* CONFIG_IWLWIFI_DEBUG */
33596
33597 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
33598 index 523ad55..f8c5dc5 100644
33599 --- a/drivers/net/wireless/mac80211_hwsim.c
33600 +++ b/drivers/net/wireless/mac80211_hwsim.c
33601 @@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
33602 return -EINVAL;
33603
33604 if (fake_hw_scan) {
33605 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33606 - mac80211_hwsim_ops.sw_scan_start = NULL;
33607 - mac80211_hwsim_ops.sw_scan_complete = NULL;
33608 + pax_open_kernel();
33609 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33610 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33611 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33612 + pax_close_kernel();
33613 }
33614
33615 spin_lock_init(&hwsim_radio_lock);
33616 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
33617 index 30f138b..c904585 100644
33618 --- a/drivers/net/wireless/mwifiex/main.h
33619 +++ b/drivers/net/wireless/mwifiex/main.h
33620 @@ -543,7 +543,7 @@ struct mwifiex_if_ops {
33621 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33622 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
33623 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
33624 -};
33625 +} __no_const;
33626
33627 struct mwifiex_adapter {
33628 u8 iface_type;
33629 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
33630 index 0c13840..a5c3ed6 100644
33631 --- a/drivers/net/wireless/rndis_wlan.c
33632 +++ b/drivers/net/wireless/rndis_wlan.c
33633 @@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
33634
33635 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33636
33637 - if (rts_threshold < 0 || rts_threshold > 2347)
33638 + if (rts_threshold > 2347)
33639 rts_threshold = 2347;
33640
33641 tmp = cpu_to_le32(rts_threshold);
33642 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
33643 index a77f1bb..c608b2b 100644
33644 --- a/drivers/net/wireless/wl1251/wl1251.h
33645 +++ b/drivers/net/wireless/wl1251/wl1251.h
33646 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
33647 void (*reset)(struct wl1251 *wl);
33648 void (*enable_irq)(struct wl1251 *wl);
33649 void (*disable_irq)(struct wl1251 *wl);
33650 -};
33651 +} __no_const;
33652
33653 struct wl1251 {
33654 struct ieee80211_hw *hw;
33655 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
33656 index f34b5b2..b5abb9f 100644
33657 --- a/drivers/oprofile/buffer_sync.c
33658 +++ b/drivers/oprofile/buffer_sync.c
33659 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
33660 if (cookie == NO_COOKIE)
33661 offset = pc;
33662 if (cookie == INVALID_COOKIE) {
33663 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33664 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33665 offset = pc;
33666 }
33667 if (cookie != last_cookie) {
33668 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
33669 /* add userspace sample */
33670
33671 if (!mm) {
33672 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
33673 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33674 return 0;
33675 }
33676
33677 cookie = lookup_dcookie(mm, s->eip, &offset);
33678
33679 if (cookie == INVALID_COOKIE) {
33680 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33681 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33682 return 0;
33683 }
33684
33685 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33686 /* ignore backtraces if failed to add a sample */
33687 if (state == sb_bt_start) {
33688 state = sb_bt_ignore;
33689 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33690 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33691 }
33692 }
33693 release_mm(mm);
33694 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
33695 index c0cc4e7..44d4e54 100644
33696 --- a/drivers/oprofile/event_buffer.c
33697 +++ b/drivers/oprofile/event_buffer.c
33698 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
33699 }
33700
33701 if (buffer_pos == buffer_size) {
33702 - atomic_inc(&oprofile_stats.event_lost_overflow);
33703 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33704 return;
33705 }
33706
33707 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
33708 index f8c752e..28bf4fc 100644
33709 --- a/drivers/oprofile/oprof.c
33710 +++ b/drivers/oprofile/oprof.c
33711 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
33712 if (oprofile_ops.switch_events())
33713 return;
33714
33715 - atomic_inc(&oprofile_stats.multiplex_counter);
33716 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33717 start_switch_worker();
33718 }
33719
33720 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
33721 index 917d28e..d62d981 100644
33722 --- a/drivers/oprofile/oprofile_stats.c
33723 +++ b/drivers/oprofile/oprofile_stats.c
33724 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33725 cpu_buf->sample_invalid_eip = 0;
33726 }
33727
33728 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33729 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33730 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
33731 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33732 - atomic_set(&oprofile_stats.multiplex_counter, 0);
33733 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33734 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33735 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33736 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33737 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33738 }
33739
33740
33741 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
33742 index 38b6fc0..b5cbfce 100644
33743 --- a/drivers/oprofile/oprofile_stats.h
33744 +++ b/drivers/oprofile/oprofile_stats.h
33745 @@ -13,11 +13,11 @@
33746 #include <linux/atomic.h>
33747
33748 struct oprofile_stat_struct {
33749 - atomic_t sample_lost_no_mm;
33750 - atomic_t sample_lost_no_mapping;
33751 - atomic_t bt_lost_no_mapping;
33752 - atomic_t event_lost_overflow;
33753 - atomic_t multiplex_counter;
33754 + atomic_unchecked_t sample_lost_no_mm;
33755 + atomic_unchecked_t sample_lost_no_mapping;
33756 + atomic_unchecked_t bt_lost_no_mapping;
33757 + atomic_unchecked_t event_lost_overflow;
33758 + atomic_unchecked_t multiplex_counter;
33759 };
33760
33761 extern struct oprofile_stat_struct oprofile_stats;
33762 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
33763 index 2f0aa0f..90fab02 100644
33764 --- a/drivers/oprofile/oprofilefs.c
33765 +++ b/drivers/oprofile/oprofilefs.c
33766 @@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
33767
33768
33769 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33770 - char const *name, atomic_t *val)
33771 + char const *name, atomic_unchecked_t *val)
33772 {
33773 return __oprofilefs_create_file(sb, root, name,
33774 &atomic_ro_fops, 0444, val);
33775 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
33776 index 3f56bc0..707d642 100644
33777 --- a/drivers/parport/procfs.c
33778 +++ b/drivers/parport/procfs.c
33779 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
33780
33781 *ppos += len;
33782
33783 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33784 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33785 }
33786
33787 #ifdef CONFIG_PARPORT_1284
33788 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
33789
33790 *ppos += len;
33791
33792 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33793 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33794 }
33795 #endif /* IEEE1284.3 support. */
33796
33797 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
33798 index 9fff878..ad0ad53 100644
33799 --- a/drivers/pci/hotplug/cpci_hotplug.h
33800 +++ b/drivers/pci/hotplug/cpci_hotplug.h
33801 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33802 int (*hardware_test) (struct slot* slot, u32 value);
33803 u8 (*get_power) (struct slot* slot);
33804 int (*set_power) (struct slot* slot, int value);
33805 -};
33806 +} __no_const;
33807
33808 struct cpci_hp_controller {
33809 unsigned int irq;
33810 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
33811 index 76ba8a1..20ca857 100644
33812 --- a/drivers/pci/hotplug/cpqphp_nvram.c
33813 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
33814 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
33815
33816 void compaq_nvram_init (void __iomem *rom_start)
33817 {
33818 +
33819 +#ifndef CONFIG_PAX_KERNEXEC
33820 if (rom_start) {
33821 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33822 }
33823 +#endif
33824 +
33825 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33826
33827 /* initialize our int15 lock */
33828 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
33829 index 1cfbf22..be96487 100644
33830 --- a/drivers/pci/pcie/aspm.c
33831 +++ b/drivers/pci/pcie/aspm.c
33832 @@ -27,9 +27,9 @@
33833 #define MODULE_PARAM_PREFIX "pcie_aspm."
33834
33835 /* Note: those are not register definitions */
33836 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33837 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33838 -#define ASPM_STATE_L1 (4) /* L1 state */
33839 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33840 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33841 +#define ASPM_STATE_L1 (4U) /* L1 state */
33842 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33843 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33844
33845 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
33846 index dfee1b3..a454fb6 100644
33847 --- a/drivers/pci/probe.c
33848 +++ b/drivers/pci/probe.c
33849 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
33850 u32 l, sz, mask;
33851 u16 orig_cmd;
33852
33853 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33854 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33855
33856 if (!dev->mmio_always_on) {
33857 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33858 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
33859 index 27911b5..5b6db88 100644
33860 --- a/drivers/pci/proc.c
33861 +++ b/drivers/pci/proc.c
33862 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
33863 static int __init pci_proc_init(void)
33864 {
33865 struct pci_dev *dev = NULL;
33866 +
33867 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33868 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33869 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33870 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33871 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33872 +#endif
33873 +#else
33874 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33875 +#endif
33876 proc_create("devices", 0, proc_bus_pci_dir,
33877 &proc_bus_pci_dev_operations);
33878 proc_initialized = 1;
33879 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
33880 index 7b82868..b9344c9 100644
33881 --- a/drivers/platform/x86/thinkpad_acpi.c
33882 +++ b/drivers/platform/x86/thinkpad_acpi.c
33883 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33884 return 0;
33885 }
33886
33887 -void static hotkey_mask_warn_incomplete_mask(void)
33888 +static void hotkey_mask_warn_incomplete_mask(void)
33889 {
33890 /* log only what the user can fix... */
33891 const u32 wantedmask = hotkey_driver_mask &
33892 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
33893 }
33894 }
33895
33896 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33897 - struct tp_nvram_state *newn,
33898 - const u32 event_mask)
33899 -{
33900 -
33901 #define TPACPI_COMPARE_KEY(__scancode, __member) \
33902 do { \
33903 if ((event_mask & (1 << __scancode)) && \
33904 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33905 tpacpi_hotkey_send_key(__scancode); \
33906 } while (0)
33907
33908 - void issue_volchange(const unsigned int oldvol,
33909 - const unsigned int newvol)
33910 - {
33911 - unsigned int i = oldvol;
33912 +static void issue_volchange(const unsigned int oldvol,
33913 + const unsigned int newvol,
33914 + const u32 event_mask)
33915 +{
33916 + unsigned int i = oldvol;
33917
33918 - while (i > newvol) {
33919 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33920 - i--;
33921 - }
33922 - while (i < newvol) {
33923 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33924 - i++;
33925 - }
33926 + while (i > newvol) {
33927 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33928 + i--;
33929 }
33930 + while (i < newvol) {
33931 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33932 + i++;
33933 + }
33934 +}
33935
33936 - void issue_brightnesschange(const unsigned int oldbrt,
33937 - const unsigned int newbrt)
33938 - {
33939 - unsigned int i = oldbrt;
33940 +static void issue_brightnesschange(const unsigned int oldbrt,
33941 + const unsigned int newbrt,
33942 + const u32 event_mask)
33943 +{
33944 + unsigned int i = oldbrt;
33945
33946 - while (i > newbrt) {
33947 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33948 - i--;
33949 - }
33950 - while (i < newbrt) {
33951 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33952 - i++;
33953 - }
33954 + while (i > newbrt) {
33955 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33956 + i--;
33957 + }
33958 + while (i < newbrt) {
33959 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33960 + i++;
33961 }
33962 +}
33963
33964 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33965 + struct tp_nvram_state *newn,
33966 + const u32 event_mask)
33967 +{
33968 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
33969 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
33970 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
33971 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33972 oldn->volume_level != newn->volume_level) {
33973 /* recently muted, or repeated mute keypress, or
33974 * multiple presses ending in mute */
33975 - issue_volchange(oldn->volume_level, newn->volume_level);
33976 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33977 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
33978 }
33979 } else {
33980 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33981 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33982 }
33983 if (oldn->volume_level != newn->volume_level) {
33984 - issue_volchange(oldn->volume_level, newn->volume_level);
33985 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33986 } else if (oldn->volume_toggle != newn->volume_toggle) {
33987 /* repeated vol up/down keypress at end of scale ? */
33988 if (newn->volume_level == 0)
33989 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33990 /* handle brightness */
33991 if (oldn->brightness_level != newn->brightness_level) {
33992 issue_brightnesschange(oldn->brightness_level,
33993 - newn->brightness_level);
33994 + newn->brightness_level,
33995 + event_mask);
33996 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
33997 /* repeated key presses that didn't change state */
33998 if (newn->brightness_level == 0)
33999 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
34000 && !tp_features.bright_unkfw)
34001 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
34002 }
34003 +}
34004
34005 #undef TPACPI_COMPARE_KEY
34006 #undef TPACPI_MAY_SEND_KEY
34007 -}
34008
34009 /*
34010 * Polling driver
34011 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
34012 index b859d16..5cc6b1a 100644
34013 --- a/drivers/pnp/pnpbios/bioscalls.c
34014 +++ b/drivers/pnp/pnpbios/bioscalls.c
34015 @@ -59,7 +59,7 @@ do { \
34016 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
34017 } while(0)
34018
34019 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
34020 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
34021 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
34022
34023 /*
34024 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
34025
34026 cpu = get_cpu();
34027 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
34028 +
34029 + pax_open_kernel();
34030 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
34031 + pax_close_kernel();
34032
34033 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
34034 spin_lock_irqsave(&pnp_bios_lock, flags);
34035 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
34036 :"memory");
34037 spin_unlock_irqrestore(&pnp_bios_lock, flags);
34038
34039 + pax_open_kernel();
34040 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
34041 + pax_close_kernel();
34042 +
34043 put_cpu();
34044
34045 /* If we get here and this is set then the PnP BIOS faulted on us. */
34046 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
34047 return status;
34048 }
34049
34050 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
34051 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
34052 {
34053 int i;
34054
34055 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
34056 pnp_bios_callpoint.offset = header->fields.pm16offset;
34057 pnp_bios_callpoint.segment = PNP_CS16;
34058
34059 + pax_open_kernel();
34060 +
34061 for_each_possible_cpu(i) {
34062 struct desc_struct *gdt = get_cpu_gdt_table(i);
34063 if (!gdt)
34064 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
34065 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
34066 (unsigned long)__va(header->fields.pm16dseg));
34067 }
34068 +
34069 + pax_close_kernel();
34070 }
34071 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
34072 index b0ecacb..7c9da2e 100644
34073 --- a/drivers/pnp/resource.c
34074 +++ b/drivers/pnp/resource.c
34075 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
34076 return 1;
34077
34078 /* check if the resource is valid */
34079 - if (*irq < 0 || *irq > 15)
34080 + if (*irq > 15)
34081 return 0;
34082
34083 /* check if the resource is reserved */
34084 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
34085 return 1;
34086
34087 /* check if the resource is valid */
34088 - if (*dma < 0 || *dma == 4 || *dma > 7)
34089 + if (*dma == 4 || *dma > 7)
34090 return 0;
34091
34092 /* check if the resource is reserved */
34093 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
34094 index bb16f5b..c751eef 100644
34095 --- a/drivers/power/bq27x00_battery.c
34096 +++ b/drivers/power/bq27x00_battery.c
34097 @@ -67,7 +67,7 @@
34098 struct bq27x00_device_info;
34099 struct bq27x00_access_methods {
34100 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
34101 -};
34102 +} __no_const;
34103
34104 enum bq27x00_chip { BQ27000, BQ27500 };
34105
34106 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
34107 index 33f5d9a..d957d3f 100644
34108 --- a/drivers/regulator/max8660.c
34109 +++ b/drivers/regulator/max8660.c
34110 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
34111 max8660->shadow_regs[MAX8660_OVER1] = 5;
34112 } else {
34113 /* Otherwise devices can be toggled via software */
34114 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
34115 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
34116 + pax_open_kernel();
34117 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
34118 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
34119 + pax_close_kernel();
34120 }
34121
34122 /*
34123 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
34124 index 023d17d..74ef35b 100644
34125 --- a/drivers/regulator/mc13892-regulator.c
34126 +++ b/drivers/regulator/mc13892-regulator.c
34127 @@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
34128 }
34129 mc13xxx_unlock(mc13892);
34130
34131 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
34132 + pax_open_kernel();
34133 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
34134 = mc13892_vcam_set_mode;
34135 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
34136 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
34137 = mc13892_vcam_get_mode;
34138 + pax_close_kernel();
34139 for (i = 0; i < pdata->num_regulators; i++) {
34140 init_data = &pdata->regulators[i];
34141 priv->regulators[i] = regulator_register(
34142 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
34143 index cace6d3..f623fda 100644
34144 --- a/drivers/rtc/rtc-dev.c
34145 +++ b/drivers/rtc/rtc-dev.c
34146 @@ -14,6 +14,7 @@
34147 #include <linux/module.h>
34148 #include <linux/rtc.h>
34149 #include <linux/sched.h>
34150 +#include <linux/grsecurity.h>
34151 #include "rtc-core.h"
34152
34153 static dev_t rtc_devt;
34154 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
34155 if (copy_from_user(&tm, uarg, sizeof(tm)))
34156 return -EFAULT;
34157
34158 + gr_log_timechange();
34159 +
34160 return rtc_set_time(rtc, &tm);
34161
34162 case RTC_PIE_ON:
34163 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
34164 index ffb5878..e6d785c 100644
34165 --- a/drivers/scsi/aacraid/aacraid.h
34166 +++ b/drivers/scsi/aacraid/aacraid.h
34167 @@ -492,7 +492,7 @@ struct adapter_ops
34168 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
34169 /* Administrative operations */
34170 int (*adapter_comm)(struct aac_dev * dev, int comm);
34171 -};
34172 +} __no_const;
34173
34174 /*
34175 * Define which interrupt handler needs to be installed
34176 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
34177 index 705e13e..91c873c 100644
34178 --- a/drivers/scsi/aacraid/linit.c
34179 +++ b/drivers/scsi/aacraid/linit.c
34180 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
34181 #elif defined(__devinitconst)
34182 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34183 #else
34184 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
34185 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34186 #endif
34187 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
34188 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
34189 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
34190 index d5ff142..49c0ebb 100644
34191 --- a/drivers/scsi/aic94xx/aic94xx_init.c
34192 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
34193 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
34194 .lldd_control_phy = asd_control_phy,
34195 };
34196
34197 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
34198 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
34199 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
34200 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
34201 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
34202 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
34203 index a796de9..1ef20e1 100644
34204 --- a/drivers/scsi/bfa/bfa.h
34205 +++ b/drivers/scsi/bfa/bfa.h
34206 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
34207 u32 *end);
34208 int cpe_vec_q0;
34209 int rme_vec_q0;
34210 -};
34211 +} __no_const;
34212 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
34213
34214 struct bfa_faa_cbfn_s {
34215 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
34216 index e07bd47..cd1bbbb 100644
34217 --- a/drivers/scsi/bfa/bfa_fcpim.c
34218 +++ b/drivers/scsi/bfa/bfa_fcpim.c
34219 @@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
34220
34221 bfa_iotag_attach(fcp);
34222
34223 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
34224 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
34225 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
34226 (fcp->num_itns * sizeof(struct bfa_itn_s));
34227 memset(fcp->itn_arr, 0,
34228 @@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34229 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
34230 {
34231 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
34232 - struct bfa_itn_s *itn;
34233 + bfa_itn_s_no_const *itn;
34234
34235 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
34236 itn->isr = isr;
34237 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
34238 index 1080bcb..a3b39e3 100644
34239 --- a/drivers/scsi/bfa/bfa_fcpim.h
34240 +++ b/drivers/scsi/bfa/bfa_fcpim.h
34241 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
34242 struct bfa_itn_s {
34243 bfa_isr_func_t isr;
34244 };
34245 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
34246
34247 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34248 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
34249 @@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
34250 struct list_head iotag_tio_free_q; /* free IO resources */
34251 struct list_head iotag_unused_q; /* unused IO resources*/
34252 struct bfa_iotag_s *iotag_arr;
34253 - struct bfa_itn_s *itn_arr;
34254 + bfa_itn_s_no_const *itn_arr;
34255 int num_ioim_reqs;
34256 int num_fwtio_reqs;
34257 int num_itns;
34258 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
34259 index 546d46b..642fa5b 100644
34260 --- a/drivers/scsi/bfa/bfa_ioc.h
34261 +++ b/drivers/scsi/bfa/bfa_ioc.h
34262 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
34263 bfa_ioc_disable_cbfn_t disable_cbfn;
34264 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
34265 bfa_ioc_reset_cbfn_t reset_cbfn;
34266 -};
34267 +} __no_const;
34268
34269 /*
34270 * IOC event notification mechanism.
34271 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
34272 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
34273 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
34274 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
34275 -};
34276 +} __no_const;
34277
34278 /*
34279 * Queue element to wait for room in request queue. FIFO order is
34280 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
34281 index 351dc0b..951dc32 100644
34282 --- a/drivers/scsi/hosts.c
34283 +++ b/drivers/scsi/hosts.c
34284 @@ -42,7 +42,7 @@
34285 #include "scsi_logging.h"
34286
34287
34288 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
34289 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34290
34291
34292 static void scsi_host_cls_release(struct device *dev)
34293 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
34294 * subtract one because we increment first then return, but we need to
34295 * know what the next host number was before increment
34296 */
34297 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34298 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34299 shost->dma_channel = 0xff;
34300
34301 /* These three are default values which can be overridden */
34302 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
34303 index 865d452..e9b7fa7 100644
34304 --- a/drivers/scsi/hpsa.c
34305 +++ b/drivers/scsi/hpsa.c
34306 @@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
34307 u32 a;
34308
34309 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34310 - return h->access.command_completed(h);
34311 + return h->access->command_completed(h);
34312
34313 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34314 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34315 @@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
34316 while (!list_empty(&h->reqQ)) {
34317 c = list_entry(h->reqQ.next, struct CommandList, list);
34318 /* can't do anything if fifo is full */
34319 - if ((h->access.fifo_full(h))) {
34320 + if ((h->access->fifo_full(h))) {
34321 dev_warn(&h->pdev->dev, "fifo full\n");
34322 break;
34323 }
34324 @@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
34325 h->Qdepth--;
34326
34327 /* Tell the controller execute command */
34328 - h->access.submit_command(h, c);
34329 + h->access->submit_command(h, c);
34330
34331 /* Put job onto the completed Q */
34332 addQ(&h->cmpQ, c);
34333 @@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
34334
34335 static inline unsigned long get_next_completion(struct ctlr_info *h)
34336 {
34337 - return h->access.command_completed(h);
34338 + return h->access->command_completed(h);
34339 }
34340
34341 static inline bool interrupt_pending(struct ctlr_info *h)
34342 {
34343 - return h->access.intr_pending(h);
34344 + return h->access->intr_pending(h);
34345 }
34346
34347 static inline long interrupt_not_for_us(struct ctlr_info *h)
34348 {
34349 - return (h->access.intr_pending(h) == 0) ||
34350 + return (h->access->intr_pending(h) == 0) ||
34351 (h->interrupts_enabled == 0);
34352 }
34353
34354 @@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
34355 if (prod_index < 0)
34356 return -ENODEV;
34357 h->product_name = products[prod_index].product_name;
34358 - h->access = *(products[prod_index].access);
34359 + h->access = products[prod_index].access;
34360
34361 if (hpsa_board_disabled(h->pdev)) {
34362 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34363 @@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
34364
34365 assert_spin_locked(&lockup_detector_lock);
34366 remove_ctlr_from_lockup_detector_list(h);
34367 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34368 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34369 spin_lock_irqsave(&h->lock, flags);
34370 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
34371 spin_unlock_irqrestore(&h->lock, flags);
34372 @@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
34373 }
34374
34375 /* make sure the board interrupts are off */
34376 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34377 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34378
34379 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
34380 goto clean2;
34381 @@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
34382 * fake ones to scoop up any residual completions.
34383 */
34384 spin_lock_irqsave(&h->lock, flags);
34385 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34386 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34387 spin_unlock_irqrestore(&h->lock, flags);
34388 free_irq(h->intr[h->intr_mode], h);
34389 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
34390 @@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
34391 dev_info(&h->pdev->dev, "Board READY.\n");
34392 dev_info(&h->pdev->dev,
34393 "Waiting for stale completions to drain.\n");
34394 - h->access.set_intr_mask(h, HPSA_INTR_ON);
34395 + h->access->set_intr_mask(h, HPSA_INTR_ON);
34396 msleep(10000);
34397 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34398 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34399
34400 rc = controller_reset_failed(h->cfgtable);
34401 if (rc)
34402 @@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
34403 }
34404
34405 /* Turn the interrupts on so we can service requests */
34406 - h->access.set_intr_mask(h, HPSA_INTR_ON);
34407 + h->access->set_intr_mask(h, HPSA_INTR_ON);
34408
34409 hpsa_hba_inquiry(h);
34410 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
34411 @@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
34412 * To write all data in the battery backed cache to disks
34413 */
34414 hpsa_flush_cache(h);
34415 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34416 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34417 free_irq(h->intr[h->intr_mode], h);
34418 #ifdef CONFIG_PCI_MSI
34419 if (h->msix_vector)
34420 @@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
34421 return;
34422 }
34423 /* Change the access methods to the performant access methods */
34424 - h->access = SA5_performant_access;
34425 + h->access = &SA5_performant_access;
34426 h->transMethod = CFGTBL_Trans_Performant;
34427 }
34428
34429 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
34430 index 91edafb..a9b88ec 100644
34431 --- a/drivers/scsi/hpsa.h
34432 +++ b/drivers/scsi/hpsa.h
34433 @@ -73,7 +73,7 @@ struct ctlr_info {
34434 unsigned int msix_vector;
34435 unsigned int msi_vector;
34436 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
34437 - struct access_method access;
34438 + struct access_method *access;
34439
34440 /* queue and queue Info */
34441 struct list_head reqQ;
34442 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
34443 index f2df059..a3a9930 100644
34444 --- a/drivers/scsi/ips.h
34445 +++ b/drivers/scsi/ips.h
34446 @@ -1027,7 +1027,7 @@ typedef struct {
34447 int (*intr)(struct ips_ha *);
34448 void (*enableint)(struct ips_ha *);
34449 uint32_t (*statupd)(struct ips_ha *);
34450 -} ips_hw_func_t;
34451 +} __no_const ips_hw_func_t;
34452
34453 typedef struct ips_ha {
34454 uint8_t ha_id[IPS_MAX_CHANNELS+1];
34455 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
34456 index 9de9db2..1e09660 100644
34457 --- a/drivers/scsi/libfc/fc_exch.c
34458 +++ b/drivers/scsi/libfc/fc_exch.c
34459 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
34460 * all together if not used XXX
34461 */
34462 struct {
34463 - atomic_t no_free_exch;
34464 - atomic_t no_free_exch_xid;
34465 - atomic_t xid_not_found;
34466 - atomic_t xid_busy;
34467 - atomic_t seq_not_found;
34468 - atomic_t non_bls_resp;
34469 + atomic_unchecked_t no_free_exch;
34470 + atomic_unchecked_t no_free_exch_xid;
34471 + atomic_unchecked_t xid_not_found;
34472 + atomic_unchecked_t xid_busy;
34473 + atomic_unchecked_t seq_not_found;
34474 + atomic_unchecked_t non_bls_resp;
34475 } stats;
34476 };
34477
34478 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
34479 /* allocate memory for exchange */
34480 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34481 if (!ep) {
34482 - atomic_inc(&mp->stats.no_free_exch);
34483 + atomic_inc_unchecked(&mp->stats.no_free_exch);
34484 goto out;
34485 }
34486 memset(ep, 0, sizeof(*ep));
34487 @@ -780,7 +780,7 @@ out:
34488 return ep;
34489 err:
34490 spin_unlock_bh(&pool->lock);
34491 - atomic_inc(&mp->stats.no_free_exch_xid);
34492 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34493 mempool_free(ep, mp->ep_pool);
34494 return NULL;
34495 }
34496 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34497 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34498 ep = fc_exch_find(mp, xid);
34499 if (!ep) {
34500 - atomic_inc(&mp->stats.xid_not_found);
34501 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34502 reject = FC_RJT_OX_ID;
34503 goto out;
34504 }
34505 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34506 ep = fc_exch_find(mp, xid);
34507 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34508 if (ep) {
34509 - atomic_inc(&mp->stats.xid_busy);
34510 + atomic_inc_unchecked(&mp->stats.xid_busy);
34511 reject = FC_RJT_RX_ID;
34512 goto rel;
34513 }
34514 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34515 }
34516 xid = ep->xid; /* get our XID */
34517 } else if (!ep) {
34518 - atomic_inc(&mp->stats.xid_not_found);
34519 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34520 reject = FC_RJT_RX_ID; /* XID not found */
34521 goto out;
34522 }
34523 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34524 } else {
34525 sp = &ep->seq;
34526 if (sp->id != fh->fh_seq_id) {
34527 - atomic_inc(&mp->stats.seq_not_found);
34528 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34529 if (f_ctl & FC_FC_END_SEQ) {
34530 /*
34531 * Update sequence_id based on incoming last
34532 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34533
34534 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34535 if (!ep) {
34536 - atomic_inc(&mp->stats.xid_not_found);
34537 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34538 goto out;
34539 }
34540 if (ep->esb_stat & ESB_ST_COMPLETE) {
34541 - atomic_inc(&mp->stats.xid_not_found);
34542 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34543 goto rel;
34544 }
34545 if (ep->rxid == FC_XID_UNKNOWN)
34546 ep->rxid = ntohs(fh->fh_rx_id);
34547 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34548 - atomic_inc(&mp->stats.xid_not_found);
34549 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34550 goto rel;
34551 }
34552 if (ep->did != ntoh24(fh->fh_s_id) &&
34553 ep->did != FC_FID_FLOGI) {
34554 - atomic_inc(&mp->stats.xid_not_found);
34555 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34556 goto rel;
34557 }
34558 sof = fr_sof(fp);
34559 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34560 sp->ssb_stat |= SSB_ST_RESP;
34561 sp->id = fh->fh_seq_id;
34562 } else if (sp->id != fh->fh_seq_id) {
34563 - atomic_inc(&mp->stats.seq_not_found);
34564 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34565 goto rel;
34566 }
34567
34568 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34569 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34570
34571 if (!sp)
34572 - atomic_inc(&mp->stats.xid_not_found);
34573 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34574 else
34575 - atomic_inc(&mp->stats.non_bls_resp);
34576 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
34577
34578 fc_frame_free(fp);
34579 }
34580 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
34581 index db9238f..4378ed2 100644
34582 --- a/drivers/scsi/libsas/sas_ata.c
34583 +++ b/drivers/scsi/libsas/sas_ata.c
34584 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
34585 .postreset = ata_std_postreset,
34586 .error_handler = ata_std_error_handler,
34587 .post_internal_cmd = sas_ata_post_internal,
34588 - .qc_defer = ata_std_qc_defer,
34589 + .qc_defer = ata_std_qc_defer,
34590 .qc_prep = ata_noop_qc_prep,
34591 .qc_issue = sas_ata_qc_issue,
34592 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34593 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
34594 index bb4c8e0..f33d849 100644
34595 --- a/drivers/scsi/lpfc/lpfc.h
34596 +++ b/drivers/scsi/lpfc/lpfc.h
34597 @@ -425,7 +425,7 @@ struct lpfc_vport {
34598 struct dentry *debug_nodelist;
34599 struct dentry *vport_debugfs_root;
34600 struct lpfc_debugfs_trc *disc_trc;
34601 - atomic_t disc_trc_cnt;
34602 + atomic_unchecked_t disc_trc_cnt;
34603 #endif
34604 uint8_t stat_data_enabled;
34605 uint8_t stat_data_blocked;
34606 @@ -835,8 +835,8 @@ struct lpfc_hba {
34607 struct timer_list fabric_block_timer;
34608 unsigned long bit_flags;
34609 #define FABRIC_COMANDS_BLOCKED 0
34610 - atomic_t num_rsrc_err;
34611 - atomic_t num_cmd_success;
34612 + atomic_unchecked_t num_rsrc_err;
34613 + atomic_unchecked_t num_cmd_success;
34614 unsigned long last_rsrc_error_time;
34615 unsigned long last_ramp_down_time;
34616 unsigned long last_ramp_up_time;
34617 @@ -866,7 +866,7 @@ struct lpfc_hba {
34618
34619 struct dentry *debug_slow_ring_trc;
34620 struct lpfc_debugfs_trc *slow_ring_trc;
34621 - atomic_t slow_ring_trc_cnt;
34622 + atomic_unchecked_t slow_ring_trc_cnt;
34623 /* iDiag debugfs sub-directory */
34624 struct dentry *idiag_root;
34625 struct dentry *idiag_pci_cfg;
34626 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
34627 index 2838259..a07cfb5 100644
34628 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
34629 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
34630 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
34631
34632 #include <linux/debugfs.h>
34633
34634 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34635 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34636 static unsigned long lpfc_debugfs_start_time = 0L;
34637
34638 /* iDiag */
34639 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
34640 lpfc_debugfs_enable = 0;
34641
34642 len = 0;
34643 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34644 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34645 (lpfc_debugfs_max_disc_trc - 1);
34646 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34647 dtp = vport->disc_trc + i;
34648 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
34649 lpfc_debugfs_enable = 0;
34650
34651 len = 0;
34652 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34653 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34654 (lpfc_debugfs_max_slow_ring_trc - 1);
34655 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34656 dtp = phba->slow_ring_trc + i;
34657 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
34658 !vport || !vport->disc_trc)
34659 return;
34660
34661 - index = atomic_inc_return(&vport->disc_trc_cnt) &
34662 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34663 (lpfc_debugfs_max_disc_trc - 1);
34664 dtp = vport->disc_trc + index;
34665 dtp->fmt = fmt;
34666 dtp->data1 = data1;
34667 dtp->data2 = data2;
34668 dtp->data3 = data3;
34669 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34670 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34671 dtp->jif = jiffies;
34672 #endif
34673 return;
34674 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
34675 !phba || !phba->slow_ring_trc)
34676 return;
34677
34678 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34679 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34680 (lpfc_debugfs_max_slow_ring_trc - 1);
34681 dtp = phba->slow_ring_trc + index;
34682 dtp->fmt = fmt;
34683 dtp->data1 = data1;
34684 dtp->data2 = data2;
34685 dtp->data3 = data3;
34686 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34687 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34688 dtp->jif = jiffies;
34689 #endif
34690 return;
34691 @@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34692 "slow_ring buffer\n");
34693 goto debug_failed;
34694 }
34695 - atomic_set(&phba->slow_ring_trc_cnt, 0);
34696 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34697 memset(phba->slow_ring_trc, 0,
34698 (sizeof(struct lpfc_debugfs_trc) *
34699 lpfc_debugfs_max_slow_ring_trc));
34700 @@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34701 "buffer\n");
34702 goto debug_failed;
34703 }
34704 - atomic_set(&vport->disc_trc_cnt, 0);
34705 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34706
34707 snprintf(name, sizeof(name), "discovery_trace");
34708 vport->debug_disc_trc =
34709 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
34710 index 55bc4fc..a2a109c 100644
34711 --- a/drivers/scsi/lpfc/lpfc_init.c
34712 +++ b/drivers/scsi/lpfc/lpfc_init.c
34713 @@ -10027,8 +10027,10 @@ lpfc_init(void)
34714 printk(LPFC_COPYRIGHT "\n");
34715
34716 if (lpfc_enable_npiv) {
34717 - lpfc_transport_functions.vport_create = lpfc_vport_create;
34718 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34719 + pax_open_kernel();
34720 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34721 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34722 + pax_close_kernel();
34723 }
34724 lpfc_transport_template =
34725 fc_attach_transport(&lpfc_transport_functions);
34726 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
34727 index 2e1e54e..1af0a0d 100644
34728 --- a/drivers/scsi/lpfc/lpfc_scsi.c
34729 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
34730 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
34731 uint32_t evt_posted;
34732
34733 spin_lock_irqsave(&phba->hbalock, flags);
34734 - atomic_inc(&phba->num_rsrc_err);
34735 + atomic_inc_unchecked(&phba->num_rsrc_err);
34736 phba->last_rsrc_error_time = jiffies;
34737
34738 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34739 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
34740 unsigned long flags;
34741 struct lpfc_hba *phba = vport->phba;
34742 uint32_t evt_posted;
34743 - atomic_inc(&phba->num_cmd_success);
34744 + atomic_inc_unchecked(&phba->num_cmd_success);
34745
34746 if (vport->cfg_lun_queue_depth <= queue_depth)
34747 return;
34748 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34749 unsigned long num_rsrc_err, num_cmd_success;
34750 int i;
34751
34752 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34753 - num_cmd_success = atomic_read(&phba->num_cmd_success);
34754 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34755 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34756
34757 vports = lpfc_create_vport_work_array(phba);
34758 if (vports != NULL)
34759 @@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34760 }
34761 }
34762 lpfc_destroy_vport_work_array(phba, vports);
34763 - atomic_set(&phba->num_rsrc_err, 0);
34764 - atomic_set(&phba->num_cmd_success, 0);
34765 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34766 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34767 }
34768
34769 /**
34770 @@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
34771 }
34772 }
34773 lpfc_destroy_vport_work_array(phba, vports);
34774 - atomic_set(&phba->num_rsrc_err, 0);
34775 - atomic_set(&phba->num_cmd_success, 0);
34776 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34777 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34778 }
34779
34780 /**
34781 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
34782 index 5163edb..7b142bc 100644
34783 --- a/drivers/scsi/pmcraid.c
34784 +++ b/drivers/scsi/pmcraid.c
34785 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
34786 res->scsi_dev = scsi_dev;
34787 scsi_dev->hostdata = res;
34788 res->change_detected = 0;
34789 - atomic_set(&res->read_failures, 0);
34790 - atomic_set(&res->write_failures, 0);
34791 + atomic_set_unchecked(&res->read_failures, 0);
34792 + atomic_set_unchecked(&res->write_failures, 0);
34793 rc = 0;
34794 }
34795 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34796 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
34797
34798 /* If this was a SCSI read/write command keep count of errors */
34799 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34800 - atomic_inc(&res->read_failures);
34801 + atomic_inc_unchecked(&res->read_failures);
34802 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34803 - atomic_inc(&res->write_failures);
34804 + atomic_inc_unchecked(&res->write_failures);
34805
34806 if (!RES_IS_GSCSI(res->cfg_entry) &&
34807 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34808 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
34809 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34810 * hrrq_id assigned here in queuecommand
34811 */
34812 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34813 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34814 pinstance->num_hrrq;
34815 cmd->cmd_done = pmcraid_io_done;
34816
34817 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
34818 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34819 * hrrq_id assigned here in queuecommand
34820 */
34821 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34822 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34823 pinstance->num_hrrq;
34824
34825 if (request_size) {
34826 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
34827
34828 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34829 /* add resources only after host is added into system */
34830 - if (!atomic_read(&pinstance->expose_resources))
34831 + if (!atomic_read_unchecked(&pinstance->expose_resources))
34832 return;
34833
34834 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34835 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
34836 init_waitqueue_head(&pinstance->reset_wait_q);
34837
34838 atomic_set(&pinstance->outstanding_cmds, 0);
34839 - atomic_set(&pinstance->last_message_id, 0);
34840 - atomic_set(&pinstance->expose_resources, 0);
34841 + atomic_set_unchecked(&pinstance->last_message_id, 0);
34842 + atomic_set_unchecked(&pinstance->expose_resources, 0);
34843
34844 INIT_LIST_HEAD(&pinstance->free_res_q);
34845 INIT_LIST_HEAD(&pinstance->used_res_q);
34846 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
34847 /* Schedule worker thread to handle CCN and take care of adding and
34848 * removing devices to OS
34849 */
34850 - atomic_set(&pinstance->expose_resources, 1);
34851 + atomic_set_unchecked(&pinstance->expose_resources, 1);
34852 schedule_work(&pinstance->worker_q);
34853 return rc;
34854
34855 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
34856 index ca496c7..9c791d5 100644
34857 --- a/drivers/scsi/pmcraid.h
34858 +++ b/drivers/scsi/pmcraid.h
34859 @@ -748,7 +748,7 @@ struct pmcraid_instance {
34860 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34861
34862 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34863 - atomic_t last_message_id;
34864 + atomic_unchecked_t last_message_id;
34865
34866 /* configuration table */
34867 struct pmcraid_config_table *cfg_table;
34868 @@ -777,7 +777,7 @@ struct pmcraid_instance {
34869 atomic_t outstanding_cmds;
34870
34871 /* should add/delete resources to mid-layer now ?*/
34872 - atomic_t expose_resources;
34873 + atomic_unchecked_t expose_resources;
34874
34875
34876
34877 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
34878 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34879 };
34880 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34881 - atomic_t read_failures; /* count of failed READ commands */
34882 - atomic_t write_failures; /* count of failed WRITE commands */
34883 + atomic_unchecked_t read_failures; /* count of failed READ commands */
34884 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34885
34886 /* To indicate add/delete/modify during CCN */
34887 u8 change_detected;
34888 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
34889 index fcf052c..a8025a4 100644
34890 --- a/drivers/scsi/qla2xxx/qla_def.h
34891 +++ b/drivers/scsi/qla2xxx/qla_def.h
34892 @@ -2244,7 +2244,7 @@ struct isp_operations {
34893 int (*get_flash_version) (struct scsi_qla_host *, void *);
34894 int (*start_scsi) (srb_t *);
34895 int (*abort_isp) (struct scsi_qla_host *);
34896 -};
34897 +} __no_const;
34898
34899 /* MSI-X Support *************************************************************/
34900
34901 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
34902 index fd5edc6..4906148 100644
34903 --- a/drivers/scsi/qla4xxx/ql4_def.h
34904 +++ b/drivers/scsi/qla4xxx/ql4_def.h
34905 @@ -258,7 +258,7 @@ struct ddb_entry {
34906 * (4000 only) */
34907 atomic_t relogin_timer; /* Max Time to wait for
34908 * relogin to complete */
34909 - atomic_t relogin_retry_count; /* Num of times relogin has been
34910 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34911 * retried */
34912 uint32_t default_time2wait; /* Default Min time between
34913 * relogins (+aens) */
34914 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
34915 index 4169c8b..a8b896b 100644
34916 --- a/drivers/scsi/qla4xxx/ql4_os.c
34917 +++ b/drivers/scsi/qla4xxx/ql4_os.c
34918 @@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
34919 */
34920 if (!iscsi_is_session_online(cls_sess)) {
34921 /* Reset retry relogin timer */
34922 - atomic_inc(&ddb_entry->relogin_retry_count);
34923 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34924 DEBUG2(ql4_printk(KERN_INFO, ha,
34925 "%s: index[%d] relogin timed out-retrying"
34926 " relogin (%d), retry (%d)\n", __func__,
34927 ddb_entry->fw_ddb_index,
34928 - atomic_read(&ddb_entry->relogin_retry_count),
34929 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
34930 ddb_entry->default_time2wait + 4));
34931 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
34932 atomic_set(&ddb_entry->retry_relogin_timer,
34933 @@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
34934
34935 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34936 atomic_set(&ddb_entry->relogin_timer, 0);
34937 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34938 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34939
34940 ddb_entry->default_relogin_timeout =
34941 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
34942 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
34943 index 2aeb2e9..46e3925 100644
34944 --- a/drivers/scsi/scsi.c
34945 +++ b/drivers/scsi/scsi.c
34946 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
34947 unsigned long timeout;
34948 int rtn = 0;
34949
34950 - atomic_inc(&cmd->device->iorequest_cnt);
34951 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34952
34953 /* check if the device is still usable */
34954 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34955 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
34956 index f85cfa6..a57c9e8 100644
34957 --- a/drivers/scsi/scsi_lib.c
34958 +++ b/drivers/scsi/scsi_lib.c
34959 @@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
34960 shost = sdev->host;
34961 scsi_init_cmd_errh(cmd);
34962 cmd->result = DID_NO_CONNECT << 16;
34963 - atomic_inc(&cmd->device->iorequest_cnt);
34964 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34965
34966 /*
34967 * SCSI request completion path will do scsi_device_unbusy(),
34968 @@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
34969
34970 INIT_LIST_HEAD(&cmd->eh_entry);
34971
34972 - atomic_inc(&cmd->device->iodone_cnt);
34973 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34974 if (cmd->result)
34975 - atomic_inc(&cmd->device->ioerr_cnt);
34976 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34977
34978 disposition = scsi_decide_disposition(cmd);
34979 if (disposition != SUCCESS &&
34980 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
34981 index 04c2a27..9d8bd66 100644
34982 --- a/drivers/scsi/scsi_sysfs.c
34983 +++ b/drivers/scsi/scsi_sysfs.c
34984 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
34985 char *buf) \
34986 { \
34987 struct scsi_device *sdev = to_scsi_device(dev); \
34988 - unsigned long long count = atomic_read(&sdev->field); \
34989 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
34990 return snprintf(buf, 20, "0x%llx\n", count); \
34991 } \
34992 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34993 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
34994 index 84a1fdf..693b0d6 100644
34995 --- a/drivers/scsi/scsi_tgt_lib.c
34996 +++ b/drivers/scsi/scsi_tgt_lib.c
34997 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
34998 int err;
34999
35000 dprintk("%lx %u\n", uaddr, len);
35001 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
35002 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
35003 if (err) {
35004 /*
35005 * TODO: need to fixup sg_tablesize, max_segment_size,
35006 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
35007 index 1b21491..1b7f60e 100644
35008 --- a/drivers/scsi/scsi_transport_fc.c
35009 +++ b/drivers/scsi/scsi_transport_fc.c
35010 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
35011 * Netlink Infrastructure
35012 */
35013
35014 -static atomic_t fc_event_seq;
35015 +static atomic_unchecked_t fc_event_seq;
35016
35017 /**
35018 * fc_get_event_number - Obtain the next sequential FC event number
35019 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
35020 u32
35021 fc_get_event_number(void)
35022 {
35023 - return atomic_add_return(1, &fc_event_seq);
35024 + return atomic_add_return_unchecked(1, &fc_event_seq);
35025 }
35026 EXPORT_SYMBOL(fc_get_event_number);
35027
35028 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
35029 {
35030 int error;
35031
35032 - atomic_set(&fc_event_seq, 0);
35033 + atomic_set_unchecked(&fc_event_seq, 0);
35034
35035 error = transport_class_register(&fc_host_class);
35036 if (error)
35037 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
35038 char *cp;
35039
35040 *val = simple_strtoul(buf, &cp, 0);
35041 - if ((*cp && (*cp != '\n')) || (*val < 0))
35042 + if (*cp && (*cp != '\n'))
35043 return -EINVAL;
35044 /*
35045 * Check for overflow; dev_loss_tmo is u32
35046 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
35047 index 96029e6..4d77fa0 100644
35048 --- a/drivers/scsi/scsi_transport_iscsi.c
35049 +++ b/drivers/scsi/scsi_transport_iscsi.c
35050 @@ -79,7 +79,7 @@ struct iscsi_internal {
35051 struct transport_container session_cont;
35052 };
35053
35054 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
35055 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
35056 static struct workqueue_struct *iscsi_eh_timer_workq;
35057
35058 static DEFINE_IDA(iscsi_sess_ida);
35059 @@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
35060 int err;
35061
35062 ihost = shost->shost_data;
35063 - session->sid = atomic_add_return(1, &iscsi_session_nr);
35064 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
35065
35066 if (target_id == ISCSI_MAX_TARGET) {
35067 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
35068 @@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
35069 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
35070 ISCSI_TRANSPORT_VERSION);
35071
35072 - atomic_set(&iscsi_session_nr, 0);
35073 + atomic_set_unchecked(&iscsi_session_nr, 0);
35074
35075 err = class_register(&iscsi_transport_class);
35076 if (err)
35077 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
35078 index 21a045e..ec89e03 100644
35079 --- a/drivers/scsi/scsi_transport_srp.c
35080 +++ b/drivers/scsi/scsi_transport_srp.c
35081 @@ -33,7 +33,7 @@
35082 #include "scsi_transport_srp_internal.h"
35083
35084 struct srp_host_attrs {
35085 - atomic_t next_port_id;
35086 + atomic_unchecked_t next_port_id;
35087 };
35088 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
35089
35090 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
35091 struct Scsi_Host *shost = dev_to_shost(dev);
35092 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
35093
35094 - atomic_set(&srp_host->next_port_id, 0);
35095 + atomic_set_unchecked(&srp_host->next_port_id, 0);
35096 return 0;
35097 }
35098
35099 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
35100 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
35101 rport->roles = ids->roles;
35102
35103 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
35104 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
35105 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
35106
35107 transport_setup_device(&rport->dev);
35108 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
35109 index 441a1c5..07cece7 100644
35110 --- a/drivers/scsi/sg.c
35111 +++ b/drivers/scsi/sg.c
35112 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
35113 sdp->disk->disk_name,
35114 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
35115 NULL,
35116 - (char *)arg);
35117 + (char __user *)arg);
35118 case BLKTRACESTART:
35119 return blk_trace_startstop(sdp->device->request_queue, 1);
35120 case BLKTRACESTOP:
35121 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
35122 const struct file_operations * fops;
35123 };
35124
35125 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
35126 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
35127 {"allow_dio", &adio_fops},
35128 {"debug", &debug_fops},
35129 {"def_reserved_size", &dressz_fops},
35130 @@ -2327,7 +2327,7 @@ sg_proc_init(void)
35131 {
35132 int k, mask;
35133 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
35134 - struct sg_proc_leaf * leaf;
35135 + const struct sg_proc_leaf * leaf;
35136
35137 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
35138 if (!sg_proc_sgp)
35139 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
35140 index f64250e..1ee3049 100644
35141 --- a/drivers/spi/spi-dw-pci.c
35142 +++ b/drivers/spi/spi-dw-pci.c
35143 @@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
35144 #define spi_resume NULL
35145 #endif
35146
35147 -static const struct pci_device_id pci_ids[] __devinitdata = {
35148 +static const struct pci_device_id pci_ids[] __devinitconst = {
35149 /* Intel MID platform SPI controller 0 */
35150 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
35151 {},
35152 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
35153 index 77eae99..b7cdcc9 100644
35154 --- a/drivers/spi/spi.c
35155 +++ b/drivers/spi/spi.c
35156 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
35157 EXPORT_SYMBOL_GPL(spi_bus_unlock);
35158
35159 /* portable code must never pass more than 32 bytes */
35160 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
35161 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
35162
35163 static u8 *buf;
35164
35165 diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
35166 index 436fe97..4082570 100644
35167 --- a/drivers/staging/gma500/power.c
35168 +++ b/drivers/staging/gma500/power.c
35169 @@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
35170 ret = gma_resume_pci(dev->pdev);
35171 if (ret == 0) {
35172 /* FIXME: we want to defer this for Medfield/Oaktrail */
35173 - gma_resume_display(dev);
35174 + gma_resume_display(dev->pdev);
35175 psb_irq_preinstall(dev);
35176 psb_irq_postinstall(dev);
35177 pm_runtime_get(&dev->pdev->dev);
35178 diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
35179 index bafccb3..e3ac78d 100644
35180 --- a/drivers/staging/hv/rndis_filter.c
35181 +++ b/drivers/staging/hv/rndis_filter.c
35182 @@ -42,7 +42,7 @@ struct rndis_device {
35183
35184 enum rndis_device_state state;
35185 bool link_state;
35186 - atomic_t new_req_id;
35187 + atomic_unchecked_t new_req_id;
35188
35189 spinlock_t request_lock;
35190 struct list_head req_list;
35191 @@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35192 * template
35193 */
35194 set = &rndis_msg->msg.set_req;
35195 - set->req_id = atomic_inc_return(&dev->new_req_id);
35196 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35197
35198 /* Add to the request list */
35199 spin_lock_irqsave(&dev->request_lock, flags);
35200 @@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35201
35202 /* Setup the rndis set */
35203 halt = &request->request_msg.msg.halt_req;
35204 - halt->req_id = atomic_inc_return(&dev->new_req_id);
35205 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35206
35207 /* Ignore return since this msg is optional. */
35208 rndis_filter_send_request(dev, request);
35209 diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
35210 index 9e8f010..af9efb5 100644
35211 --- a/drivers/staging/iio/buffer_generic.h
35212 +++ b/drivers/staging/iio/buffer_generic.h
35213 @@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
35214
35215 int (*is_enabled)(struct iio_buffer *buffer);
35216 int (*enable)(struct iio_buffer *buffer);
35217 -};
35218 +} __no_const;
35219
35220 /**
35221 * struct iio_buffer_setup_ops - buffer setup related callbacks
35222 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
35223 index 8b307b4..a97ac91 100644
35224 --- a/drivers/staging/octeon/ethernet-rx.c
35225 +++ b/drivers/staging/octeon/ethernet-rx.c
35226 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35227 /* Increment RX stats for virtual ports */
35228 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35229 #ifdef CONFIG_64BIT
35230 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35231 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35232 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35233 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35234 #else
35235 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35236 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35237 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35238 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35239 #endif
35240 }
35241 netif_receive_skb(skb);
35242 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35243 dev->name);
35244 */
35245 #ifdef CONFIG_64BIT
35246 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35247 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35248 #else
35249 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35250 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35251 #endif
35252 dev_kfree_skb_irq(skb);
35253 }
35254 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
35255 index 076f866..2308070 100644
35256 --- a/drivers/staging/octeon/ethernet.c
35257 +++ b/drivers/staging/octeon/ethernet.c
35258 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
35259 * since the RX tasklet also increments it.
35260 */
35261 #ifdef CONFIG_64BIT
35262 - atomic64_add(rx_status.dropped_packets,
35263 - (atomic64_t *)&priv->stats.rx_dropped);
35264 + atomic64_add_unchecked(rx_status.dropped_packets,
35265 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35266 #else
35267 - atomic_add(rx_status.dropped_packets,
35268 - (atomic_t *)&priv->stats.rx_dropped);
35269 + atomic_add_unchecked(rx_status.dropped_packets,
35270 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
35271 #endif
35272 }
35273
35274 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
35275 index 7a19555..466456d 100644
35276 --- a/drivers/staging/pohmelfs/inode.c
35277 +++ b/drivers/staging/pohmelfs/inode.c
35278 @@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35279 mutex_init(&psb->mcache_lock);
35280 psb->mcache_root = RB_ROOT;
35281 psb->mcache_timeout = msecs_to_jiffies(5000);
35282 - atomic_long_set(&psb->mcache_gen, 0);
35283 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
35284
35285 psb->trans_max_pages = 100;
35286
35287 @@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35288 INIT_LIST_HEAD(&psb->crypto_ready_list);
35289 INIT_LIST_HEAD(&psb->crypto_active_list);
35290
35291 - atomic_set(&psb->trans_gen, 1);
35292 + atomic_set_unchecked(&psb->trans_gen, 1);
35293 atomic_long_set(&psb->total_inodes, 0);
35294
35295 mutex_init(&psb->state_lock);
35296 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
35297 index e22665c..a2a9390 100644
35298 --- a/drivers/staging/pohmelfs/mcache.c
35299 +++ b/drivers/staging/pohmelfs/mcache.c
35300 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
35301 m->data = data;
35302 m->start = start;
35303 m->size = size;
35304 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
35305 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35306
35307 mutex_lock(&psb->mcache_lock);
35308 err = pohmelfs_mcache_insert(psb, m);
35309 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
35310 index 985b6b7..7699e05 100644
35311 --- a/drivers/staging/pohmelfs/netfs.h
35312 +++ b/drivers/staging/pohmelfs/netfs.h
35313 @@ -571,14 +571,14 @@ struct pohmelfs_config;
35314 struct pohmelfs_sb {
35315 struct rb_root mcache_root;
35316 struct mutex mcache_lock;
35317 - atomic_long_t mcache_gen;
35318 + atomic_long_unchecked_t mcache_gen;
35319 unsigned long mcache_timeout;
35320
35321 unsigned int idx;
35322
35323 unsigned int trans_retries;
35324
35325 - atomic_t trans_gen;
35326 + atomic_unchecked_t trans_gen;
35327
35328 unsigned int crypto_attached_size;
35329 unsigned int crypto_align_size;
35330 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
35331 index 06c1a74..866eebc 100644
35332 --- a/drivers/staging/pohmelfs/trans.c
35333 +++ b/drivers/staging/pohmelfs/trans.c
35334 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
35335 int err;
35336 struct netfs_cmd *cmd = t->iovec.iov_base;
35337
35338 - t->gen = atomic_inc_return(&psb->trans_gen);
35339 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35340
35341 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35342 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35343 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
35344 index 86308a0..feaa925 100644
35345 --- a/drivers/staging/rtl8712/rtl871x_io.h
35346 +++ b/drivers/staging/rtl8712/rtl871x_io.h
35347 @@ -108,7 +108,7 @@ struct _io_ops {
35348 u8 *pmem);
35349 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35350 u8 *pmem);
35351 -};
35352 +} __no_const;
35353
35354 struct io_req {
35355 struct list_head list;
35356 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
35357 index c7b5e8b..783d6cb 100644
35358 --- a/drivers/staging/sbe-2t3e3/netdev.c
35359 +++ b/drivers/staging/sbe-2t3e3/netdev.c
35360 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35361 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35362
35363 if (rlen)
35364 - if (copy_to_user(data, &resp, rlen))
35365 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35366 return -EFAULT;
35367
35368 return 0;
35369 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
35370 index be21617..0954e45 100644
35371 --- a/drivers/staging/usbip/usbip_common.h
35372 +++ b/drivers/staging/usbip/usbip_common.h
35373 @@ -289,7 +289,7 @@ struct usbip_device {
35374 void (*shutdown)(struct usbip_device *);
35375 void (*reset)(struct usbip_device *);
35376 void (*unusable)(struct usbip_device *);
35377 - } eh_ops;
35378 + } __no_const eh_ops;
35379 };
35380
35381 #if 0
35382 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
35383 index 88b3298..3783eee 100644
35384 --- a/drivers/staging/usbip/vhci.h
35385 +++ b/drivers/staging/usbip/vhci.h
35386 @@ -88,7 +88,7 @@ struct vhci_hcd {
35387 unsigned resuming:1;
35388 unsigned long re_timeout;
35389
35390 - atomic_t seqnum;
35391 + atomic_unchecked_t seqnum;
35392
35393 /*
35394 * NOTE:
35395 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
35396 index 2ee97e2..0420b86 100644
35397 --- a/drivers/staging/usbip/vhci_hcd.c
35398 +++ b/drivers/staging/usbip/vhci_hcd.c
35399 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35400 return;
35401 }
35402
35403 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35404 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35405 if (priv->seqnum == 0xffff)
35406 dev_info(&urb->dev->dev, "seqnum max\n");
35407
35408 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
35409 return -ENOMEM;
35410 }
35411
35412 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35413 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35414 if (unlink->seqnum == 0xffff)
35415 pr_info("seqnum max\n");
35416
35417 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
35418 vdev->rhport = rhport;
35419 }
35420
35421 - atomic_set(&vhci->seqnum, 0);
35422 + atomic_set_unchecked(&vhci->seqnum, 0);
35423 spin_lock_init(&vhci->lock);
35424
35425 hcd->power_budget = 0; /* no limit */
35426 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
35427 index 3872b8c..fe6d2f4 100644
35428 --- a/drivers/staging/usbip/vhci_rx.c
35429 +++ b/drivers/staging/usbip/vhci_rx.c
35430 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
35431 if (!urb) {
35432 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35433 pr_info("max seqnum %d\n",
35434 - atomic_read(&the_controller->seqnum));
35435 + atomic_read_unchecked(&the_controller->seqnum));
35436 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35437 return;
35438 }
35439 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
35440 index 7735027..30eed13 100644
35441 --- a/drivers/staging/vt6655/hostap.c
35442 +++ b/drivers/staging/vt6655/hostap.c
35443 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
35444 *
35445 */
35446
35447 +static net_device_ops_no_const apdev_netdev_ops;
35448 +
35449 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35450 {
35451 PSDevice apdev_priv;
35452 struct net_device *dev = pDevice->dev;
35453 int ret;
35454 - const struct net_device_ops apdev_netdev_ops = {
35455 - .ndo_start_xmit = pDevice->tx_80211,
35456 - };
35457
35458 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35459
35460 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35461 *apdev_priv = *pDevice;
35462 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35463
35464 + /* only half broken now */
35465 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35466 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35467
35468 pDevice->apdev->type = ARPHRD_IEEE80211;
35469 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
35470 index 51b5adf..098e320 100644
35471 --- a/drivers/staging/vt6656/hostap.c
35472 +++ b/drivers/staging/vt6656/hostap.c
35473 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
35474 *
35475 */
35476
35477 +static net_device_ops_no_const apdev_netdev_ops;
35478 +
35479 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35480 {
35481 PSDevice apdev_priv;
35482 struct net_device *dev = pDevice->dev;
35483 int ret;
35484 - const struct net_device_ops apdev_netdev_ops = {
35485 - .ndo_start_xmit = pDevice->tx_80211,
35486 - };
35487
35488 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35489
35490 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35491 *apdev_priv = *pDevice;
35492 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35493
35494 + /* only half broken now */
35495 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35496 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35497
35498 pDevice->apdev->type = ARPHRD_IEEE80211;
35499 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
35500 index 7843dfd..3db105f 100644
35501 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
35502 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
35503 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
35504
35505 struct usbctlx_completor {
35506 int (*complete) (struct usbctlx_completor *);
35507 -};
35508 +} __no_const;
35509
35510 static int
35511 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35512 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
35513 index 1ca66ea..76f1343 100644
35514 --- a/drivers/staging/zcache/tmem.c
35515 +++ b/drivers/staging/zcache/tmem.c
35516 @@ -39,7 +39,7 @@
35517 * A tmem host implementation must use this function to register callbacks
35518 * for memory allocation.
35519 */
35520 -static struct tmem_hostops tmem_hostops;
35521 +static tmem_hostops_no_const tmem_hostops;
35522
35523 static void tmem_objnode_tree_init(void);
35524
35525 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
35526 * A tmem host implementation must use this function to register
35527 * callbacks for a page-accessible memory (PAM) implementation
35528 */
35529 -static struct tmem_pamops tmem_pamops;
35530 +static tmem_pamops_no_const tmem_pamops;
35531
35532 void tmem_register_pamops(struct tmem_pamops *m)
35533 {
35534 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
35535 index ed147c4..94fc3c6 100644
35536 --- a/drivers/staging/zcache/tmem.h
35537 +++ b/drivers/staging/zcache/tmem.h
35538 @@ -180,6 +180,7 @@ struct tmem_pamops {
35539 void (*new_obj)(struct tmem_obj *);
35540 int (*replace_in_obj)(void *, struct tmem_obj *);
35541 };
35542 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35543 extern void tmem_register_pamops(struct tmem_pamops *m);
35544
35545 /* memory allocation methods provided by the host implementation */
35546 @@ -189,6 +190,7 @@ struct tmem_hostops {
35547 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35548 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35549 };
35550 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35551 extern void tmem_register_hostops(struct tmem_hostops *m);
35552
35553 /* core tmem accessor functions */
35554 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
35555 index 0c1d5c73..88e90a8 100644
35556 --- a/drivers/target/iscsi/iscsi_target.c
35557 +++ b/drivers/target/iscsi/iscsi_target.c
35558 @@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
35559 * outstanding_r2ts reaches zero, go ahead and send the delayed
35560 * TASK_ABORTED status.
35561 */
35562 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35563 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35564 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35565 if (--cmd->outstanding_r2ts < 1) {
35566 iscsit_stop_dataout_timer(cmd);
35567 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
35568 index 6845228..df77141 100644
35569 --- a/drivers/target/target_core_tmr.c
35570 +++ b/drivers/target/target_core_tmr.c
35571 @@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
35572 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35573 cmd->t_task_list_num,
35574 atomic_read(&cmd->t_task_cdbs_left),
35575 - atomic_read(&cmd->t_task_cdbs_sent),
35576 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35577 atomic_read(&cmd->t_transport_active),
35578 atomic_read(&cmd->t_transport_stop),
35579 atomic_read(&cmd->t_transport_sent));
35580 @@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
35581 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35582 " task: %p, t_fe_count: %d dev: %p\n", task,
35583 fe_count, dev);
35584 - atomic_set(&cmd->t_transport_aborted, 1);
35585 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35586 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35587
35588 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35589 @@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
35590 }
35591 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35592 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35593 - atomic_set(&cmd->t_transport_aborted, 1);
35594 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35595 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35596
35597 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35598 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
35599 index e4ddb93..2fc6e0f 100644
35600 --- a/drivers/target/target_core_transport.c
35601 +++ b/drivers/target/target_core_transport.c
35602 @@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
35603
35604 dev->queue_depth = dev_limits->queue_depth;
35605 atomic_set(&dev->depth_left, dev->queue_depth);
35606 - atomic_set(&dev->dev_ordered_id, 0);
35607 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
35608
35609 se_dev_set_default_attribs(dev, dev_limits);
35610
35611 @@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
35612 * Used to determine when ORDERED commands should go from
35613 * Dormant to Active status.
35614 */
35615 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35616 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35617 smp_mb__after_atomic_inc();
35618 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35619 cmd->se_ordered_id, cmd->sam_task_attr,
35620 @@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
35621 " t_transport_active: %d t_transport_stop: %d"
35622 " t_transport_sent: %d\n", cmd->t_task_list_num,
35623 atomic_read(&cmd->t_task_cdbs_left),
35624 - atomic_read(&cmd->t_task_cdbs_sent),
35625 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35626 atomic_read(&cmd->t_task_cdbs_ex_left),
35627 atomic_read(&cmd->t_transport_active),
35628 atomic_read(&cmd->t_transport_stop),
35629 @@ -2089,9 +2089,9 @@ check_depth:
35630
35631 spin_lock_irqsave(&cmd->t_state_lock, flags);
35632 task->task_flags |= (TF_ACTIVE | TF_SENT);
35633 - atomic_inc(&cmd->t_task_cdbs_sent);
35634 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35635
35636 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
35637 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35638 cmd->t_task_list_num)
35639 atomic_set(&cmd->t_transport_sent, 1);
35640
35641 @@ -4296,7 +4296,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
35642 atomic_set(&cmd->transport_lun_stop, 0);
35643 }
35644 if (!atomic_read(&cmd->t_transport_active) ||
35645 - atomic_read(&cmd->t_transport_aborted)) {
35646 + atomic_read_unchecked(&cmd->t_transport_aborted)) {
35647 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35648 return false;
35649 }
35650 @@ -4545,7 +4545,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
35651 {
35652 int ret = 0;
35653
35654 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
35655 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35656 if (!send_status ||
35657 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35658 return 1;
35659 @@ -4582,7 +4582,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
35660 */
35661 if (cmd->data_direction == DMA_TO_DEVICE) {
35662 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35663 - atomic_inc(&cmd->t_transport_aborted);
35664 + atomic_inc_unchecked(&cmd->t_transport_aborted);
35665 smp_mb__after_atomic_inc();
35666 }
35667 }
35668 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
35669 index b9040be..e3f5aab 100644
35670 --- a/drivers/tty/hvc/hvcs.c
35671 +++ b/drivers/tty/hvc/hvcs.c
35672 @@ -83,6 +83,7 @@
35673 #include <asm/hvcserver.h>
35674 #include <asm/uaccess.h>
35675 #include <asm/vio.h>
35676 +#include <asm/local.h>
35677
35678 /*
35679 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35680 @@ -270,7 +271,7 @@ struct hvcs_struct {
35681 unsigned int index;
35682
35683 struct tty_struct *tty;
35684 - int open_count;
35685 + local_t open_count;
35686
35687 /*
35688 * Used to tell the driver kernel_thread what operations need to take
35689 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
35690
35691 spin_lock_irqsave(&hvcsd->lock, flags);
35692
35693 - if (hvcsd->open_count > 0) {
35694 + if (local_read(&hvcsd->open_count) > 0) {
35695 spin_unlock_irqrestore(&hvcsd->lock, flags);
35696 printk(KERN_INFO "HVCS: vterm state unchanged. "
35697 "The hvcs device node is still in use.\n");
35698 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
35699 if ((retval = hvcs_partner_connect(hvcsd)))
35700 goto error_release;
35701
35702 - hvcsd->open_count = 1;
35703 + local_set(&hvcsd->open_count, 1);
35704 hvcsd->tty = tty;
35705 tty->driver_data = hvcsd;
35706
35707 @@ -1179,7 +1180,7 @@ fast_open:
35708
35709 spin_lock_irqsave(&hvcsd->lock, flags);
35710 kref_get(&hvcsd->kref);
35711 - hvcsd->open_count++;
35712 + local_inc(&hvcsd->open_count);
35713 hvcsd->todo_mask |= HVCS_SCHED_READ;
35714 spin_unlock_irqrestore(&hvcsd->lock, flags);
35715
35716 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35717 hvcsd = tty->driver_data;
35718
35719 spin_lock_irqsave(&hvcsd->lock, flags);
35720 - if (--hvcsd->open_count == 0) {
35721 + if (local_dec_and_test(&hvcsd->open_count)) {
35722
35723 vio_disable_interrupts(hvcsd->vdev);
35724
35725 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35726 free_irq(irq, hvcsd);
35727 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35728 return;
35729 - } else if (hvcsd->open_count < 0) {
35730 + } else if (local_read(&hvcsd->open_count) < 0) {
35731 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35732 " is missmanaged.\n",
35733 - hvcsd->vdev->unit_address, hvcsd->open_count);
35734 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35735 }
35736
35737 spin_unlock_irqrestore(&hvcsd->lock, flags);
35738 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35739
35740 spin_lock_irqsave(&hvcsd->lock, flags);
35741 /* Preserve this so that we know how many kref refs to put */
35742 - temp_open_count = hvcsd->open_count;
35743 + temp_open_count = local_read(&hvcsd->open_count);
35744
35745 /*
35746 * Don't kref put inside the spinlock because the destruction
35747 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35748 hvcsd->tty->driver_data = NULL;
35749 hvcsd->tty = NULL;
35750
35751 - hvcsd->open_count = 0;
35752 + local_set(&hvcsd->open_count, 0);
35753
35754 /* This will drop any buffered data on the floor which is OK in a hangup
35755 * scenario. */
35756 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
35757 * the middle of a write operation? This is a crummy place to do this
35758 * but we want to keep it all in the spinlock.
35759 */
35760 - if (hvcsd->open_count <= 0) {
35761 + if (local_read(&hvcsd->open_count) <= 0) {
35762 spin_unlock_irqrestore(&hvcsd->lock, flags);
35763 return -ENODEV;
35764 }
35765 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
35766 {
35767 struct hvcs_struct *hvcsd = tty->driver_data;
35768
35769 - if (!hvcsd || hvcsd->open_count <= 0)
35770 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35771 return 0;
35772
35773 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35774 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
35775 index ef92869..f4ebd88 100644
35776 --- a/drivers/tty/ipwireless/tty.c
35777 +++ b/drivers/tty/ipwireless/tty.c
35778 @@ -29,6 +29,7 @@
35779 #include <linux/tty_driver.h>
35780 #include <linux/tty_flip.h>
35781 #include <linux/uaccess.h>
35782 +#include <asm/local.h>
35783
35784 #include "tty.h"
35785 #include "network.h"
35786 @@ -51,7 +52,7 @@ struct ipw_tty {
35787 int tty_type;
35788 struct ipw_network *network;
35789 struct tty_struct *linux_tty;
35790 - int open_count;
35791 + local_t open_count;
35792 unsigned int control_lines;
35793 struct mutex ipw_tty_mutex;
35794 int tx_bytes_queued;
35795 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35796 mutex_unlock(&tty->ipw_tty_mutex);
35797 return -ENODEV;
35798 }
35799 - if (tty->open_count == 0)
35800 + if (local_read(&tty->open_count) == 0)
35801 tty->tx_bytes_queued = 0;
35802
35803 - tty->open_count++;
35804 + local_inc(&tty->open_count);
35805
35806 tty->linux_tty = linux_tty;
35807 linux_tty->driver_data = tty;
35808 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35809
35810 static void do_ipw_close(struct ipw_tty *tty)
35811 {
35812 - tty->open_count--;
35813 -
35814 - if (tty->open_count == 0) {
35815 + if (local_dec_return(&tty->open_count) == 0) {
35816 struct tty_struct *linux_tty = tty->linux_tty;
35817
35818 if (linux_tty != NULL) {
35819 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
35820 return;
35821
35822 mutex_lock(&tty->ipw_tty_mutex);
35823 - if (tty->open_count == 0) {
35824 + if (local_read(&tty->open_count) == 0) {
35825 mutex_unlock(&tty->ipw_tty_mutex);
35826 return;
35827 }
35828 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
35829 return;
35830 }
35831
35832 - if (!tty->open_count) {
35833 + if (!local_read(&tty->open_count)) {
35834 mutex_unlock(&tty->ipw_tty_mutex);
35835 return;
35836 }
35837 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
35838 return -ENODEV;
35839
35840 mutex_lock(&tty->ipw_tty_mutex);
35841 - if (!tty->open_count) {
35842 + if (!local_read(&tty->open_count)) {
35843 mutex_unlock(&tty->ipw_tty_mutex);
35844 return -EINVAL;
35845 }
35846 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
35847 if (!tty)
35848 return -ENODEV;
35849
35850 - if (!tty->open_count)
35851 + if (!local_read(&tty->open_count))
35852 return -EINVAL;
35853
35854 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35855 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
35856 if (!tty)
35857 return 0;
35858
35859 - if (!tty->open_count)
35860 + if (!local_read(&tty->open_count))
35861 return 0;
35862
35863 return tty->tx_bytes_queued;
35864 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
35865 if (!tty)
35866 return -ENODEV;
35867
35868 - if (!tty->open_count)
35869 + if (!local_read(&tty->open_count))
35870 return -EINVAL;
35871
35872 return get_control_lines(tty);
35873 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
35874 if (!tty)
35875 return -ENODEV;
35876
35877 - if (!tty->open_count)
35878 + if (!local_read(&tty->open_count))
35879 return -EINVAL;
35880
35881 return set_control_lines(tty, set, clear);
35882 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
35883 if (!tty)
35884 return -ENODEV;
35885
35886 - if (!tty->open_count)
35887 + if (!local_read(&tty->open_count))
35888 return -EINVAL;
35889
35890 /* FIXME: Exactly how is the tty object locked here .. */
35891 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
35892 against a parallel ioctl etc */
35893 mutex_lock(&ttyj->ipw_tty_mutex);
35894 }
35895 - while (ttyj->open_count)
35896 + while (local_read(&ttyj->open_count))
35897 do_ipw_close(ttyj);
35898 ipwireless_disassociate_network_ttys(network,
35899 ttyj->channel_idx);
35900 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
35901 index fc7bbba..9527e93 100644
35902 --- a/drivers/tty/n_gsm.c
35903 +++ b/drivers/tty/n_gsm.c
35904 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
35905 kref_init(&dlci->ref);
35906 mutex_init(&dlci->mutex);
35907 dlci->fifo = &dlci->_fifo;
35908 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35909 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35910 kfree(dlci);
35911 return NULL;
35912 }
35913 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
35914 index 39d6ab6..eb97f41 100644
35915 --- a/drivers/tty/n_tty.c
35916 +++ b/drivers/tty/n_tty.c
35917 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
35918 {
35919 *ops = tty_ldisc_N_TTY;
35920 ops->owner = NULL;
35921 - ops->refcount = ops->flags = 0;
35922 + atomic_set(&ops->refcount, 0);
35923 + ops->flags = 0;
35924 }
35925 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35926 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
35927 index e18604b..a7d5a11 100644
35928 --- a/drivers/tty/pty.c
35929 +++ b/drivers/tty/pty.c
35930 @@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35931 register_sysctl_table(pty_root_table);
35932
35933 /* Now create the /dev/ptmx special device */
35934 + pax_open_kernel();
35935 tty_default_fops(&ptmx_fops);
35936 - ptmx_fops.open = ptmx_open;
35937 + *(void **)&ptmx_fops.open = ptmx_open;
35938 + pax_close_kernel();
35939
35940 cdev_init(&ptmx_cdev, &ptmx_fops);
35941 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35942 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
35943 index 2b42a01..32a2ed3 100644
35944 --- a/drivers/tty/serial/kgdboc.c
35945 +++ b/drivers/tty/serial/kgdboc.c
35946 @@ -24,8 +24,9 @@
35947 #define MAX_CONFIG_LEN 40
35948
35949 static struct kgdb_io kgdboc_io_ops;
35950 +static struct kgdb_io kgdboc_io_ops_console;
35951
35952 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35953 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35954 static int configured = -1;
35955
35956 static char config[MAX_CONFIG_LEN];
35957 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
35958 kgdboc_unregister_kbd();
35959 if (configured == 1)
35960 kgdb_unregister_io_module(&kgdboc_io_ops);
35961 + else if (configured == 2)
35962 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
35963 }
35964
35965 static int configure_kgdboc(void)
35966 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
35967 int err;
35968 char *cptr = config;
35969 struct console *cons;
35970 + int is_console = 0;
35971
35972 err = kgdboc_option_setup(config);
35973 if (err || !strlen(config) || isspace(config[0]))
35974 goto noconfig;
35975
35976 err = -ENODEV;
35977 - kgdboc_io_ops.is_console = 0;
35978 kgdb_tty_driver = NULL;
35979
35980 kgdboc_use_kms = 0;
35981 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
35982 int idx;
35983 if (cons->device && cons->device(cons, &idx) == p &&
35984 idx == tty_line) {
35985 - kgdboc_io_ops.is_console = 1;
35986 + is_console = 1;
35987 break;
35988 }
35989 cons = cons->next;
35990 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
35991 kgdb_tty_line = tty_line;
35992
35993 do_register:
35994 - err = kgdb_register_io_module(&kgdboc_io_ops);
35995 + if (is_console) {
35996 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
35997 + configured = 2;
35998 + } else {
35999 + err = kgdb_register_io_module(&kgdboc_io_ops);
36000 + configured = 1;
36001 + }
36002 if (err)
36003 goto noconfig;
36004
36005 - configured = 1;
36006 -
36007 return 0;
36008
36009 noconfig:
36010 @@ -213,7 +220,7 @@ noconfig:
36011 static int __init init_kgdboc(void)
36012 {
36013 /* Already configured? */
36014 - if (configured == 1)
36015 + if (configured >= 1)
36016 return 0;
36017
36018 return configure_kgdboc();
36019 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
36020 if (config[len - 1] == '\n')
36021 config[len - 1] = '\0';
36022
36023 - if (configured == 1)
36024 + if (configured >= 1)
36025 cleanup_kgdboc();
36026
36027 /* Go and configure with the new params. */
36028 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
36029 .post_exception = kgdboc_post_exp_handler,
36030 };
36031
36032 +static struct kgdb_io kgdboc_io_ops_console = {
36033 + .name = "kgdboc",
36034 + .read_char = kgdboc_get_char,
36035 + .write_char = kgdboc_put_char,
36036 + .pre_exception = kgdboc_pre_exp_handler,
36037 + .post_exception = kgdboc_post_exp_handler,
36038 + .is_console = 1
36039 +};
36040 +
36041 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
36042 /* This is only available if kgdboc is a built in for early debugging */
36043 static int __init kgdboc_early_init(char *opt)
36044 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
36045 index 05085be..67eadb0 100644
36046 --- a/drivers/tty/tty_io.c
36047 +++ b/drivers/tty/tty_io.c
36048 @@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
36049
36050 void tty_default_fops(struct file_operations *fops)
36051 {
36052 - *fops = tty_fops;
36053 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
36054 }
36055
36056 /*
36057 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
36058 index 8e0924f..4204eb4 100644
36059 --- a/drivers/tty/tty_ldisc.c
36060 +++ b/drivers/tty/tty_ldisc.c
36061 @@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
36062 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
36063 struct tty_ldisc_ops *ldo = ld->ops;
36064
36065 - ldo->refcount--;
36066 + atomic_dec(&ldo->refcount);
36067 module_put(ldo->owner);
36068 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36069
36070 @@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
36071 spin_lock_irqsave(&tty_ldisc_lock, flags);
36072 tty_ldiscs[disc] = new_ldisc;
36073 new_ldisc->num = disc;
36074 - new_ldisc->refcount = 0;
36075 + atomic_set(&new_ldisc->refcount, 0);
36076 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36077
36078 return ret;
36079 @@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
36080 return -EINVAL;
36081
36082 spin_lock_irqsave(&tty_ldisc_lock, flags);
36083 - if (tty_ldiscs[disc]->refcount)
36084 + if (atomic_read(&tty_ldiscs[disc]->refcount))
36085 ret = -EBUSY;
36086 else
36087 tty_ldiscs[disc] = NULL;
36088 @@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
36089 if (ldops) {
36090 ret = ERR_PTR(-EAGAIN);
36091 if (try_module_get(ldops->owner)) {
36092 - ldops->refcount++;
36093 + atomic_inc(&ldops->refcount);
36094 ret = ldops;
36095 }
36096 }
36097 @@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
36098 unsigned long flags;
36099
36100 spin_lock_irqsave(&tty_ldisc_lock, flags);
36101 - ldops->refcount--;
36102 + atomic_dec(&ldops->refcount);
36103 module_put(ldops->owner);
36104 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
36105 }
36106 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
36107 index a605549..6bd3c96 100644
36108 --- a/drivers/tty/vt/keyboard.c
36109 +++ b/drivers/tty/vt/keyboard.c
36110 @@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
36111 kbd->kbdmode == VC_OFF) &&
36112 value != KVAL(K_SAK))
36113 return; /* SAK is allowed even in raw mode */
36114 +
36115 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
36116 + {
36117 + void *func = fn_handler[value];
36118 + if (func == fn_show_state || func == fn_show_ptregs ||
36119 + func == fn_show_mem)
36120 + return;
36121 + }
36122 +#endif
36123 +
36124 fn_handler[value](vc);
36125 }
36126
36127 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
36128 index 65447c5..0526f0a 100644
36129 --- a/drivers/tty/vt/vt_ioctl.c
36130 +++ b/drivers/tty/vt/vt_ioctl.c
36131 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
36132 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
36133 return -EFAULT;
36134
36135 - if (!capable(CAP_SYS_TTY_CONFIG))
36136 - perm = 0;
36137 -
36138 switch (cmd) {
36139 case KDGKBENT:
36140 key_map = key_maps[s];
36141 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
36142 val = (i ? K_HOLE : K_NOSUCHMAP);
36143 return put_user(val, &user_kbe->kb_value);
36144 case KDSKBENT:
36145 + if (!capable(CAP_SYS_TTY_CONFIG))
36146 + perm = 0;
36147 +
36148 if (!perm)
36149 return -EPERM;
36150 if (!i && v == K_NOSUCHMAP) {
36151 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
36152 int i, j, k;
36153 int ret;
36154
36155 - if (!capable(CAP_SYS_TTY_CONFIG))
36156 - perm = 0;
36157 -
36158 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
36159 if (!kbs) {
36160 ret = -ENOMEM;
36161 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
36162 kfree(kbs);
36163 return ((p && *p) ? -EOVERFLOW : 0);
36164 case KDSKBSENT:
36165 + if (!capable(CAP_SYS_TTY_CONFIG))
36166 + perm = 0;
36167 +
36168 if (!perm) {
36169 ret = -EPERM;
36170 goto reterr;
36171 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
36172 index a783d53..cb30d94 100644
36173 --- a/drivers/uio/uio.c
36174 +++ b/drivers/uio/uio.c
36175 @@ -25,6 +25,7 @@
36176 #include <linux/kobject.h>
36177 #include <linux/cdev.h>
36178 #include <linux/uio_driver.h>
36179 +#include <asm/local.h>
36180
36181 #define UIO_MAX_DEVICES (1U << MINORBITS)
36182
36183 @@ -32,10 +33,10 @@ struct uio_device {
36184 struct module *owner;
36185 struct device *dev;
36186 int minor;
36187 - atomic_t event;
36188 + atomic_unchecked_t event;
36189 struct fasync_struct *async_queue;
36190 wait_queue_head_t wait;
36191 - int vma_count;
36192 + local_t vma_count;
36193 struct uio_info *info;
36194 struct kobject *map_dir;
36195 struct kobject *portio_dir;
36196 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
36197 struct device_attribute *attr, char *buf)
36198 {
36199 struct uio_device *idev = dev_get_drvdata(dev);
36200 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
36201 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
36202 }
36203
36204 static struct device_attribute uio_class_attributes[] = {
36205 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
36206 {
36207 struct uio_device *idev = info->uio_dev;
36208
36209 - atomic_inc(&idev->event);
36210 + atomic_inc_unchecked(&idev->event);
36211 wake_up_interruptible(&idev->wait);
36212 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
36213 }
36214 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
36215 }
36216
36217 listener->dev = idev;
36218 - listener->event_count = atomic_read(&idev->event);
36219 + listener->event_count = atomic_read_unchecked(&idev->event);
36220 filep->private_data = listener;
36221
36222 if (idev->info->open) {
36223 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
36224 return -EIO;
36225
36226 poll_wait(filep, &idev->wait, wait);
36227 - if (listener->event_count != atomic_read(&idev->event))
36228 + if (listener->event_count != atomic_read_unchecked(&idev->event))
36229 return POLLIN | POLLRDNORM;
36230 return 0;
36231 }
36232 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
36233 do {
36234 set_current_state(TASK_INTERRUPTIBLE);
36235
36236 - event_count = atomic_read(&idev->event);
36237 + event_count = atomic_read_unchecked(&idev->event);
36238 if (event_count != listener->event_count) {
36239 if (copy_to_user(buf, &event_count, count))
36240 retval = -EFAULT;
36241 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
36242 static void uio_vma_open(struct vm_area_struct *vma)
36243 {
36244 struct uio_device *idev = vma->vm_private_data;
36245 - idev->vma_count++;
36246 + local_inc(&idev->vma_count);
36247 }
36248
36249 static void uio_vma_close(struct vm_area_struct *vma)
36250 {
36251 struct uio_device *idev = vma->vm_private_data;
36252 - idev->vma_count--;
36253 + local_dec(&idev->vma_count);
36254 }
36255
36256 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36257 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
36258 idev->owner = owner;
36259 idev->info = info;
36260 init_waitqueue_head(&idev->wait);
36261 - atomic_set(&idev->event, 0);
36262 + atomic_set_unchecked(&idev->event, 0);
36263
36264 ret = uio_get_minor(idev);
36265 if (ret)
36266 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
36267 index a845f8b..4f54072 100644
36268 --- a/drivers/usb/atm/cxacru.c
36269 +++ b/drivers/usb/atm/cxacru.c
36270 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
36271 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36272 if (ret < 2)
36273 return -EINVAL;
36274 - if (index < 0 || index > 0x7f)
36275 + if (index > 0x7f)
36276 return -EINVAL;
36277 pos += tmp;
36278
36279 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
36280 index d3448ca..d2864ca 100644
36281 --- a/drivers/usb/atm/usbatm.c
36282 +++ b/drivers/usb/atm/usbatm.c
36283 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36284 if (printk_ratelimit())
36285 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36286 __func__, vpi, vci);
36287 - atomic_inc(&vcc->stats->rx_err);
36288 + atomic_inc_unchecked(&vcc->stats->rx_err);
36289 return;
36290 }
36291
36292 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36293 if (length > ATM_MAX_AAL5_PDU) {
36294 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36295 __func__, length, vcc);
36296 - atomic_inc(&vcc->stats->rx_err);
36297 + atomic_inc_unchecked(&vcc->stats->rx_err);
36298 goto out;
36299 }
36300
36301 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36302 if (sarb->len < pdu_length) {
36303 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36304 __func__, pdu_length, sarb->len, vcc);
36305 - atomic_inc(&vcc->stats->rx_err);
36306 + atomic_inc_unchecked(&vcc->stats->rx_err);
36307 goto out;
36308 }
36309
36310 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36311 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36312 __func__, vcc);
36313 - atomic_inc(&vcc->stats->rx_err);
36314 + atomic_inc_unchecked(&vcc->stats->rx_err);
36315 goto out;
36316 }
36317
36318 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36319 if (printk_ratelimit())
36320 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36321 __func__, length);
36322 - atomic_inc(&vcc->stats->rx_drop);
36323 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36324 goto out;
36325 }
36326
36327 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36328
36329 vcc->push(vcc, skb);
36330
36331 - atomic_inc(&vcc->stats->rx);
36332 + atomic_inc_unchecked(&vcc->stats->rx);
36333 out:
36334 skb_trim(sarb, 0);
36335 }
36336 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
36337 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36338
36339 usbatm_pop(vcc, skb);
36340 - atomic_inc(&vcc->stats->tx);
36341 + atomic_inc_unchecked(&vcc->stats->tx);
36342
36343 skb = skb_dequeue(&instance->sndqueue);
36344 }
36345 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
36346 if (!left--)
36347 return sprintf(page,
36348 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36349 - atomic_read(&atm_dev->stats.aal5.tx),
36350 - atomic_read(&atm_dev->stats.aal5.tx_err),
36351 - atomic_read(&atm_dev->stats.aal5.rx),
36352 - atomic_read(&atm_dev->stats.aal5.rx_err),
36353 - atomic_read(&atm_dev->stats.aal5.rx_drop));
36354 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36355 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36356 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36357 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36358 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36359
36360 if (!left--) {
36361 if (instance->disconnected)
36362 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
36363 index d956965..4179a77 100644
36364 --- a/drivers/usb/core/devices.c
36365 +++ b/drivers/usb/core/devices.c
36366 @@ -126,7 +126,7 @@ static const char format_endpt[] =
36367 * time it gets called.
36368 */
36369 static struct device_connect_event {
36370 - atomic_t count;
36371 + atomic_unchecked_t count;
36372 wait_queue_head_t wait;
36373 } device_event = {
36374 .count = ATOMIC_INIT(1),
36375 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
36376
36377 void usbfs_conn_disc_event(void)
36378 {
36379 - atomic_add(2, &device_event.count);
36380 + atomic_add_unchecked(2, &device_event.count);
36381 wake_up(&device_event.wait);
36382 }
36383
36384 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
36385
36386 poll_wait(file, &device_event.wait, wait);
36387
36388 - event_count = atomic_read(&device_event.count);
36389 + event_count = atomic_read_unchecked(&device_event.count);
36390 if (file->f_version != event_count) {
36391 file->f_version = event_count;
36392 return POLLIN | POLLRDNORM;
36393 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
36394 index b3bdfed..a9460e0 100644
36395 --- a/drivers/usb/core/message.c
36396 +++ b/drivers/usb/core/message.c
36397 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
36398 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36399 if (buf) {
36400 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36401 - if (len > 0) {
36402 - smallbuf = kmalloc(++len, GFP_NOIO);
36403 + if (len++ > 0) {
36404 + smallbuf = kmalloc(len, GFP_NOIO);
36405 if (!smallbuf)
36406 return buf;
36407 memcpy(smallbuf, buf, len);
36408 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
36409 index 1fc8f12..20647c1 100644
36410 --- a/drivers/usb/early/ehci-dbgp.c
36411 +++ b/drivers/usb/early/ehci-dbgp.c
36412 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
36413
36414 #ifdef CONFIG_KGDB
36415 static struct kgdb_io kgdbdbgp_io_ops;
36416 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36417 +static struct kgdb_io kgdbdbgp_io_ops_console;
36418 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36419 #else
36420 #define dbgp_kgdb_mode (0)
36421 #endif
36422 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
36423 .write_char = kgdbdbgp_write_char,
36424 };
36425
36426 +static struct kgdb_io kgdbdbgp_io_ops_console = {
36427 + .name = "kgdbdbgp",
36428 + .read_char = kgdbdbgp_read_char,
36429 + .write_char = kgdbdbgp_write_char,
36430 + .is_console = 1
36431 +};
36432 +
36433 static int kgdbdbgp_wait_time;
36434
36435 static int __init kgdbdbgp_parse_config(char *str)
36436 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
36437 ptr++;
36438 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36439 }
36440 - kgdb_register_io_module(&kgdbdbgp_io_ops);
36441 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36442 + if (early_dbgp_console.index != -1)
36443 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36444 + else
36445 + kgdb_register_io_module(&kgdbdbgp_io_ops);
36446
36447 return 0;
36448 }
36449 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
36450 index d6bea3e..60b250e 100644
36451 --- a/drivers/usb/wusbcore/wa-hc.h
36452 +++ b/drivers/usb/wusbcore/wa-hc.h
36453 @@ -192,7 +192,7 @@ struct wahc {
36454 struct list_head xfer_delayed_list;
36455 spinlock_t xfer_list_lock;
36456 struct work_struct xfer_work;
36457 - atomic_t xfer_id_count;
36458 + atomic_unchecked_t xfer_id_count;
36459 };
36460
36461
36462 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
36463 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36464 spin_lock_init(&wa->xfer_list_lock);
36465 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36466 - atomic_set(&wa->xfer_id_count, 1);
36467 + atomic_set_unchecked(&wa->xfer_id_count, 1);
36468 }
36469
36470 /**
36471 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
36472 index 57c01ab..8a05959 100644
36473 --- a/drivers/usb/wusbcore/wa-xfer.c
36474 +++ b/drivers/usb/wusbcore/wa-xfer.c
36475 @@ -296,7 +296,7 @@ out:
36476 */
36477 static void wa_xfer_id_init(struct wa_xfer *xfer)
36478 {
36479 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36480 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36481 }
36482
36483 /*
36484 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
36485 index c14c42b..f955cc2 100644
36486 --- a/drivers/vhost/vhost.c
36487 +++ b/drivers/vhost/vhost.c
36488 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
36489 return 0;
36490 }
36491
36492 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36493 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36494 {
36495 struct file *eventfp, *filep = NULL,
36496 *pollstart = NULL, *pollstop = NULL;
36497 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
36498 index b0b2ac3..89a4399 100644
36499 --- a/drivers/video/aty/aty128fb.c
36500 +++ b/drivers/video/aty/aty128fb.c
36501 @@ -148,7 +148,7 @@ enum {
36502 };
36503
36504 /* Must match above enum */
36505 -static const char *r128_family[] __devinitdata = {
36506 +static const char *r128_family[] __devinitconst = {
36507 "AGP",
36508 "PCI",
36509 "PRO AGP",
36510 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
36511 index 5c3960d..15cf8fc 100644
36512 --- a/drivers/video/fbcmap.c
36513 +++ b/drivers/video/fbcmap.c
36514 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
36515 rc = -ENODEV;
36516 goto out;
36517 }
36518 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36519 - !info->fbops->fb_setcmap)) {
36520 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36521 rc = -EINVAL;
36522 goto out1;
36523 }
36524 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
36525 index ad93629..e020fc3 100644
36526 --- a/drivers/video/fbmem.c
36527 +++ b/drivers/video/fbmem.c
36528 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36529 image->dx += image->width + 8;
36530 }
36531 } else if (rotate == FB_ROTATE_UD) {
36532 - for (x = 0; x < num && image->dx >= 0; x++) {
36533 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36534 info->fbops->fb_imageblit(info, image);
36535 image->dx -= image->width + 8;
36536 }
36537 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36538 image->dy += image->height + 8;
36539 }
36540 } else if (rotate == FB_ROTATE_CCW) {
36541 - for (x = 0; x < num && image->dy >= 0; x++) {
36542 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36543 info->fbops->fb_imageblit(info, image);
36544 image->dy -= image->height + 8;
36545 }
36546 @@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
36547 return -EFAULT;
36548 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36549 return -EINVAL;
36550 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36551 + if (con2fb.framebuffer >= FB_MAX)
36552 return -EINVAL;
36553 if (!registered_fb[con2fb.framebuffer])
36554 request_module("fb%d", con2fb.framebuffer);
36555 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
36556 index 5a5d092..265c5ed 100644
36557 --- a/drivers/video/geode/gx1fb_core.c
36558 +++ b/drivers/video/geode/gx1fb_core.c
36559 @@ -29,7 +29,7 @@ static int crt_option = 1;
36560 static char panel_option[32] = "";
36561
36562 /* Modes relevant to the GX1 (taken from modedb.c) */
36563 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
36564 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
36565 /* 640x480-60 VESA */
36566 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36567 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36568 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
36569 index 0fad23f..0e9afa4 100644
36570 --- a/drivers/video/gxt4500.c
36571 +++ b/drivers/video/gxt4500.c
36572 @@ -156,7 +156,7 @@ struct gxt4500_par {
36573 static char *mode_option;
36574
36575 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36576 -static const struct fb_videomode defaultmode __devinitdata = {
36577 +static const struct fb_videomode defaultmode __devinitconst = {
36578 .refresh = 60,
36579 .xres = 1280,
36580 .yres = 1024,
36581 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
36582 return 0;
36583 }
36584
36585 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36586 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36587 .id = "IBM GXT4500P",
36588 .type = FB_TYPE_PACKED_PIXELS,
36589 .visual = FB_VISUAL_PSEUDOCOLOR,
36590 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
36591 index 7672d2e..b56437f 100644
36592 --- a/drivers/video/i810/i810_accel.c
36593 +++ b/drivers/video/i810/i810_accel.c
36594 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
36595 }
36596 }
36597 printk("ringbuffer lockup!!!\n");
36598 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36599 i810_report_error(mmio);
36600 par->dev_flags |= LOCKUP;
36601 info->pixmap.scan_align = 1;
36602 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
36603 index 318f6fb..9a389c1 100644
36604 --- a/drivers/video/i810/i810_main.c
36605 +++ b/drivers/video/i810/i810_main.c
36606 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
36607 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36608
36609 /* PCI */
36610 -static const char *i810_pci_list[] __devinitdata = {
36611 +static const char *i810_pci_list[] __devinitconst = {
36612 "Intel(R) 810 Framebuffer Device" ,
36613 "Intel(R) 810-DC100 Framebuffer Device" ,
36614 "Intel(R) 810E Framebuffer Device" ,
36615 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
36616 index de36693..3c63fc2 100644
36617 --- a/drivers/video/jz4740_fb.c
36618 +++ b/drivers/video/jz4740_fb.c
36619 @@ -136,7 +136,7 @@ struct jzfb {
36620 uint32_t pseudo_palette[16];
36621 };
36622
36623 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36624 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36625 .id = "JZ4740 FB",
36626 .type = FB_TYPE_PACKED_PIXELS,
36627 .visual = FB_VISUAL_TRUECOLOR,
36628 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
36629 index 3c14e43..eafa544 100644
36630 --- a/drivers/video/logo/logo_linux_clut224.ppm
36631 +++ b/drivers/video/logo/logo_linux_clut224.ppm
36632 @@ -1,1604 +1,1123 @@
36633 P3
36634 -# Standard 224-color Linux logo
36635 80 80
36636 255
36637 - 0 0 0 0 0 0 0 0 0 0 0 0
36638 - 0 0 0 0 0 0 0 0 0 0 0 0
36639 - 0 0 0 0 0 0 0 0 0 0 0 0
36640 - 0 0 0 0 0 0 0 0 0 0 0 0
36641 - 0 0 0 0 0 0 0 0 0 0 0 0
36642 - 0 0 0 0 0 0 0 0 0 0 0 0
36643 - 0 0 0 0 0 0 0 0 0 0 0 0
36644 - 0 0 0 0 0 0 0 0 0 0 0 0
36645 - 0 0 0 0 0 0 0 0 0 0 0 0
36646 - 6 6 6 6 6 6 10 10 10 10 10 10
36647 - 10 10 10 6 6 6 6 6 6 6 6 6
36648 - 0 0 0 0 0 0 0 0 0 0 0 0
36649 - 0 0 0 0 0 0 0 0 0 0 0 0
36650 - 0 0 0 0 0 0 0 0 0 0 0 0
36651 - 0 0 0 0 0 0 0 0 0 0 0 0
36652 - 0 0 0 0 0 0 0 0 0 0 0 0
36653 - 0 0 0 0 0 0 0 0 0 0 0 0
36654 - 0 0 0 0 0 0 0 0 0 0 0 0
36655 - 0 0 0 0 0 0 0 0 0 0 0 0
36656 - 0 0 0 0 0 0 0 0 0 0 0 0
36657 - 0 0 0 0 0 0 0 0 0 0 0 0
36658 - 0 0 0 0 0 0 0 0 0 0 0 0
36659 - 0 0 0 0 0 0 0 0 0 0 0 0
36660 - 0 0 0 0 0 0 0 0 0 0 0 0
36661 - 0 0 0 0 0 0 0 0 0 0 0 0
36662 - 0 0 0 0 0 0 0 0 0 0 0 0
36663 - 0 0 0 0 0 0 0 0 0 0 0 0
36664 - 0 0 0 0 0 0 0 0 0 0 0 0
36665 - 0 0 0 6 6 6 10 10 10 14 14 14
36666 - 22 22 22 26 26 26 30 30 30 34 34 34
36667 - 30 30 30 30 30 30 26 26 26 18 18 18
36668 - 14 14 14 10 10 10 6 6 6 0 0 0
36669 - 0 0 0 0 0 0 0 0 0 0 0 0
36670 - 0 0 0 0 0 0 0 0 0 0 0 0
36671 - 0 0 0 0 0 0 0 0 0 0 0 0
36672 - 0 0 0 0 0 0 0 0 0 0 0 0
36673 - 0 0 0 0 0 0 0 0 0 0 0 0
36674 - 0 0 0 0 0 0 0 0 0 0 0 0
36675 - 0 0 0 0 0 0 0 0 0 0 0 0
36676 - 0 0 0 0 0 0 0 0 0 0 0 0
36677 - 0 0 0 0 0 0 0 0 0 0 0 0
36678 - 0 0 0 0 0 1 0 0 1 0 0 0
36679 - 0 0 0 0 0 0 0 0 0 0 0 0
36680 - 0 0 0 0 0 0 0 0 0 0 0 0
36681 - 0 0 0 0 0 0 0 0 0 0 0 0
36682 - 0 0 0 0 0 0 0 0 0 0 0 0
36683 - 0 0 0 0 0 0 0 0 0 0 0 0
36684 - 0 0 0 0 0 0 0 0 0 0 0 0
36685 - 6 6 6 14 14 14 26 26 26 42 42 42
36686 - 54 54 54 66 66 66 78 78 78 78 78 78
36687 - 78 78 78 74 74 74 66 66 66 54 54 54
36688 - 42 42 42 26 26 26 18 18 18 10 10 10
36689 - 6 6 6 0 0 0 0 0 0 0 0 0
36690 - 0 0 0 0 0 0 0 0 0 0 0 0
36691 - 0 0 0 0 0 0 0 0 0 0 0 0
36692 - 0 0 0 0 0 0 0 0 0 0 0 0
36693 - 0 0 0 0 0 0 0 0 0 0 0 0
36694 - 0 0 0 0 0 0 0 0 0 0 0 0
36695 - 0 0 0 0 0 0 0 0 0 0 0 0
36696 - 0 0 0 0 0 0 0 0 0 0 0 0
36697 - 0 0 0 0 0 0 0 0 0 0 0 0
36698 - 0 0 1 0 0 0 0 0 0 0 0 0
36699 - 0 0 0 0 0 0 0 0 0 0 0 0
36700 - 0 0 0 0 0 0 0 0 0 0 0 0
36701 - 0 0 0 0 0 0 0 0 0 0 0 0
36702 - 0 0 0 0 0 0 0 0 0 0 0 0
36703 - 0 0 0 0 0 0 0 0 0 0 0 0
36704 - 0 0 0 0 0 0 0 0 0 10 10 10
36705 - 22 22 22 42 42 42 66 66 66 86 86 86
36706 - 66 66 66 38 38 38 38 38 38 22 22 22
36707 - 26 26 26 34 34 34 54 54 54 66 66 66
36708 - 86 86 86 70 70 70 46 46 46 26 26 26
36709 - 14 14 14 6 6 6 0 0 0 0 0 0
36710 - 0 0 0 0 0 0 0 0 0 0 0 0
36711 - 0 0 0 0 0 0 0 0 0 0 0 0
36712 - 0 0 0 0 0 0 0 0 0 0 0 0
36713 - 0 0 0 0 0 0 0 0 0 0 0 0
36714 - 0 0 0 0 0 0 0 0 0 0 0 0
36715 - 0 0 0 0 0 0 0 0 0 0 0 0
36716 - 0 0 0 0 0 0 0 0 0 0 0 0
36717 - 0 0 0 0 0 0 0 0 0 0 0 0
36718 - 0 0 1 0 0 1 0 0 1 0 0 0
36719 - 0 0 0 0 0 0 0 0 0 0 0 0
36720 - 0 0 0 0 0 0 0 0 0 0 0 0
36721 - 0 0 0 0 0 0 0 0 0 0 0 0
36722 - 0 0 0 0 0 0 0 0 0 0 0 0
36723 - 0 0 0 0 0 0 0 0 0 0 0 0
36724 - 0 0 0 0 0 0 10 10 10 26 26 26
36725 - 50 50 50 82 82 82 58 58 58 6 6 6
36726 - 2 2 6 2 2 6 2 2 6 2 2 6
36727 - 2 2 6 2 2 6 2 2 6 2 2 6
36728 - 6 6 6 54 54 54 86 86 86 66 66 66
36729 - 38 38 38 18 18 18 6 6 6 0 0 0
36730 - 0 0 0 0 0 0 0 0 0 0 0 0
36731 - 0 0 0 0 0 0 0 0 0 0 0 0
36732 - 0 0 0 0 0 0 0 0 0 0 0 0
36733 - 0 0 0 0 0 0 0 0 0 0 0 0
36734 - 0 0 0 0 0 0 0 0 0 0 0 0
36735 - 0 0 0 0 0 0 0 0 0 0 0 0
36736 - 0 0 0 0 0 0 0 0 0 0 0 0
36737 - 0 0 0 0 0 0 0 0 0 0 0 0
36738 - 0 0 0 0 0 0 0 0 0 0 0 0
36739 - 0 0 0 0 0 0 0 0 0 0 0 0
36740 - 0 0 0 0 0 0 0 0 0 0 0 0
36741 - 0 0 0 0 0 0 0 0 0 0 0 0
36742 - 0 0 0 0 0 0 0 0 0 0 0 0
36743 - 0 0 0 0 0 0 0 0 0 0 0 0
36744 - 0 0 0 6 6 6 22 22 22 50 50 50
36745 - 78 78 78 34 34 34 2 2 6 2 2 6
36746 - 2 2 6 2 2 6 2 2 6 2 2 6
36747 - 2 2 6 2 2 6 2 2 6 2 2 6
36748 - 2 2 6 2 2 6 6 6 6 70 70 70
36749 - 78 78 78 46 46 46 22 22 22 6 6 6
36750 - 0 0 0 0 0 0 0 0 0 0 0 0
36751 - 0 0 0 0 0 0 0 0 0 0 0 0
36752 - 0 0 0 0 0 0 0 0 0 0 0 0
36753 - 0 0 0 0 0 0 0 0 0 0 0 0
36754 - 0 0 0 0 0 0 0 0 0 0 0 0
36755 - 0 0 0 0 0 0 0 0 0 0 0 0
36756 - 0 0 0 0 0 0 0 0 0 0 0 0
36757 - 0 0 0 0 0 0 0 0 0 0 0 0
36758 - 0 0 1 0 0 1 0 0 1 0 0 0
36759 - 0 0 0 0 0 0 0 0 0 0 0 0
36760 - 0 0 0 0 0 0 0 0 0 0 0 0
36761 - 0 0 0 0 0 0 0 0 0 0 0 0
36762 - 0 0 0 0 0 0 0 0 0 0 0 0
36763 - 0 0 0 0 0 0 0 0 0 0 0 0
36764 - 6 6 6 18 18 18 42 42 42 82 82 82
36765 - 26 26 26 2 2 6 2 2 6 2 2 6
36766 - 2 2 6 2 2 6 2 2 6 2 2 6
36767 - 2 2 6 2 2 6 2 2 6 14 14 14
36768 - 46 46 46 34 34 34 6 6 6 2 2 6
36769 - 42 42 42 78 78 78 42 42 42 18 18 18
36770 - 6 6 6 0 0 0 0 0 0 0 0 0
36771 - 0 0 0 0 0 0 0 0 0 0 0 0
36772 - 0 0 0 0 0 0 0 0 0 0 0 0
36773 - 0 0 0 0 0 0 0 0 0 0 0 0
36774 - 0 0 0 0 0 0 0 0 0 0 0 0
36775 - 0 0 0 0 0 0 0 0 0 0 0 0
36776 - 0 0 0 0 0 0 0 0 0 0 0 0
36777 - 0 0 0 0 0 0 0 0 0 0 0 0
36778 - 0 0 1 0 0 0 0 0 1 0 0 0
36779 - 0 0 0 0 0 0 0 0 0 0 0 0
36780 - 0 0 0 0 0 0 0 0 0 0 0 0
36781 - 0 0 0 0 0 0 0 0 0 0 0 0
36782 - 0 0 0 0 0 0 0 0 0 0 0 0
36783 - 0 0 0 0 0 0 0 0 0 0 0 0
36784 - 10 10 10 30 30 30 66 66 66 58 58 58
36785 - 2 2 6 2 2 6 2 2 6 2 2 6
36786 - 2 2 6 2 2 6 2 2 6 2 2 6
36787 - 2 2 6 2 2 6 2 2 6 26 26 26
36788 - 86 86 86 101 101 101 46 46 46 10 10 10
36789 - 2 2 6 58 58 58 70 70 70 34 34 34
36790 - 10 10 10 0 0 0 0 0 0 0 0 0
36791 - 0 0 0 0 0 0 0 0 0 0 0 0
36792 - 0 0 0 0 0 0 0 0 0 0 0 0
36793 - 0 0 0 0 0 0 0 0 0 0 0 0
36794 - 0 0 0 0 0 0 0 0 0 0 0 0
36795 - 0 0 0 0 0 0 0 0 0 0 0 0
36796 - 0 0 0 0 0 0 0 0 0 0 0 0
36797 - 0 0 0 0 0 0 0 0 0 0 0 0
36798 - 0 0 1 0 0 1 0 0 1 0 0 0
36799 - 0 0 0 0 0 0 0 0 0 0 0 0
36800 - 0 0 0 0 0 0 0 0 0 0 0 0
36801 - 0 0 0 0 0 0 0 0 0 0 0 0
36802 - 0 0 0 0 0 0 0 0 0 0 0 0
36803 - 0 0 0 0 0 0 0 0 0 0 0 0
36804 - 14 14 14 42 42 42 86 86 86 10 10 10
36805 - 2 2 6 2 2 6 2 2 6 2 2 6
36806 - 2 2 6 2 2 6 2 2 6 2 2 6
36807 - 2 2 6 2 2 6 2 2 6 30 30 30
36808 - 94 94 94 94 94 94 58 58 58 26 26 26
36809 - 2 2 6 6 6 6 78 78 78 54 54 54
36810 - 22 22 22 6 6 6 0 0 0 0 0 0
36811 - 0 0 0 0 0 0 0 0 0 0 0 0
36812 - 0 0 0 0 0 0 0 0 0 0 0 0
36813 - 0 0 0 0 0 0 0 0 0 0 0 0
36814 - 0 0 0 0 0 0 0 0 0 0 0 0
36815 - 0 0 0 0 0 0 0 0 0 0 0 0
36816 - 0 0 0 0 0 0 0 0 0 0 0 0
36817 - 0 0 0 0 0 0 0 0 0 0 0 0
36818 - 0 0 0 0 0 0 0 0 0 0 0 0
36819 - 0 0 0 0 0 0 0 0 0 0 0 0
36820 - 0 0 0 0 0 0 0 0 0 0 0 0
36821 - 0 0 0 0 0 0 0 0 0 0 0 0
36822 - 0 0 0 0 0 0 0 0 0 0 0 0
36823 - 0 0 0 0 0 0 0 0 0 6 6 6
36824 - 22 22 22 62 62 62 62 62 62 2 2 6
36825 - 2 2 6 2 2 6 2 2 6 2 2 6
36826 - 2 2 6 2 2 6 2 2 6 2 2 6
36827 - 2 2 6 2 2 6 2 2 6 26 26 26
36828 - 54 54 54 38 38 38 18 18 18 10 10 10
36829 - 2 2 6 2 2 6 34 34 34 82 82 82
36830 - 38 38 38 14 14 14 0 0 0 0 0 0
36831 - 0 0 0 0 0 0 0 0 0 0 0 0
36832 - 0 0 0 0 0 0 0 0 0 0 0 0
36833 - 0 0 0 0 0 0 0 0 0 0 0 0
36834 - 0 0 0 0 0 0 0 0 0 0 0 0
36835 - 0 0 0 0 0 0 0 0 0 0 0 0
36836 - 0 0 0 0 0 0 0 0 0 0 0 0
36837 - 0 0 0 0 0 0 0 0 0 0 0 0
36838 - 0 0 0 0 0 1 0 0 1 0 0 0
36839 - 0 0 0 0 0 0 0 0 0 0 0 0
36840 - 0 0 0 0 0 0 0 0 0 0 0 0
36841 - 0 0 0 0 0 0 0 0 0 0 0 0
36842 - 0 0 0 0 0 0 0 0 0 0 0 0
36843 - 0 0 0 0 0 0 0 0 0 6 6 6
36844 - 30 30 30 78 78 78 30 30 30 2 2 6
36845 - 2 2 6 2 2 6 2 2 6 2 2 6
36846 - 2 2 6 2 2 6 2 2 6 2 2 6
36847 - 2 2 6 2 2 6 2 2 6 10 10 10
36848 - 10 10 10 2 2 6 2 2 6 2 2 6
36849 - 2 2 6 2 2 6 2 2 6 78 78 78
36850 - 50 50 50 18 18 18 6 6 6 0 0 0
36851 - 0 0 0 0 0 0 0 0 0 0 0 0
36852 - 0 0 0 0 0 0 0 0 0 0 0 0
36853 - 0 0 0 0 0 0 0 0 0 0 0 0
36854 - 0 0 0 0 0 0 0 0 0 0 0 0
36855 - 0 0 0 0 0 0 0 0 0 0 0 0
36856 - 0 0 0 0 0 0 0 0 0 0 0 0
36857 - 0 0 0 0 0 0 0 0 0 0 0 0
36858 - 0 0 1 0 0 0 0 0 0 0 0 0
36859 - 0 0 0 0 0 0 0 0 0 0 0 0
36860 - 0 0 0 0 0 0 0 0 0 0 0 0
36861 - 0 0 0 0 0 0 0 0 0 0 0 0
36862 - 0 0 0 0 0 0 0 0 0 0 0 0
36863 - 0 0 0 0 0 0 0 0 0 10 10 10
36864 - 38 38 38 86 86 86 14 14 14 2 2 6
36865 - 2 2 6 2 2 6 2 2 6 2 2 6
36866 - 2 2 6 2 2 6 2 2 6 2 2 6
36867 - 2 2 6 2 2 6 2 2 6 2 2 6
36868 - 2 2 6 2 2 6 2 2 6 2 2 6
36869 - 2 2 6 2 2 6 2 2 6 54 54 54
36870 - 66 66 66 26 26 26 6 6 6 0 0 0
36871 - 0 0 0 0 0 0 0 0 0 0 0 0
36872 - 0 0 0 0 0 0 0 0 0 0 0 0
36873 - 0 0 0 0 0 0 0 0 0 0 0 0
36874 - 0 0 0 0 0 0 0 0 0 0 0 0
36875 - 0 0 0 0 0 0 0 0 0 0 0 0
36876 - 0 0 0 0 0 0 0 0 0 0 0 0
36877 - 0 0 0 0 0 0 0 0 0 0 0 0
36878 - 0 0 0 0 0 1 0 0 1 0 0 0
36879 - 0 0 0 0 0 0 0 0 0 0 0 0
36880 - 0 0 0 0 0 0 0 0 0 0 0 0
36881 - 0 0 0 0 0 0 0 0 0 0 0 0
36882 - 0 0 0 0 0 0 0 0 0 0 0 0
36883 - 0 0 0 0 0 0 0 0 0 14 14 14
36884 - 42 42 42 82 82 82 2 2 6 2 2 6
36885 - 2 2 6 6 6 6 10 10 10 2 2 6
36886 - 2 2 6 2 2 6 2 2 6 2 2 6
36887 - 2 2 6 2 2 6 2 2 6 6 6 6
36888 - 14 14 14 10 10 10 2 2 6 2 2 6
36889 - 2 2 6 2 2 6 2 2 6 18 18 18
36890 - 82 82 82 34 34 34 10 10 10 0 0 0
36891 - 0 0 0 0 0 0 0 0 0 0 0 0
36892 - 0 0 0 0 0 0 0 0 0 0 0 0
36893 - 0 0 0 0 0 0 0 0 0 0 0 0
36894 - 0 0 0 0 0 0 0 0 0 0 0 0
36895 - 0 0 0 0 0 0 0 0 0 0 0 0
36896 - 0 0 0 0 0 0 0 0 0 0 0 0
36897 - 0 0 0 0 0 0 0 0 0 0 0 0
36898 - 0 0 1 0 0 0 0 0 0 0 0 0
36899 - 0 0 0 0 0 0 0 0 0 0 0 0
36900 - 0 0 0 0 0 0 0 0 0 0 0 0
36901 - 0 0 0 0 0 0 0 0 0 0 0 0
36902 - 0 0 0 0 0 0 0 0 0 0 0 0
36903 - 0 0 0 0 0 0 0 0 0 14 14 14
36904 - 46 46 46 86 86 86 2 2 6 2 2 6
36905 - 6 6 6 6 6 6 22 22 22 34 34 34
36906 - 6 6 6 2 2 6 2 2 6 2 2 6
36907 - 2 2 6 2 2 6 18 18 18 34 34 34
36908 - 10 10 10 50 50 50 22 22 22 2 2 6
36909 - 2 2 6 2 2 6 2 2 6 10 10 10
36910 - 86 86 86 42 42 42 14 14 14 0 0 0
36911 - 0 0 0 0 0 0 0 0 0 0 0 0
36912 - 0 0 0 0 0 0 0 0 0 0 0 0
36913 - 0 0 0 0 0 0 0 0 0 0 0 0
36914 - 0 0 0 0 0 0 0 0 0 0 0 0
36915 - 0 0 0 0 0 0 0 0 0 0 0 0
36916 - 0 0 0 0 0 0 0 0 0 0 0 0
36917 - 0 0 0 0 0 0 0 0 0 0 0 0
36918 - 0 0 1 0 0 1 0 0 1 0 0 0
36919 - 0 0 0 0 0 0 0 0 0 0 0 0
36920 - 0 0 0 0 0 0 0 0 0 0 0 0
36921 - 0 0 0 0 0 0 0 0 0 0 0 0
36922 - 0 0 0 0 0 0 0 0 0 0 0 0
36923 - 0 0 0 0 0 0 0 0 0 14 14 14
36924 - 46 46 46 86 86 86 2 2 6 2 2 6
36925 - 38 38 38 116 116 116 94 94 94 22 22 22
36926 - 22 22 22 2 2 6 2 2 6 2 2 6
36927 - 14 14 14 86 86 86 138 138 138 162 162 162
36928 -154 154 154 38 38 38 26 26 26 6 6 6
36929 - 2 2 6 2 2 6 2 2 6 2 2 6
36930 - 86 86 86 46 46 46 14 14 14 0 0 0
36931 - 0 0 0 0 0 0 0 0 0 0 0 0
36932 - 0 0 0 0 0 0 0 0 0 0 0 0
36933 - 0 0 0 0 0 0 0 0 0 0 0 0
36934 - 0 0 0 0 0 0 0 0 0 0 0 0
36935 - 0 0 0 0 0 0 0 0 0 0 0 0
36936 - 0 0 0 0 0 0 0 0 0 0 0 0
36937 - 0 0 0 0 0 0 0 0 0 0 0 0
36938 - 0 0 0 0 0 0 0 0 0 0 0 0
36939 - 0 0 0 0 0 0 0 0 0 0 0 0
36940 - 0 0 0 0 0 0 0 0 0 0 0 0
36941 - 0 0 0 0 0 0 0 0 0 0 0 0
36942 - 0 0 0 0 0 0 0 0 0 0 0 0
36943 - 0 0 0 0 0 0 0 0 0 14 14 14
36944 - 46 46 46 86 86 86 2 2 6 14 14 14
36945 -134 134 134 198 198 198 195 195 195 116 116 116
36946 - 10 10 10 2 2 6 2 2 6 6 6 6
36947 -101 98 89 187 187 187 210 210 210 218 218 218
36948 -214 214 214 134 134 134 14 14 14 6 6 6
36949 - 2 2 6 2 2 6 2 2 6 2 2 6
36950 - 86 86 86 50 50 50 18 18 18 6 6 6
36951 - 0 0 0 0 0 0 0 0 0 0 0 0
36952 - 0 0 0 0 0 0 0 0 0 0 0 0
36953 - 0 0 0 0 0 0 0 0 0 0 0 0
36954 - 0 0 0 0 0 0 0 0 0 0 0 0
36955 - 0 0 0 0 0 0 0 0 0 0 0 0
36956 - 0 0 0 0 0 0 0 0 0 0 0 0
36957 - 0 0 0 0 0 0 0 0 1 0 0 0
36958 - 0 0 1 0 0 1 0 0 1 0 0 0
36959 - 0 0 0 0 0 0 0 0 0 0 0 0
36960 - 0 0 0 0 0 0 0 0 0 0 0 0
36961 - 0 0 0 0 0 0 0 0 0 0 0 0
36962 - 0 0 0 0 0 0 0 0 0 0 0 0
36963 - 0 0 0 0 0 0 0 0 0 14 14 14
36964 - 46 46 46 86 86 86 2 2 6 54 54 54
36965 -218 218 218 195 195 195 226 226 226 246 246 246
36966 - 58 58 58 2 2 6 2 2 6 30 30 30
36967 -210 210 210 253 253 253 174 174 174 123 123 123
36968 -221 221 221 234 234 234 74 74 74 2 2 6
36969 - 2 2 6 2 2 6 2 2 6 2 2 6
36970 - 70 70 70 58 58 58 22 22 22 6 6 6
36971 - 0 0 0 0 0 0 0 0 0 0 0 0
36972 - 0 0 0 0 0 0 0 0 0 0 0 0
36973 - 0 0 0 0 0 0 0 0 0 0 0 0
36974 - 0 0 0 0 0 0 0 0 0 0 0 0
36975 - 0 0 0 0 0 0 0 0 0 0 0 0
36976 - 0 0 0 0 0 0 0 0 0 0 0 0
36977 - 0 0 0 0 0 0 0 0 0 0 0 0
36978 - 0 0 0 0 0 0 0 0 0 0 0 0
36979 - 0 0 0 0 0 0 0 0 0 0 0 0
36980 - 0 0 0 0 0 0 0 0 0 0 0 0
36981 - 0 0 0 0 0 0 0 0 0 0 0 0
36982 - 0 0 0 0 0 0 0 0 0 0 0 0
36983 - 0 0 0 0 0 0 0 0 0 14 14 14
36984 - 46 46 46 82 82 82 2 2 6 106 106 106
36985 -170 170 170 26 26 26 86 86 86 226 226 226
36986 -123 123 123 10 10 10 14 14 14 46 46 46
36987 -231 231 231 190 190 190 6 6 6 70 70 70
36988 - 90 90 90 238 238 238 158 158 158 2 2 6
36989 - 2 2 6 2 2 6 2 2 6 2 2 6
36990 - 70 70 70 58 58 58 22 22 22 6 6 6
36991 - 0 0 0 0 0 0 0 0 0 0 0 0
36992 - 0 0 0 0 0 0 0 0 0 0 0 0
36993 - 0 0 0 0 0 0 0 0 0 0 0 0
36994 - 0 0 0 0 0 0 0 0 0 0 0 0
36995 - 0 0 0 0 0 0 0 0 0 0 0 0
36996 - 0 0 0 0 0 0 0 0 0 0 0 0
36997 - 0 0 0 0 0 0 0 0 1 0 0 0
36998 - 0 0 1 0 0 1 0 0 1 0 0 0
36999 - 0 0 0 0 0 0 0 0 0 0 0 0
37000 - 0 0 0 0 0 0 0 0 0 0 0 0
37001 - 0 0 0 0 0 0 0 0 0 0 0 0
37002 - 0 0 0 0 0 0 0 0 0 0 0 0
37003 - 0 0 0 0 0 0 0 0 0 14 14 14
37004 - 42 42 42 86 86 86 6 6 6 116 116 116
37005 -106 106 106 6 6 6 70 70 70 149 149 149
37006 -128 128 128 18 18 18 38 38 38 54 54 54
37007 -221 221 221 106 106 106 2 2 6 14 14 14
37008 - 46 46 46 190 190 190 198 198 198 2 2 6
37009 - 2 2 6 2 2 6 2 2 6 2 2 6
37010 - 74 74 74 62 62 62 22 22 22 6 6 6
37011 - 0 0 0 0 0 0 0 0 0 0 0 0
37012 - 0 0 0 0 0 0 0 0 0 0 0 0
37013 - 0 0 0 0 0 0 0 0 0 0 0 0
37014 - 0 0 0 0 0 0 0 0 0 0 0 0
37015 - 0 0 0 0 0 0 0 0 0 0 0 0
37016 - 0 0 0 0 0 0 0 0 0 0 0 0
37017 - 0 0 0 0 0 0 0 0 1 0 0 0
37018 - 0 0 1 0 0 0 0 0 1 0 0 0
37019 - 0 0 0 0 0 0 0 0 0 0 0 0
37020 - 0 0 0 0 0 0 0 0 0 0 0 0
37021 - 0 0 0 0 0 0 0 0 0 0 0 0
37022 - 0 0 0 0 0 0 0 0 0 0 0 0
37023 - 0 0 0 0 0 0 0 0 0 14 14 14
37024 - 42 42 42 94 94 94 14 14 14 101 101 101
37025 -128 128 128 2 2 6 18 18 18 116 116 116
37026 -118 98 46 121 92 8 121 92 8 98 78 10
37027 -162 162 162 106 106 106 2 2 6 2 2 6
37028 - 2 2 6 195 195 195 195 195 195 6 6 6
37029 - 2 2 6 2 2 6 2 2 6 2 2 6
37030 - 74 74 74 62 62 62 22 22 22 6 6 6
37031 - 0 0 0 0 0 0 0 0 0 0 0 0
37032 - 0 0 0 0 0 0 0 0 0 0 0 0
37033 - 0 0 0 0 0 0 0 0 0 0 0 0
37034 - 0 0 0 0 0 0 0 0 0 0 0 0
37035 - 0 0 0 0 0 0 0 0 0 0 0 0
37036 - 0 0 0 0 0 0 0 0 0 0 0 0
37037 - 0 0 0 0 0 0 0 0 1 0 0 1
37038 - 0 0 1 0 0 0 0 0 1 0 0 0
37039 - 0 0 0 0 0 0 0 0 0 0 0 0
37040 - 0 0 0 0 0 0 0 0 0 0 0 0
37041 - 0 0 0 0 0 0 0 0 0 0 0 0
37042 - 0 0 0 0 0 0 0 0 0 0 0 0
37043 - 0 0 0 0 0 0 0 0 0 10 10 10
37044 - 38 38 38 90 90 90 14 14 14 58 58 58
37045 -210 210 210 26 26 26 54 38 6 154 114 10
37046 -226 170 11 236 186 11 225 175 15 184 144 12
37047 -215 174 15 175 146 61 37 26 9 2 2 6
37048 - 70 70 70 246 246 246 138 138 138 2 2 6
37049 - 2 2 6 2 2 6 2 2 6 2 2 6
37050 - 70 70 70 66 66 66 26 26 26 6 6 6
37051 - 0 0 0 0 0 0 0 0 0 0 0 0
37052 - 0 0 0 0 0 0 0 0 0 0 0 0
37053 - 0 0 0 0 0 0 0 0 0 0 0 0
37054 - 0 0 0 0 0 0 0 0 0 0 0 0
37055 - 0 0 0 0 0 0 0 0 0 0 0 0
37056 - 0 0 0 0 0 0 0 0 0 0 0 0
37057 - 0 0 0 0 0 0 0 0 0 0 0 0
37058 - 0 0 0 0 0 0 0 0 0 0 0 0
37059 - 0 0 0 0 0 0 0 0 0 0 0 0
37060 - 0 0 0 0 0 0 0 0 0 0 0 0
37061 - 0 0 0 0 0 0 0 0 0 0 0 0
37062 - 0 0 0 0 0 0 0 0 0 0 0 0
37063 - 0 0 0 0 0 0 0 0 0 10 10 10
37064 - 38 38 38 86 86 86 14 14 14 10 10 10
37065 -195 195 195 188 164 115 192 133 9 225 175 15
37066 -239 182 13 234 190 10 232 195 16 232 200 30
37067 -245 207 45 241 208 19 232 195 16 184 144 12
37068 -218 194 134 211 206 186 42 42 42 2 2 6
37069 - 2 2 6 2 2 6 2 2 6 2 2 6
37070 - 50 50 50 74 74 74 30 30 30 6 6 6
37071 - 0 0 0 0 0 0 0 0 0 0 0 0
37072 - 0 0 0 0 0 0 0 0 0 0 0 0
37073 - 0 0 0 0 0 0 0 0 0 0 0 0
37074 - 0 0 0 0 0 0 0 0 0 0 0 0
37075 - 0 0 0 0 0 0 0 0 0 0 0 0
37076 - 0 0 0 0 0 0 0 0 0 0 0 0
37077 - 0 0 0 0 0 0 0 0 0 0 0 0
37078 - 0 0 0 0 0 0 0 0 0 0 0 0
37079 - 0 0 0 0 0 0 0 0 0 0 0 0
37080 - 0 0 0 0 0 0 0 0 0 0 0 0
37081 - 0 0 0 0 0 0 0 0 0 0 0 0
37082 - 0 0 0 0 0 0 0 0 0 0 0 0
37083 - 0 0 0 0 0 0 0 0 0 10 10 10
37084 - 34 34 34 86 86 86 14 14 14 2 2 6
37085 -121 87 25 192 133 9 219 162 10 239 182 13
37086 -236 186 11 232 195 16 241 208 19 244 214 54
37087 -246 218 60 246 218 38 246 215 20 241 208 19
37088 -241 208 19 226 184 13 121 87 25 2 2 6
37089 - 2 2 6 2 2 6 2 2 6 2 2 6
37090 - 50 50 50 82 82 82 34 34 34 10 10 10
37091 - 0 0 0 0 0 0 0 0 0 0 0 0
37092 - 0 0 0 0 0 0 0 0 0 0 0 0
37093 - 0 0 0 0 0 0 0 0 0 0 0 0
37094 - 0 0 0 0 0 0 0 0 0 0 0 0
37095 - 0 0 0 0 0 0 0 0 0 0 0 0
37096 - 0 0 0 0 0 0 0 0 0 0 0 0
37097 - 0 0 0 0 0 0 0 0 0 0 0 0
37098 - 0 0 0 0 0 0 0 0 0 0 0 0
37099 - 0 0 0 0 0 0 0 0 0 0 0 0
37100 - 0 0 0 0 0 0 0 0 0 0 0 0
37101 - 0 0 0 0 0 0 0 0 0 0 0 0
37102 - 0 0 0 0 0 0 0 0 0 0 0 0
37103 - 0 0 0 0 0 0 0 0 0 10 10 10
37104 - 34 34 34 82 82 82 30 30 30 61 42 6
37105 -180 123 7 206 145 10 230 174 11 239 182 13
37106 -234 190 10 238 202 15 241 208 19 246 218 74
37107 -246 218 38 246 215 20 246 215 20 246 215 20
37108 -226 184 13 215 174 15 184 144 12 6 6 6
37109 - 2 2 6 2 2 6 2 2 6 2 2 6
37110 - 26 26 26 94 94 94 42 42 42 14 14 14
37111 - 0 0 0 0 0 0 0 0 0 0 0 0
37112 - 0 0 0 0 0 0 0 0 0 0 0 0
37113 - 0 0 0 0 0 0 0 0 0 0 0 0
37114 - 0 0 0 0 0 0 0 0 0 0 0 0
37115 - 0 0 0 0 0 0 0 0 0 0 0 0
37116 - 0 0 0 0 0 0 0 0 0 0 0 0
37117 - 0 0 0 0 0 0 0 0 0 0 0 0
37118 - 0 0 0 0 0 0 0 0 0 0 0 0
37119 - 0 0 0 0 0 0 0 0 0 0 0 0
37120 - 0 0 0 0 0 0 0 0 0 0 0 0
37121 - 0 0 0 0 0 0 0 0 0 0 0 0
37122 - 0 0 0 0 0 0 0 0 0 0 0 0
37123 - 0 0 0 0 0 0 0 0 0 10 10 10
37124 - 30 30 30 78 78 78 50 50 50 104 69 6
37125 -192 133 9 216 158 10 236 178 12 236 186 11
37126 -232 195 16 241 208 19 244 214 54 245 215 43
37127 -246 215 20 246 215 20 241 208 19 198 155 10
37128 -200 144 11 216 158 10 156 118 10 2 2 6
37129 - 2 2 6 2 2 6 2 2 6 2 2 6
37130 - 6 6 6 90 90 90 54 54 54 18 18 18
37131 - 6 6 6 0 0 0 0 0 0 0 0 0
37132 - 0 0 0 0 0 0 0 0 0 0 0 0
37133 - 0 0 0 0 0 0 0 0 0 0 0 0
37134 - 0 0 0 0 0 0 0 0 0 0 0 0
37135 - 0 0 0 0 0 0 0 0 0 0 0 0
37136 - 0 0 0 0 0 0 0 0 0 0 0 0
37137 - 0 0 0 0 0 0 0 0 0 0 0 0
37138 - 0 0 0 0 0 0 0 0 0 0 0 0
37139 - 0 0 0 0 0 0 0 0 0 0 0 0
37140 - 0 0 0 0 0 0 0 0 0 0 0 0
37141 - 0 0 0 0 0 0 0 0 0 0 0 0
37142 - 0 0 0 0 0 0 0 0 0 0 0 0
37143 - 0 0 0 0 0 0 0 0 0 10 10 10
37144 - 30 30 30 78 78 78 46 46 46 22 22 22
37145 -137 92 6 210 162 10 239 182 13 238 190 10
37146 -238 202 15 241 208 19 246 215 20 246 215 20
37147 -241 208 19 203 166 17 185 133 11 210 150 10
37148 -216 158 10 210 150 10 102 78 10 2 2 6
37149 - 6 6 6 54 54 54 14 14 14 2 2 6
37150 - 2 2 6 62 62 62 74 74 74 30 30 30
37151 - 10 10 10 0 0 0 0 0 0 0 0 0
37152 - 0 0 0 0 0 0 0 0 0 0 0 0
37153 - 0 0 0 0 0 0 0 0 0 0 0 0
37154 - 0 0 0 0 0 0 0 0 0 0 0 0
37155 - 0 0 0 0 0 0 0 0 0 0 0 0
37156 - 0 0 0 0 0 0 0 0 0 0 0 0
37157 - 0 0 0 0 0 0 0 0 0 0 0 0
37158 - 0 0 0 0 0 0 0 0 0 0 0 0
37159 - 0 0 0 0 0 0 0 0 0 0 0 0
37160 - 0 0 0 0 0 0 0 0 0 0 0 0
37161 - 0 0 0 0 0 0 0 0 0 0 0 0
37162 - 0 0 0 0 0 0 0 0 0 0 0 0
37163 - 0 0 0 0 0 0 0 0 0 10 10 10
37164 - 34 34 34 78 78 78 50 50 50 6 6 6
37165 - 94 70 30 139 102 15 190 146 13 226 184 13
37166 -232 200 30 232 195 16 215 174 15 190 146 13
37167 -168 122 10 192 133 9 210 150 10 213 154 11
37168 -202 150 34 182 157 106 101 98 89 2 2 6
37169 - 2 2 6 78 78 78 116 116 116 58 58 58
37170 - 2 2 6 22 22 22 90 90 90 46 46 46
37171 - 18 18 18 6 6 6 0 0 0 0 0 0
37172 - 0 0 0 0 0 0 0 0 0 0 0 0
37173 - 0 0 0 0 0 0 0 0 0 0 0 0
37174 - 0 0 0 0 0 0 0 0 0 0 0 0
37175 - 0 0 0 0 0 0 0 0 0 0 0 0
37176 - 0 0 0 0 0 0 0 0 0 0 0 0
37177 - 0 0 0 0 0 0 0 0 0 0 0 0
37178 - 0 0 0 0 0 0 0 0 0 0 0 0
37179 - 0 0 0 0 0 0 0 0 0 0 0 0
37180 - 0 0 0 0 0 0 0 0 0 0 0 0
37181 - 0 0 0 0 0 0 0 0 0 0 0 0
37182 - 0 0 0 0 0 0 0 0 0 0 0 0
37183 - 0 0 0 0 0 0 0 0 0 10 10 10
37184 - 38 38 38 86 86 86 50 50 50 6 6 6
37185 -128 128 128 174 154 114 156 107 11 168 122 10
37186 -198 155 10 184 144 12 197 138 11 200 144 11
37187 -206 145 10 206 145 10 197 138 11 188 164 115
37188 -195 195 195 198 198 198 174 174 174 14 14 14
37189 - 2 2 6 22 22 22 116 116 116 116 116 116
37190 - 22 22 22 2 2 6 74 74 74 70 70 70
37191 - 30 30 30 10 10 10 0 0 0 0 0 0
37192 - 0 0 0 0 0 0 0 0 0 0 0 0
37193 - 0 0 0 0 0 0 0 0 0 0 0 0
37194 - 0 0 0 0 0 0 0 0 0 0 0 0
37195 - 0 0 0 0 0 0 0 0 0 0 0 0
37196 - 0 0 0 0 0 0 0 0 0 0 0 0
37197 - 0 0 0 0 0 0 0 0 0 0 0 0
37198 - 0 0 0 0 0 0 0 0 0 0 0 0
37199 - 0 0 0 0 0 0 0 0 0 0 0 0
37200 - 0 0 0 0 0 0 0 0 0 0 0 0
37201 - 0 0 0 0 0 0 0 0 0 0 0 0
37202 - 0 0 0 0 0 0 0 0 0 0 0 0
37203 - 0 0 0 0 0 0 6 6 6 18 18 18
37204 - 50 50 50 101 101 101 26 26 26 10 10 10
37205 -138 138 138 190 190 190 174 154 114 156 107 11
37206 -197 138 11 200 144 11 197 138 11 192 133 9
37207 -180 123 7 190 142 34 190 178 144 187 187 187
37208 -202 202 202 221 221 221 214 214 214 66 66 66
37209 - 2 2 6 2 2 6 50 50 50 62 62 62
37210 - 6 6 6 2 2 6 10 10 10 90 90 90
37211 - 50 50 50 18 18 18 6 6 6 0 0 0
37212 - 0 0 0 0 0 0 0 0 0 0 0 0
37213 - 0 0 0 0 0 0 0 0 0 0 0 0
37214 - 0 0 0 0 0 0 0 0 0 0 0 0
37215 - 0 0 0 0 0 0 0 0 0 0 0 0
37216 - 0 0 0 0 0 0 0 0 0 0 0 0
37217 - 0 0 0 0 0 0 0 0 0 0 0 0
37218 - 0 0 0 0 0 0 0 0 0 0 0 0
37219 - 0 0 0 0 0 0 0 0 0 0 0 0
37220 - 0 0 0 0 0 0 0 0 0 0 0 0
37221 - 0 0 0 0 0 0 0 0 0 0 0 0
37222 - 0 0 0 0 0 0 0 0 0 0 0 0
37223 - 0 0 0 0 0 0 10 10 10 34 34 34
37224 - 74 74 74 74 74 74 2 2 6 6 6 6
37225 -144 144 144 198 198 198 190 190 190 178 166 146
37226 -154 121 60 156 107 11 156 107 11 168 124 44
37227 -174 154 114 187 187 187 190 190 190 210 210 210
37228 -246 246 246 253 253 253 253 253 253 182 182 182
37229 - 6 6 6 2 2 6 2 2 6 2 2 6
37230 - 2 2 6 2 2 6 2 2 6 62 62 62
37231 - 74 74 74 34 34 34 14 14 14 0 0 0
37232 - 0 0 0 0 0 0 0 0 0 0 0 0
37233 - 0 0 0 0 0 0 0 0 0 0 0 0
37234 - 0 0 0 0 0 0 0 0 0 0 0 0
37235 - 0 0 0 0 0 0 0 0 0 0 0 0
37236 - 0 0 0 0 0 0 0 0 0 0 0 0
37237 - 0 0 0 0 0 0 0 0 0 0 0 0
37238 - 0 0 0 0 0 0 0 0 0 0 0 0
37239 - 0 0 0 0 0 0 0 0 0 0 0 0
37240 - 0 0 0 0 0 0 0 0 0 0 0 0
37241 - 0 0 0 0 0 0 0 0 0 0 0 0
37242 - 0 0 0 0 0 0 0 0 0 0 0 0
37243 - 0 0 0 10 10 10 22 22 22 54 54 54
37244 - 94 94 94 18 18 18 2 2 6 46 46 46
37245 -234 234 234 221 221 221 190 190 190 190 190 190
37246 -190 190 190 187 187 187 187 187 187 190 190 190
37247 -190 190 190 195 195 195 214 214 214 242 242 242
37248 -253 253 253 253 253 253 253 253 253 253 253 253
37249 - 82 82 82 2 2 6 2 2 6 2 2 6
37250 - 2 2 6 2 2 6 2 2 6 14 14 14
37251 - 86 86 86 54 54 54 22 22 22 6 6 6
37252 - 0 0 0 0 0 0 0 0 0 0 0 0
37253 - 0 0 0 0 0 0 0 0 0 0 0 0
37254 - 0 0 0 0 0 0 0 0 0 0 0 0
37255 - 0 0 0 0 0 0 0 0 0 0 0 0
37256 - 0 0 0 0 0 0 0 0 0 0 0 0
37257 - 0 0 0 0 0 0 0 0 0 0 0 0
37258 - 0 0 0 0 0 0 0 0 0 0 0 0
37259 - 0 0 0 0 0 0 0 0 0 0 0 0
37260 - 0 0 0 0 0 0 0 0 0 0 0 0
37261 - 0 0 0 0 0 0 0 0 0 0 0 0
37262 - 0 0 0 0 0 0 0 0 0 0 0 0
37263 - 6 6 6 18 18 18 46 46 46 90 90 90
37264 - 46 46 46 18 18 18 6 6 6 182 182 182
37265 -253 253 253 246 246 246 206 206 206 190 190 190
37266 -190 190 190 190 190 190 190 190 190 190 190 190
37267 -206 206 206 231 231 231 250 250 250 253 253 253
37268 -253 253 253 253 253 253 253 253 253 253 253 253
37269 -202 202 202 14 14 14 2 2 6 2 2 6
37270 - 2 2 6 2 2 6 2 2 6 2 2 6
37271 - 42 42 42 86 86 86 42 42 42 18 18 18
37272 - 6 6 6 0 0 0 0 0 0 0 0 0
37273 - 0 0 0 0 0 0 0 0 0 0 0 0
37274 - 0 0 0 0 0 0 0 0 0 0 0 0
37275 - 0 0 0 0 0 0 0 0 0 0 0 0
37276 - 0 0 0 0 0 0 0 0 0 0 0 0
37277 - 0 0 0 0 0 0 0 0 0 0 0 0
37278 - 0 0 0 0 0 0 0 0 0 0 0 0
37279 - 0 0 0 0 0 0 0 0 0 0 0 0
37280 - 0 0 0 0 0 0 0 0 0 0 0 0
37281 - 0 0 0 0 0 0 0 0 0 0 0 0
37282 - 0 0 0 0 0 0 0 0 0 6 6 6
37283 - 14 14 14 38 38 38 74 74 74 66 66 66
37284 - 2 2 6 6 6 6 90 90 90 250 250 250
37285 -253 253 253 253 253 253 238 238 238 198 198 198
37286 -190 190 190 190 190 190 195 195 195 221 221 221
37287 -246 246 246 253 253 253 253 253 253 253 253 253
37288 -253 253 253 253 253 253 253 253 253 253 253 253
37289 -253 253 253 82 82 82 2 2 6 2 2 6
37290 - 2 2 6 2 2 6 2 2 6 2 2 6
37291 - 2 2 6 78 78 78 70 70 70 34 34 34
37292 - 14 14 14 6 6 6 0 0 0 0 0 0
37293 - 0 0 0 0 0 0 0 0 0 0 0 0
37294 - 0 0 0 0 0 0 0 0 0 0 0 0
37295 - 0 0 0 0 0 0 0 0 0 0 0 0
37296 - 0 0 0 0 0 0 0 0 0 0 0 0
37297 - 0 0 0 0 0 0 0 0 0 0 0 0
37298 - 0 0 0 0 0 0 0 0 0 0 0 0
37299 - 0 0 0 0 0 0 0 0 0 0 0 0
37300 - 0 0 0 0 0 0 0 0 0 0 0 0
37301 - 0 0 0 0 0 0 0 0 0 0 0 0
37302 - 0 0 0 0 0 0 0 0 0 14 14 14
37303 - 34 34 34 66 66 66 78 78 78 6 6 6
37304 - 2 2 6 18 18 18 218 218 218 253 253 253
37305 -253 253 253 253 253 253 253 253 253 246 246 246
37306 -226 226 226 231 231 231 246 246 246 253 253 253
37307 -253 253 253 253 253 253 253 253 253 253 253 253
37308 -253 253 253 253 253 253 253 253 253 253 253 253
37309 -253 253 253 178 178 178 2 2 6 2 2 6
37310 - 2 2 6 2 2 6 2 2 6 2 2 6
37311 - 2 2 6 18 18 18 90 90 90 62 62 62
37312 - 30 30 30 10 10 10 0 0 0 0 0 0
37313 - 0 0 0 0 0 0 0 0 0 0 0 0
37314 - 0 0 0 0 0 0 0 0 0 0 0 0
37315 - 0 0 0 0 0 0 0 0 0 0 0 0
37316 - 0 0 0 0 0 0 0 0 0 0 0 0
37317 - 0 0 0 0 0 0 0 0 0 0 0 0
37318 - 0 0 0 0 0 0 0 0 0 0 0 0
37319 - 0 0 0 0 0 0 0 0 0 0 0 0
37320 - 0 0 0 0 0 0 0 0 0 0 0 0
37321 - 0 0 0 0 0 0 0 0 0 0 0 0
37322 - 0 0 0 0 0 0 10 10 10 26 26 26
37323 - 58 58 58 90 90 90 18 18 18 2 2 6
37324 - 2 2 6 110 110 110 253 253 253 253 253 253
37325 -253 253 253 253 253 253 253 253 253 253 253 253
37326 -250 250 250 253 253 253 253 253 253 253 253 253
37327 -253 253 253 253 253 253 253 253 253 253 253 253
37328 -253 253 253 253 253 253 253 253 253 253 253 253
37329 -253 253 253 231 231 231 18 18 18 2 2 6
37330 - 2 2 6 2 2 6 2 2 6 2 2 6
37331 - 2 2 6 2 2 6 18 18 18 94 94 94
37332 - 54 54 54 26 26 26 10 10 10 0 0 0
37333 - 0 0 0 0 0 0 0 0 0 0 0 0
37334 - 0 0 0 0 0 0 0 0 0 0 0 0
37335 - 0 0 0 0 0 0 0 0 0 0 0 0
37336 - 0 0 0 0 0 0 0 0 0 0 0 0
37337 - 0 0 0 0 0 0 0 0 0 0 0 0
37338 - 0 0 0 0 0 0 0 0 0 0 0 0
37339 - 0 0 0 0 0 0 0 0 0 0 0 0
37340 - 0 0 0 0 0 0 0 0 0 0 0 0
37341 - 0 0 0 0 0 0 0 0 0 0 0 0
37342 - 0 0 0 6 6 6 22 22 22 50 50 50
37343 - 90 90 90 26 26 26 2 2 6 2 2 6
37344 - 14 14 14 195 195 195 250 250 250 253 253 253
37345 -253 253 253 253 253 253 253 253 253 253 253 253
37346 -253 253 253 253 253 253 253 253 253 253 253 253
37347 -253 253 253 253 253 253 253 253 253 253 253 253
37348 -253 253 253 253 253 253 253 253 253 253 253 253
37349 -250 250 250 242 242 242 54 54 54 2 2 6
37350 - 2 2 6 2 2 6 2 2 6 2 2 6
37351 - 2 2 6 2 2 6 2 2 6 38 38 38
37352 - 86 86 86 50 50 50 22 22 22 6 6 6
37353 - 0 0 0 0 0 0 0 0 0 0 0 0
37354 - 0 0 0 0 0 0 0 0 0 0 0 0
37355 - 0 0 0 0 0 0 0 0 0 0 0 0
37356 - 0 0 0 0 0 0 0 0 0 0 0 0
37357 - 0 0 0 0 0 0 0 0 0 0 0 0
37358 - 0 0 0 0 0 0 0 0 0 0 0 0
37359 - 0 0 0 0 0 0 0 0 0 0 0 0
37360 - 0 0 0 0 0 0 0 0 0 0 0 0
37361 - 0 0 0 0 0 0 0 0 0 0 0 0
37362 - 6 6 6 14 14 14 38 38 38 82 82 82
37363 - 34 34 34 2 2 6 2 2 6 2 2 6
37364 - 42 42 42 195 195 195 246 246 246 253 253 253
37365 -253 253 253 253 253 253 253 253 253 250 250 250
37366 -242 242 242 242 242 242 250 250 250 253 253 253
37367 -253 253 253 253 253 253 253 253 253 253 253 253
37368 -253 253 253 250 250 250 246 246 246 238 238 238
37369 -226 226 226 231 231 231 101 101 101 6 6 6
37370 - 2 2 6 2 2 6 2 2 6 2 2 6
37371 - 2 2 6 2 2 6 2 2 6 2 2 6
37372 - 38 38 38 82 82 82 42 42 42 14 14 14
37373 - 6 6 6 0 0 0 0 0 0 0 0 0
37374 - 0 0 0 0 0 0 0 0 0 0 0 0
37375 - 0 0 0 0 0 0 0 0 0 0 0 0
37376 - 0 0 0 0 0 0 0 0 0 0 0 0
37377 - 0 0 0 0 0 0 0 0 0 0 0 0
37378 - 0 0 0 0 0 0 0 0 0 0 0 0
37379 - 0 0 0 0 0 0 0 0 0 0 0 0
37380 - 0 0 0 0 0 0 0 0 0 0 0 0
37381 - 0 0 0 0 0 0 0 0 0 0 0 0
37382 - 10 10 10 26 26 26 62 62 62 66 66 66
37383 - 2 2 6 2 2 6 2 2 6 6 6 6
37384 - 70 70 70 170 170 170 206 206 206 234 234 234
37385 -246 246 246 250 250 250 250 250 250 238 238 238
37386 -226 226 226 231 231 231 238 238 238 250 250 250
37387 -250 250 250 250 250 250 246 246 246 231 231 231
37388 -214 214 214 206 206 206 202 202 202 202 202 202
37389 -198 198 198 202 202 202 182 182 182 18 18 18
37390 - 2 2 6 2 2 6 2 2 6 2 2 6
37391 - 2 2 6 2 2 6 2 2 6 2 2 6
37392 - 2 2 6 62 62 62 66 66 66 30 30 30
37393 - 10 10 10 0 0 0 0 0 0 0 0 0
37394 - 0 0 0 0 0 0 0 0 0 0 0 0
37395 - 0 0 0 0 0 0 0 0 0 0 0 0
37396 - 0 0 0 0 0 0 0 0 0 0 0 0
37397 - 0 0 0 0 0 0 0 0 0 0 0 0
37398 - 0 0 0 0 0 0 0 0 0 0 0 0
37399 - 0 0 0 0 0 0 0 0 0 0 0 0
37400 - 0 0 0 0 0 0 0 0 0 0 0 0
37401 - 0 0 0 0 0 0 0 0 0 0 0 0
37402 - 14 14 14 42 42 42 82 82 82 18 18 18
37403 - 2 2 6 2 2 6 2 2 6 10 10 10
37404 - 94 94 94 182 182 182 218 218 218 242 242 242
37405 -250 250 250 253 253 253 253 253 253 250 250 250
37406 -234 234 234 253 253 253 253 253 253 253 253 253
37407 -253 253 253 253 253 253 253 253 253 246 246 246
37408 -238 238 238 226 226 226 210 210 210 202 202 202
37409 -195 195 195 195 195 195 210 210 210 158 158 158
37410 - 6 6 6 14 14 14 50 50 50 14 14 14
37411 - 2 2 6 2 2 6 2 2 6 2 2 6
37412 - 2 2 6 6 6 6 86 86 86 46 46 46
37413 - 18 18 18 6 6 6 0 0 0 0 0 0
37414 - 0 0 0 0 0 0 0 0 0 0 0 0
37415 - 0 0 0 0 0 0 0 0 0 0 0 0
37416 - 0 0 0 0 0 0 0 0 0 0 0 0
37417 - 0 0 0 0 0 0 0 0 0 0 0 0
37418 - 0 0 0 0 0 0 0 0 0 0 0 0
37419 - 0 0 0 0 0 0 0 0 0 0 0 0
37420 - 0 0 0 0 0 0 0 0 0 0 0 0
37421 - 0 0 0 0 0 0 0 0 0 6 6 6
37422 - 22 22 22 54 54 54 70 70 70 2 2 6
37423 - 2 2 6 10 10 10 2 2 6 22 22 22
37424 -166 166 166 231 231 231 250 250 250 253 253 253
37425 -253 253 253 253 253 253 253 253 253 250 250 250
37426 -242 242 242 253 253 253 253 253 253 253 253 253
37427 -253 253 253 253 253 253 253 253 253 253 253 253
37428 -253 253 253 253 253 253 253 253 253 246 246 246
37429 -231 231 231 206 206 206 198 198 198 226 226 226
37430 - 94 94 94 2 2 6 6 6 6 38 38 38
37431 - 30 30 30 2 2 6 2 2 6 2 2 6
37432 - 2 2 6 2 2 6 62 62 62 66 66 66
37433 - 26 26 26 10 10 10 0 0 0 0 0 0
37434 - 0 0 0 0 0 0 0 0 0 0 0 0
37435 - 0 0 0 0 0 0 0 0 0 0 0 0
37436 - 0 0 0 0 0 0 0 0 0 0 0 0
37437 - 0 0 0 0 0 0 0 0 0 0 0 0
37438 - 0 0 0 0 0 0 0 0 0 0 0 0
37439 - 0 0 0 0 0 0 0 0 0 0 0 0
37440 - 0 0 0 0 0 0 0 0 0 0 0 0
37441 - 0 0 0 0 0 0 0 0 0 10 10 10
37442 - 30 30 30 74 74 74 50 50 50 2 2 6
37443 - 26 26 26 26 26 26 2 2 6 106 106 106
37444 -238 238 238 253 253 253 253 253 253 253 253 253
37445 -253 253 253 253 253 253 253 253 253 253 253 253
37446 -253 253 253 253 253 253 253 253 253 253 253 253
37447 -253 253 253 253 253 253 253 253 253 253 253 253
37448 -253 253 253 253 253 253 253 253 253 253 253 253
37449 -253 253 253 246 246 246 218 218 218 202 202 202
37450 -210 210 210 14 14 14 2 2 6 2 2 6
37451 - 30 30 30 22 22 22 2 2 6 2 2 6
37452 - 2 2 6 2 2 6 18 18 18 86 86 86
37453 - 42 42 42 14 14 14 0 0 0 0 0 0
37454 - 0 0 0 0 0 0 0 0 0 0 0 0
37455 - 0 0 0 0 0 0 0 0 0 0 0 0
37456 - 0 0 0 0 0 0 0 0 0 0 0 0
37457 - 0 0 0 0 0 0 0 0 0 0 0 0
37458 - 0 0 0 0 0 0 0 0 0 0 0 0
37459 - 0 0 0 0 0 0 0 0 0 0 0 0
37460 - 0 0 0 0 0 0 0 0 0 0 0 0
37461 - 0 0 0 0 0 0 0 0 0 14 14 14
37462 - 42 42 42 90 90 90 22 22 22 2 2 6
37463 - 42 42 42 2 2 6 18 18 18 218 218 218
37464 -253 253 253 253 253 253 253 253 253 253 253 253
37465 -253 253 253 253 253 253 253 253 253 253 253 253
37466 -253 253 253 253 253 253 253 253 253 253 253 253
37467 -253 253 253 253 253 253 253 253 253 253 253 253
37468 -253 253 253 253 253 253 253 253 253 253 253 253
37469 -253 253 253 253 253 253 250 250 250 221 221 221
37470 -218 218 218 101 101 101 2 2 6 14 14 14
37471 - 18 18 18 38 38 38 10 10 10 2 2 6
37472 - 2 2 6 2 2 6 2 2 6 78 78 78
37473 - 58 58 58 22 22 22 6 6 6 0 0 0
37474 - 0 0 0 0 0 0 0 0 0 0 0 0
37475 - 0 0 0 0 0 0 0 0 0 0 0 0
37476 - 0 0 0 0 0 0 0 0 0 0 0 0
37477 - 0 0 0 0 0 0 0 0 0 0 0 0
37478 - 0 0 0 0 0 0 0 0 0 0 0 0
37479 - 0 0 0 0 0 0 0 0 0 0 0 0
37480 - 0 0 0 0 0 0 0 0 0 0 0 0
37481 - 0 0 0 0 0 0 6 6 6 18 18 18
37482 - 54 54 54 82 82 82 2 2 6 26 26 26
37483 - 22 22 22 2 2 6 123 123 123 253 253 253
37484 -253 253 253 253 253 253 253 253 253 253 253 253
37485 -253 253 253 253 253 253 253 253 253 253 253 253
37486 -253 253 253 253 253 253 253 253 253 253 253 253
37487 -253 253 253 253 253 253 253 253 253 253 253 253
37488 -253 253 253 253 253 253 253 253 253 253 253 253
37489 -253 253 253 253 253 253 253 253 253 250 250 250
37490 -238 238 238 198 198 198 6 6 6 38 38 38
37491 - 58 58 58 26 26 26 38 38 38 2 2 6
37492 - 2 2 6 2 2 6 2 2 6 46 46 46
37493 - 78 78 78 30 30 30 10 10 10 0 0 0
37494 - 0 0 0 0 0 0 0 0 0 0 0 0
37495 - 0 0 0 0 0 0 0 0 0 0 0 0
37496 - 0 0 0 0 0 0 0 0 0 0 0 0
37497 - 0 0 0 0 0 0 0 0 0 0 0 0
37498 - 0 0 0 0 0 0 0 0 0 0 0 0
37499 - 0 0 0 0 0 0 0 0 0 0 0 0
37500 - 0 0 0 0 0 0 0 0 0 0 0 0
37501 - 0 0 0 0 0 0 10 10 10 30 30 30
37502 - 74 74 74 58 58 58 2 2 6 42 42 42
37503 - 2 2 6 22 22 22 231 231 231 253 253 253
37504 -253 253 253 253 253 253 253 253 253 253 253 253
37505 -253 253 253 253 253 253 253 253 253 250 250 250
37506 -253 253 253 253 253 253 253 253 253 253 253 253
37507 -253 253 253 253 253 253 253 253 253 253 253 253
37508 -253 253 253 253 253 253 253 253 253 253 253 253
37509 -253 253 253 253 253 253 253 253 253 253 253 253
37510 -253 253 253 246 246 246 46 46 46 38 38 38
37511 - 42 42 42 14 14 14 38 38 38 14 14 14
37512 - 2 2 6 2 2 6 2 2 6 6 6 6
37513 - 86 86 86 46 46 46 14 14 14 0 0 0
37514 - 0 0 0 0 0 0 0 0 0 0 0 0
37515 - 0 0 0 0 0 0 0 0 0 0 0 0
37516 - 0 0 0 0 0 0 0 0 0 0 0 0
37517 - 0 0 0 0 0 0 0 0 0 0 0 0
37518 - 0 0 0 0 0 0 0 0 0 0 0 0
37519 - 0 0 0 0 0 0 0 0 0 0 0 0
37520 - 0 0 0 0 0 0 0 0 0 0 0 0
37521 - 0 0 0 6 6 6 14 14 14 42 42 42
37522 - 90 90 90 18 18 18 18 18 18 26 26 26
37523 - 2 2 6 116 116 116 253 253 253 253 253 253
37524 -253 253 253 253 253 253 253 253 253 253 253 253
37525 -253 253 253 253 253 253 250 250 250 238 238 238
37526 -253 253 253 253 253 253 253 253 253 253 253 253
37527 -253 253 253 253 253 253 253 253 253 253 253 253
37528 -253 253 253 253 253 253 253 253 253 253 253 253
37529 -253 253 253 253 253 253 253 253 253 253 253 253
37530 -253 253 253 253 253 253 94 94 94 6 6 6
37531 - 2 2 6 2 2 6 10 10 10 34 34 34
37532 - 2 2 6 2 2 6 2 2 6 2 2 6
37533 - 74 74 74 58 58 58 22 22 22 6 6 6
37534 - 0 0 0 0 0 0 0 0 0 0 0 0
37535 - 0 0 0 0 0 0 0 0 0 0 0 0
37536 - 0 0 0 0 0 0 0 0 0 0 0 0
37537 - 0 0 0 0 0 0 0 0 0 0 0 0
37538 - 0 0 0 0 0 0 0 0 0 0 0 0
37539 - 0 0 0 0 0 0 0 0 0 0 0 0
37540 - 0 0 0 0 0 0 0 0 0 0 0 0
37541 - 0 0 0 10 10 10 26 26 26 66 66 66
37542 - 82 82 82 2 2 6 38 38 38 6 6 6
37543 - 14 14 14 210 210 210 253 253 253 253 253 253
37544 -253 253 253 253 253 253 253 253 253 253 253 253
37545 -253 253 253 253 253 253 246 246 246 242 242 242
37546 -253 253 253 253 253 253 253 253 253 253 253 253
37547 -253 253 253 253 253 253 253 253 253 253 253 253
37548 -253 253 253 253 253 253 253 253 253 253 253 253
37549 -253 253 253 253 253 253 253 253 253 253 253 253
37550 -253 253 253 253 253 253 144 144 144 2 2 6
37551 - 2 2 6 2 2 6 2 2 6 46 46 46
37552 - 2 2 6 2 2 6 2 2 6 2 2 6
37553 - 42 42 42 74 74 74 30 30 30 10 10 10
37554 - 0 0 0 0 0 0 0 0 0 0 0 0
37555 - 0 0 0 0 0 0 0 0 0 0 0 0
37556 - 0 0 0 0 0 0 0 0 0 0 0 0
37557 - 0 0 0 0 0 0 0 0 0 0 0 0
37558 - 0 0 0 0 0 0 0 0 0 0 0 0
37559 - 0 0 0 0 0 0 0 0 0 0 0 0
37560 - 0 0 0 0 0 0 0 0 0 0 0 0
37561 - 6 6 6 14 14 14 42 42 42 90 90 90
37562 - 26 26 26 6 6 6 42 42 42 2 2 6
37563 - 74 74 74 250 250 250 253 253 253 253 253 253
37564 -253 253 253 253 253 253 253 253 253 253 253 253
37565 -253 253 253 253 253 253 242 242 242 242 242 242
37566 -253 253 253 253 253 253 253 253 253 253 253 253
37567 -253 253 253 253 253 253 253 253 253 253 253 253
37568 -253 253 253 253 253 253 253 253 253 253 253 253
37569 -253 253 253 253 253 253 253 253 253 253 253 253
37570 -253 253 253 253 253 253 182 182 182 2 2 6
37571 - 2 2 6 2 2 6 2 2 6 46 46 46
37572 - 2 2 6 2 2 6 2 2 6 2 2 6
37573 - 10 10 10 86 86 86 38 38 38 10 10 10
37574 - 0 0 0 0 0 0 0 0 0 0 0 0
37575 - 0 0 0 0 0 0 0 0 0 0 0 0
37576 - 0 0 0 0 0 0 0 0 0 0 0 0
37577 - 0 0 0 0 0 0 0 0 0 0 0 0
37578 - 0 0 0 0 0 0 0 0 0 0 0 0
37579 - 0 0 0 0 0 0 0 0 0 0 0 0
37580 - 0 0 0 0 0 0 0 0 0 0 0 0
37581 - 10 10 10 26 26 26 66 66 66 82 82 82
37582 - 2 2 6 22 22 22 18 18 18 2 2 6
37583 -149 149 149 253 253 253 253 253 253 253 253 253
37584 -253 253 253 253 253 253 253 253 253 253 253 253
37585 -253 253 253 253 253 253 234 234 234 242 242 242
37586 -253 253 253 253 253 253 253 253 253 253 253 253
37587 -253 253 253 253 253 253 253 253 253 253 253 253
37588 -253 253 253 253 253 253 253 253 253 253 253 253
37589 -253 253 253 253 253 253 253 253 253 253 253 253
37590 -253 253 253 253 253 253 206 206 206 2 2 6
37591 - 2 2 6 2 2 6 2 2 6 38 38 38
37592 - 2 2 6 2 2 6 2 2 6 2 2 6
37593 - 6 6 6 86 86 86 46 46 46 14 14 14
37594 - 0 0 0 0 0 0 0 0 0 0 0 0
37595 - 0 0 0 0 0 0 0 0 0 0 0 0
37596 - 0 0 0 0 0 0 0 0 0 0 0 0
37597 - 0 0 0 0 0 0 0 0 0 0 0 0
37598 - 0 0 0 0 0 0 0 0 0 0 0 0
37599 - 0 0 0 0 0 0 0 0 0 0 0 0
37600 - 0 0 0 0 0 0 0 0 0 6 6 6
37601 - 18 18 18 46 46 46 86 86 86 18 18 18
37602 - 2 2 6 34 34 34 10 10 10 6 6 6
37603 -210 210 210 253 253 253 253 253 253 253 253 253
37604 -253 253 253 253 253 253 253 253 253 253 253 253
37605 -253 253 253 253 253 253 234 234 234 242 242 242
37606 -253 253 253 253 253 253 253 253 253 253 253 253
37607 -253 253 253 253 253 253 253 253 253 253 253 253
37608 -253 253 253 253 253 253 253 253 253 253 253 253
37609 -253 253 253 253 253 253 253 253 253 253 253 253
37610 -253 253 253 253 253 253 221 221 221 6 6 6
37611 - 2 2 6 2 2 6 6 6 6 30 30 30
37612 - 2 2 6 2 2 6 2 2 6 2 2 6
37613 - 2 2 6 82 82 82 54 54 54 18 18 18
37614 - 6 6 6 0 0 0 0 0 0 0 0 0
37615 - 0 0 0 0 0 0 0 0 0 0 0 0
37616 - 0 0 0 0 0 0 0 0 0 0 0 0
37617 - 0 0 0 0 0 0 0 0 0 0 0 0
37618 - 0 0 0 0 0 0 0 0 0 0 0 0
37619 - 0 0 0 0 0 0 0 0 0 0 0 0
37620 - 0 0 0 0 0 0 0 0 0 10 10 10
37621 - 26 26 26 66 66 66 62 62 62 2 2 6
37622 - 2 2 6 38 38 38 10 10 10 26 26 26
37623 -238 238 238 253 253 253 253 253 253 253 253 253
37624 -253 253 253 253 253 253 253 253 253 253 253 253
37625 -253 253 253 253 253 253 231 231 231 238 238 238
37626 -253 253 253 253 253 253 253 253 253 253 253 253
37627 -253 253 253 253 253 253 253 253 253 253 253 253
37628 -253 253 253 253 253 253 253 253 253 253 253 253
37629 -253 253 253 253 253 253 253 253 253 253 253 253
37630 -253 253 253 253 253 253 231 231 231 6 6 6
37631 - 2 2 6 2 2 6 10 10 10 30 30 30
37632 - 2 2 6 2 2 6 2 2 6 2 2 6
37633 - 2 2 6 66 66 66 58 58 58 22 22 22
37634 - 6 6 6 0 0 0 0 0 0 0 0 0
37635 - 0 0 0 0 0 0 0 0 0 0 0 0
37636 - 0 0 0 0 0 0 0 0 0 0 0 0
37637 - 0 0 0 0 0 0 0 0 0 0 0 0
37638 - 0 0 0 0 0 0 0 0 0 0 0 0
37639 - 0 0 0 0 0 0 0 0 0 0 0 0
37640 - 0 0 0 0 0 0 0 0 0 10 10 10
37641 - 38 38 38 78 78 78 6 6 6 2 2 6
37642 - 2 2 6 46 46 46 14 14 14 42 42 42
37643 -246 246 246 253 253 253 253 253 253 253 253 253
37644 -253 253 253 253 253 253 253 253 253 253 253 253
37645 -253 253 253 253 253 253 231 231 231 242 242 242
37646 -253 253 253 253 253 253 253 253 253 253 253 253
37647 -253 253 253 253 253 253 253 253 253 253 253 253
37648 -253 253 253 253 253 253 253 253 253 253 253 253
37649 -253 253 253 253 253 253 253 253 253 253 253 253
37650 -253 253 253 253 253 253 234 234 234 10 10 10
37651 - 2 2 6 2 2 6 22 22 22 14 14 14
37652 - 2 2 6 2 2 6 2 2 6 2 2 6
37653 - 2 2 6 66 66 66 62 62 62 22 22 22
37654 - 6 6 6 0 0 0 0 0 0 0 0 0
37655 - 0 0 0 0 0 0 0 0 0 0 0 0
37656 - 0 0 0 0 0 0 0 0 0 0 0 0
37657 - 0 0 0 0 0 0 0 0 0 0 0 0
37658 - 0 0 0 0 0 0 0 0 0 0 0 0
37659 - 0 0 0 0 0 0 0 0 0 0 0 0
37660 - 0 0 0 0 0 0 6 6 6 18 18 18
37661 - 50 50 50 74 74 74 2 2 6 2 2 6
37662 - 14 14 14 70 70 70 34 34 34 62 62 62
37663 -250 250 250 253 253 253 253 253 253 253 253 253
37664 -253 253 253 253 253 253 253 253 253 253 253 253
37665 -253 253 253 253 253 253 231 231 231 246 246 246
37666 -253 253 253 253 253 253 253 253 253 253 253 253
37667 -253 253 253 253 253 253 253 253 253 253 253 253
37668 -253 253 253 253 253 253 253 253 253 253 253 253
37669 -253 253 253 253 253 253 253 253 253 253 253 253
37670 -253 253 253 253 253 253 234 234 234 14 14 14
37671 - 2 2 6 2 2 6 30 30 30 2 2 6
37672 - 2 2 6 2 2 6 2 2 6 2 2 6
37673 - 2 2 6 66 66 66 62 62 62 22 22 22
37674 - 6 6 6 0 0 0 0 0 0 0 0 0
37675 - 0 0 0 0 0 0 0 0 0 0 0 0
37676 - 0 0 0 0 0 0 0 0 0 0 0 0
37677 - 0 0 0 0 0 0 0 0 0 0 0 0
37678 - 0 0 0 0 0 0 0 0 0 0 0 0
37679 - 0 0 0 0 0 0 0 0 0 0 0 0
37680 - 0 0 0 0 0 0 6 6 6 18 18 18
37681 - 54 54 54 62 62 62 2 2 6 2 2 6
37682 - 2 2 6 30 30 30 46 46 46 70 70 70
37683 -250 250 250 253 253 253 253 253 253 253 253 253
37684 -253 253 253 253 253 253 253 253 253 253 253 253
37685 -253 253 253 253 253 253 231 231 231 246 246 246
37686 -253 253 253 253 253 253 253 253 253 253 253 253
37687 -253 253 253 253 253 253 253 253 253 253 253 253
37688 -253 253 253 253 253 253 253 253 253 253 253 253
37689 -253 253 253 253 253 253 253 253 253 253 253 253
37690 -253 253 253 253 253 253 226 226 226 10 10 10
37691 - 2 2 6 6 6 6 30 30 30 2 2 6
37692 - 2 2 6 2 2 6 2 2 6 2 2 6
37693 - 2 2 6 66 66 66 58 58 58 22 22 22
37694 - 6 6 6 0 0 0 0 0 0 0 0 0
37695 - 0 0 0 0 0 0 0 0 0 0 0 0
37696 - 0 0 0 0 0 0 0 0 0 0 0 0
37697 - 0 0 0 0 0 0 0 0 0 0 0 0
37698 - 0 0 0 0 0 0 0 0 0 0 0 0
37699 - 0 0 0 0 0 0 0 0 0 0 0 0
37700 - 0 0 0 0 0 0 6 6 6 22 22 22
37701 - 58 58 58 62 62 62 2 2 6 2 2 6
37702 - 2 2 6 2 2 6 30 30 30 78 78 78
37703 -250 250 250 253 253 253 253 253 253 253 253 253
37704 -253 253 253 253 253 253 253 253 253 253 253 253
37705 -253 253 253 253 253 253 231 231 231 246 246 246
37706 -253 253 253 253 253 253 253 253 253 253 253 253
37707 -253 253 253 253 253 253 253 253 253 253 253 253
37708 -253 253 253 253 253 253 253 253 253 253 253 253
37709 -253 253 253 253 253 253 253 253 253 253 253 253
37710 -253 253 253 253 253 253 206 206 206 2 2 6
37711 - 22 22 22 34 34 34 18 14 6 22 22 22
37712 - 26 26 26 18 18 18 6 6 6 2 2 6
37713 - 2 2 6 82 82 82 54 54 54 18 18 18
37714 - 6 6 6 0 0 0 0 0 0 0 0 0
37715 - 0 0 0 0 0 0 0 0 0 0 0 0
37716 - 0 0 0 0 0 0 0 0 0 0 0 0
37717 - 0 0 0 0 0 0 0 0 0 0 0 0
37718 - 0 0 0 0 0 0 0 0 0 0 0 0
37719 - 0 0 0 0 0 0 0 0 0 0 0 0
37720 - 0 0 0 0 0 0 6 6 6 26 26 26
37721 - 62 62 62 106 106 106 74 54 14 185 133 11
37722 -210 162 10 121 92 8 6 6 6 62 62 62
37723 -238 238 238 253 253 253 253 253 253 253 253 253
37724 -253 253 253 253 253 253 253 253 253 253 253 253
37725 -253 253 253 253 253 253 231 231 231 246 246 246
37726 -253 253 253 253 253 253 253 253 253 253 253 253
37727 -253 253 253 253 253 253 253 253 253 253 253 253
37728 -253 253 253 253 253 253 253 253 253 253 253 253
37729 -253 253 253 253 253 253 253 253 253 253 253 253
37730 -253 253 253 253 253 253 158 158 158 18 18 18
37731 - 14 14 14 2 2 6 2 2 6 2 2 6
37732 - 6 6 6 18 18 18 66 66 66 38 38 38
37733 - 6 6 6 94 94 94 50 50 50 18 18 18
37734 - 6 6 6 0 0 0 0 0 0 0 0 0
37735 - 0 0 0 0 0 0 0 0 0 0 0 0
37736 - 0 0 0 0 0 0 0 0 0 0 0 0
37737 - 0 0 0 0 0 0 0 0 0 0 0 0
37738 - 0 0 0 0 0 0 0 0 0 0 0 0
37739 - 0 0 0 0 0 0 0 0 0 6 6 6
37740 - 10 10 10 10 10 10 18 18 18 38 38 38
37741 - 78 78 78 142 134 106 216 158 10 242 186 14
37742 -246 190 14 246 190 14 156 118 10 10 10 10
37743 - 90 90 90 238 238 238 253 253 253 253 253 253
37744 -253 253 253 253 253 253 253 253 253 253 253 253
37745 -253 253 253 253 253 253 231 231 231 250 250 250
37746 -253 253 253 253 253 253 253 253 253 253 253 253
37747 -253 253 253 253 253 253 253 253 253 253 253 253
37748 -253 253 253 253 253 253 253 253 253 253 253 253
37749 -253 253 253 253 253 253 253 253 253 246 230 190
37750 -238 204 91 238 204 91 181 142 44 37 26 9
37751 - 2 2 6 2 2 6 2 2 6 2 2 6
37752 - 2 2 6 2 2 6 38 38 38 46 46 46
37753 - 26 26 26 106 106 106 54 54 54 18 18 18
37754 - 6 6 6 0 0 0 0 0 0 0 0 0
37755 - 0 0 0 0 0 0 0 0 0 0 0 0
37756 - 0 0 0 0 0 0 0 0 0 0 0 0
37757 - 0 0 0 0 0 0 0 0 0 0 0 0
37758 - 0 0 0 0 0 0 0 0 0 0 0 0
37759 - 0 0 0 6 6 6 14 14 14 22 22 22
37760 - 30 30 30 38 38 38 50 50 50 70 70 70
37761 -106 106 106 190 142 34 226 170 11 242 186 14
37762 -246 190 14 246 190 14 246 190 14 154 114 10
37763 - 6 6 6 74 74 74 226 226 226 253 253 253
37764 -253 253 253 253 253 253 253 253 253 253 253 253
37765 -253 253 253 253 253 253 231 231 231 250 250 250
37766 -253 253 253 253 253 253 253 253 253 253 253 253
37767 -253 253 253 253 253 253 253 253 253 253 253 253
37768 -253 253 253 253 253 253 253 253 253 253 253 253
37769 -253 253 253 253 253 253 253 253 253 228 184 62
37770 -241 196 14 241 208 19 232 195 16 38 30 10
37771 - 2 2 6 2 2 6 2 2 6 2 2 6
37772 - 2 2 6 6 6 6 30 30 30 26 26 26
37773 -203 166 17 154 142 90 66 66 66 26 26 26
37774 - 6 6 6 0 0 0 0 0 0 0 0 0
37775 - 0 0 0 0 0 0 0 0 0 0 0 0
37776 - 0 0 0 0 0 0 0 0 0 0 0 0
37777 - 0 0 0 0 0 0 0 0 0 0 0 0
37778 - 0 0 0 0 0 0 0 0 0 0 0 0
37779 - 6 6 6 18 18 18 38 38 38 58 58 58
37780 - 78 78 78 86 86 86 101 101 101 123 123 123
37781 -175 146 61 210 150 10 234 174 13 246 186 14
37782 -246 190 14 246 190 14 246 190 14 238 190 10
37783 -102 78 10 2 2 6 46 46 46 198 198 198
37784 -253 253 253 253 253 253 253 253 253 253 253 253
37785 -253 253 253 253 253 253 234 234 234 242 242 242
37786 -253 253 253 253 253 253 253 253 253 253 253 253
37787 -253 253 253 253 253 253 253 253 253 253 253 253
37788 -253 253 253 253 253 253 253 253 253 253 253 253
37789 -253 253 253 253 253 253 253 253 253 224 178 62
37790 -242 186 14 241 196 14 210 166 10 22 18 6
37791 - 2 2 6 2 2 6 2 2 6 2 2 6
37792 - 2 2 6 2 2 6 6 6 6 121 92 8
37793 -238 202 15 232 195 16 82 82 82 34 34 34
37794 - 10 10 10 0 0 0 0 0 0 0 0 0
37795 - 0 0 0 0 0 0 0 0 0 0 0 0
37796 - 0 0 0 0 0 0 0 0 0 0 0 0
37797 - 0 0 0 0 0 0 0 0 0 0 0 0
37798 - 0 0 0 0 0 0 0 0 0 0 0 0
37799 - 14 14 14 38 38 38 70 70 70 154 122 46
37800 -190 142 34 200 144 11 197 138 11 197 138 11
37801 -213 154 11 226 170 11 242 186 14 246 190 14
37802 -246 190 14 246 190 14 246 190 14 246 190 14
37803 -225 175 15 46 32 6 2 2 6 22 22 22
37804 -158 158 158 250 250 250 253 253 253 253 253 253
37805 -253 253 253 253 253 253 253 253 253 253 253 253
37806 -253 253 253 253 253 253 253 253 253 253 253 253
37807 -253 253 253 253 253 253 253 253 253 253 253 253
37808 -253 253 253 253 253 253 253 253 253 253 253 253
37809 -253 253 253 250 250 250 242 242 242 224 178 62
37810 -239 182 13 236 186 11 213 154 11 46 32 6
37811 - 2 2 6 2 2 6 2 2 6 2 2 6
37812 - 2 2 6 2 2 6 61 42 6 225 175 15
37813 -238 190 10 236 186 11 112 100 78 42 42 42
37814 - 14 14 14 0 0 0 0 0 0 0 0 0
37815 - 0 0 0 0 0 0 0 0 0 0 0 0
37816 - 0 0 0 0 0 0 0 0 0 0 0 0
37817 - 0 0 0 0 0 0 0 0 0 0 0 0
37818 - 0 0 0 0 0 0 0 0 0 6 6 6
37819 - 22 22 22 54 54 54 154 122 46 213 154 11
37820 -226 170 11 230 174 11 226 170 11 226 170 11
37821 -236 178 12 242 186 14 246 190 14 246 190 14
37822 -246 190 14 246 190 14 246 190 14 246 190 14
37823 -241 196 14 184 144 12 10 10 10 2 2 6
37824 - 6 6 6 116 116 116 242 242 242 253 253 253
37825 -253 253 253 253 253 253 253 253 253 253 253 253
37826 -253 253 253 253 253 253 253 253 253 253 253 253
37827 -253 253 253 253 253 253 253 253 253 253 253 253
37828 -253 253 253 253 253 253 253 253 253 253 253 253
37829 -253 253 253 231 231 231 198 198 198 214 170 54
37830 -236 178 12 236 178 12 210 150 10 137 92 6
37831 - 18 14 6 2 2 6 2 2 6 2 2 6
37832 - 6 6 6 70 47 6 200 144 11 236 178 12
37833 -239 182 13 239 182 13 124 112 88 58 58 58
37834 - 22 22 22 6 6 6 0 0 0 0 0 0
37835 - 0 0 0 0 0 0 0 0 0 0 0 0
37836 - 0 0 0 0 0 0 0 0 0 0 0 0
37837 - 0 0 0 0 0 0 0 0 0 0 0 0
37838 - 0 0 0 0 0 0 0 0 0 10 10 10
37839 - 30 30 30 70 70 70 180 133 36 226 170 11
37840 -239 182 13 242 186 14 242 186 14 246 186 14
37841 -246 190 14 246 190 14 246 190 14 246 190 14
37842 -246 190 14 246 190 14 246 190 14 246 190 14
37843 -246 190 14 232 195 16 98 70 6 2 2 6
37844 - 2 2 6 2 2 6 66 66 66 221 221 221
37845 -253 253 253 253 253 253 253 253 253 253 253 253
37846 -253 253 253 253 253 253 253 253 253 253 253 253
37847 -253 253 253 253 253 253 253 253 253 253 253 253
37848 -253 253 253 253 253 253 253 253 253 253 253 253
37849 -253 253 253 206 206 206 198 198 198 214 166 58
37850 -230 174 11 230 174 11 216 158 10 192 133 9
37851 -163 110 8 116 81 8 102 78 10 116 81 8
37852 -167 114 7 197 138 11 226 170 11 239 182 13
37853 -242 186 14 242 186 14 162 146 94 78 78 78
37854 - 34 34 34 14 14 14 6 6 6 0 0 0
37855 - 0 0 0 0 0 0 0 0 0 0 0 0
37856 - 0 0 0 0 0 0 0 0 0 0 0 0
37857 - 0 0 0 0 0 0 0 0 0 0 0 0
37858 - 0 0 0 0 0 0 0 0 0 6 6 6
37859 - 30 30 30 78 78 78 190 142 34 226 170 11
37860 -239 182 13 246 190 14 246 190 14 246 190 14
37861 -246 190 14 246 190 14 246 190 14 246 190 14
37862 -246 190 14 246 190 14 246 190 14 246 190 14
37863 -246 190 14 241 196 14 203 166 17 22 18 6
37864 - 2 2 6 2 2 6 2 2 6 38 38 38
37865 -218 218 218 253 253 253 253 253 253 253 253 253
37866 -253 253 253 253 253 253 253 253 253 253 253 253
37867 -253 253 253 253 253 253 253 253 253 253 253 253
37868 -253 253 253 253 253 253 253 253 253 253 253 253
37869 -250 250 250 206 206 206 198 198 198 202 162 69
37870 -226 170 11 236 178 12 224 166 10 210 150 10
37871 -200 144 11 197 138 11 192 133 9 197 138 11
37872 -210 150 10 226 170 11 242 186 14 246 190 14
37873 -246 190 14 246 186 14 225 175 15 124 112 88
37874 - 62 62 62 30 30 30 14 14 14 6 6 6
37875 - 0 0 0 0 0 0 0 0 0 0 0 0
37876 - 0 0 0 0 0 0 0 0 0 0 0 0
37877 - 0 0 0 0 0 0 0 0 0 0 0 0
37878 - 0 0 0 0 0 0 0 0 0 10 10 10
37879 - 30 30 30 78 78 78 174 135 50 224 166 10
37880 -239 182 13 246 190 14 246 190 14 246 190 14
37881 -246 190 14 246 190 14 246 190 14 246 190 14
37882 -246 190 14 246 190 14 246 190 14 246 190 14
37883 -246 190 14 246 190 14 241 196 14 139 102 15
37884 - 2 2 6 2 2 6 2 2 6 2 2 6
37885 - 78 78 78 250 250 250 253 253 253 253 253 253
37886 -253 253 253 253 253 253 253 253 253 253 253 253
37887 -253 253 253 253 253 253 253 253 253 253 253 253
37888 -253 253 253 253 253 253 253 253 253 253 253 253
37889 -250 250 250 214 214 214 198 198 198 190 150 46
37890 -219 162 10 236 178 12 234 174 13 224 166 10
37891 -216 158 10 213 154 11 213 154 11 216 158 10
37892 -226 170 11 239 182 13 246 190 14 246 190 14
37893 -246 190 14 246 190 14 242 186 14 206 162 42
37894 -101 101 101 58 58 58 30 30 30 14 14 14
37895 - 6 6 6 0 0 0 0 0 0 0 0 0
37896 - 0 0 0 0 0 0 0 0 0 0 0 0
37897 - 0 0 0 0 0 0 0 0 0 0 0 0
37898 - 0 0 0 0 0 0 0 0 0 10 10 10
37899 - 30 30 30 74 74 74 174 135 50 216 158 10
37900 -236 178 12 246 190 14 246 190 14 246 190 14
37901 -246 190 14 246 190 14 246 190 14 246 190 14
37902 -246 190 14 246 190 14 246 190 14 246 190 14
37903 -246 190 14 246 190 14 241 196 14 226 184 13
37904 - 61 42 6 2 2 6 2 2 6 2 2 6
37905 - 22 22 22 238 238 238 253 253 253 253 253 253
37906 -253 253 253 253 253 253 253 253 253 253 253 253
37907 -253 253 253 253 253 253 253 253 253 253 253 253
37908 -253 253 253 253 253 253 253 253 253 253 253 253
37909 -253 253 253 226 226 226 187 187 187 180 133 36
37910 -216 158 10 236 178 12 239 182 13 236 178 12
37911 -230 174 11 226 170 11 226 170 11 230 174 11
37912 -236 178 12 242 186 14 246 190 14 246 190 14
37913 -246 190 14 246 190 14 246 186 14 239 182 13
37914 -206 162 42 106 106 106 66 66 66 34 34 34
37915 - 14 14 14 6 6 6 0 0 0 0 0 0
37916 - 0 0 0 0 0 0 0 0 0 0 0 0
37917 - 0 0 0 0 0 0 0 0 0 0 0 0
37918 - 0 0 0 0 0 0 0 0 0 6 6 6
37919 - 26 26 26 70 70 70 163 133 67 213 154 11
37920 -236 178 12 246 190 14 246 190 14 246 190 14
37921 -246 190 14 246 190 14 246 190 14 246 190 14
37922 -246 190 14 246 190 14 246 190 14 246 190 14
37923 -246 190 14 246 190 14 246 190 14 241 196 14
37924 -190 146 13 18 14 6 2 2 6 2 2 6
37925 - 46 46 46 246 246 246 253 253 253 253 253 253
37926 -253 253 253 253 253 253 253 253 253 253 253 253
37927 -253 253 253 253 253 253 253 253 253 253 253 253
37928 -253 253 253 253 253 253 253 253 253 253 253 253
37929 -253 253 253 221 221 221 86 86 86 156 107 11
37930 -216 158 10 236 178 12 242 186 14 246 186 14
37931 -242 186 14 239 182 13 239 182 13 242 186 14
37932 -242 186 14 246 186 14 246 190 14 246 190 14
37933 -246 190 14 246 190 14 246 190 14 246 190 14
37934 -242 186 14 225 175 15 142 122 72 66 66 66
37935 - 30 30 30 10 10 10 0 0 0 0 0 0
37936 - 0 0 0 0 0 0 0 0 0 0 0 0
37937 - 0 0 0 0 0 0 0 0 0 0 0 0
37938 - 0 0 0 0 0 0 0 0 0 6 6 6
37939 - 26 26 26 70 70 70 163 133 67 210 150 10
37940 -236 178 12 246 190 14 246 190 14 246 190 14
37941 -246 190 14 246 190 14 246 190 14 246 190 14
37942 -246 190 14 246 190 14 246 190 14 246 190 14
37943 -246 190 14 246 190 14 246 190 14 246 190 14
37944 -232 195 16 121 92 8 34 34 34 106 106 106
37945 -221 221 221 253 253 253 253 253 253 253 253 253
37946 -253 253 253 253 253 253 253 253 253 253 253 253
37947 -253 253 253 253 253 253 253 253 253 253 253 253
37948 -253 253 253 253 253 253 253 253 253 253 253 253
37949 -242 242 242 82 82 82 18 14 6 163 110 8
37950 -216 158 10 236 178 12 242 186 14 246 190 14
37951 -246 190 14 246 190 14 246 190 14 246 190 14
37952 -246 190 14 246 190 14 246 190 14 246 190 14
37953 -246 190 14 246 190 14 246 190 14 246 190 14
37954 -246 190 14 246 190 14 242 186 14 163 133 67
37955 - 46 46 46 18 18 18 6 6 6 0 0 0
37956 - 0 0 0 0 0 0 0 0 0 0 0 0
37957 - 0 0 0 0 0 0 0 0 0 0 0 0
37958 - 0 0 0 0 0 0 0 0 0 10 10 10
37959 - 30 30 30 78 78 78 163 133 67 210 150 10
37960 -236 178 12 246 186 14 246 190 14 246 190 14
37961 -246 190 14 246 190 14 246 190 14 246 190 14
37962 -246 190 14 246 190 14 246 190 14 246 190 14
37963 -246 190 14 246 190 14 246 190 14 246 190 14
37964 -241 196 14 215 174 15 190 178 144 253 253 253
37965 -253 253 253 253 253 253 253 253 253 253 253 253
37966 -253 253 253 253 253 253 253 253 253 253 253 253
37967 -253 253 253 253 253 253 253 253 253 253 253 253
37968 -253 253 253 253 253 253 253 253 253 218 218 218
37969 - 58 58 58 2 2 6 22 18 6 167 114 7
37970 -216 158 10 236 178 12 246 186 14 246 190 14
37971 -246 190 14 246 190 14 246 190 14 246 190 14
37972 -246 190 14 246 190 14 246 190 14 246 190 14
37973 -246 190 14 246 190 14 246 190 14 246 190 14
37974 -246 190 14 246 186 14 242 186 14 190 150 46
37975 - 54 54 54 22 22 22 6 6 6 0 0 0
37976 - 0 0 0 0 0 0 0 0 0 0 0 0
37977 - 0 0 0 0 0 0 0 0 0 0 0 0
37978 - 0 0 0 0 0 0 0 0 0 14 14 14
37979 - 38 38 38 86 86 86 180 133 36 213 154 11
37980 -236 178 12 246 186 14 246 190 14 246 190 14
37981 -246 190 14 246 190 14 246 190 14 246 190 14
37982 -246 190 14 246 190 14 246 190 14 246 190 14
37983 -246 190 14 246 190 14 246 190 14 246 190 14
37984 -246 190 14 232 195 16 190 146 13 214 214 214
37985 -253 253 253 253 253 253 253 253 253 253 253 253
37986 -253 253 253 253 253 253 253 253 253 253 253 253
37987 -253 253 253 253 253 253 253 253 253 253 253 253
37988 -253 253 253 250 250 250 170 170 170 26 26 26
37989 - 2 2 6 2 2 6 37 26 9 163 110 8
37990 -219 162 10 239 182 13 246 186 14 246 190 14
37991 -246 190 14 246 190 14 246 190 14 246 190 14
37992 -246 190 14 246 190 14 246 190 14 246 190 14
37993 -246 190 14 246 190 14 246 190 14 246 190 14
37994 -246 186 14 236 178 12 224 166 10 142 122 72
37995 - 46 46 46 18 18 18 6 6 6 0 0 0
37996 - 0 0 0 0 0 0 0 0 0 0 0 0
37997 - 0 0 0 0 0 0 0 0 0 0 0 0
37998 - 0 0 0 0 0 0 6 6 6 18 18 18
37999 - 50 50 50 109 106 95 192 133 9 224 166 10
38000 -242 186 14 246 190 14 246 190 14 246 190 14
38001 -246 190 14 246 190 14 246 190 14 246 190 14
38002 -246 190 14 246 190 14 246 190 14 246 190 14
38003 -246 190 14 246 190 14 246 190 14 246 190 14
38004 -242 186 14 226 184 13 210 162 10 142 110 46
38005 -226 226 226 253 253 253 253 253 253 253 253 253
38006 -253 253 253 253 253 253 253 253 253 253 253 253
38007 -253 253 253 253 253 253 253 253 253 253 253 253
38008 -198 198 198 66 66 66 2 2 6 2 2 6
38009 - 2 2 6 2 2 6 50 34 6 156 107 11
38010 -219 162 10 239 182 13 246 186 14 246 190 14
38011 -246 190 14 246 190 14 246 190 14 246 190 14
38012 -246 190 14 246 190 14 246 190 14 246 190 14
38013 -246 190 14 246 190 14 246 190 14 242 186 14
38014 -234 174 13 213 154 11 154 122 46 66 66 66
38015 - 30 30 30 10 10 10 0 0 0 0 0 0
38016 - 0 0 0 0 0 0 0 0 0 0 0 0
38017 - 0 0 0 0 0 0 0 0 0 0 0 0
38018 - 0 0 0 0 0 0 6 6 6 22 22 22
38019 - 58 58 58 154 121 60 206 145 10 234 174 13
38020 -242 186 14 246 186 14 246 190 14 246 190 14
38021 -246 190 14 246 190 14 246 190 14 246 190 14
38022 -246 190 14 246 190 14 246 190 14 246 190 14
38023 -246 190 14 246 190 14 246 190 14 246 190 14
38024 -246 186 14 236 178 12 210 162 10 163 110 8
38025 - 61 42 6 138 138 138 218 218 218 250 250 250
38026 -253 253 253 253 253 253 253 253 253 250 250 250
38027 -242 242 242 210 210 210 144 144 144 66 66 66
38028 - 6 6 6 2 2 6 2 2 6 2 2 6
38029 - 2 2 6 2 2 6 61 42 6 163 110 8
38030 -216 158 10 236 178 12 246 190 14 246 190 14
38031 -246 190 14 246 190 14 246 190 14 246 190 14
38032 -246 190 14 246 190 14 246 190 14 246 190 14
38033 -246 190 14 239 182 13 230 174 11 216 158 10
38034 -190 142 34 124 112 88 70 70 70 38 38 38
38035 - 18 18 18 6 6 6 0 0 0 0 0 0
38036 - 0 0 0 0 0 0 0 0 0 0 0 0
38037 - 0 0 0 0 0 0 0 0 0 0 0 0
38038 - 0 0 0 0 0 0 6 6 6 22 22 22
38039 - 62 62 62 168 124 44 206 145 10 224 166 10
38040 -236 178 12 239 182 13 242 186 14 242 186 14
38041 -246 186 14 246 190 14 246 190 14 246 190 14
38042 -246 190 14 246 190 14 246 190 14 246 190 14
38043 -246 190 14 246 190 14 246 190 14 246 190 14
38044 -246 190 14 236 178 12 216 158 10 175 118 6
38045 - 80 54 7 2 2 6 6 6 6 30 30 30
38046 - 54 54 54 62 62 62 50 50 50 38 38 38
38047 - 14 14 14 2 2 6 2 2 6 2 2 6
38048 - 2 2 6 2 2 6 2 2 6 2 2 6
38049 - 2 2 6 6 6 6 80 54 7 167 114 7
38050 -213 154 11 236 178 12 246 190 14 246 190 14
38051 -246 190 14 246 190 14 246 190 14 246 190 14
38052 -246 190 14 242 186 14 239 182 13 239 182 13
38053 -230 174 11 210 150 10 174 135 50 124 112 88
38054 - 82 82 82 54 54 54 34 34 34 18 18 18
38055 - 6 6 6 0 0 0 0 0 0 0 0 0
38056 - 0 0 0 0 0 0 0 0 0 0 0 0
38057 - 0 0 0 0 0 0 0 0 0 0 0 0
38058 - 0 0 0 0 0 0 6 6 6 18 18 18
38059 - 50 50 50 158 118 36 192 133 9 200 144 11
38060 -216 158 10 219 162 10 224 166 10 226 170 11
38061 -230 174 11 236 178 12 239 182 13 239 182 13
38062 -242 186 14 246 186 14 246 190 14 246 190 14
38063 -246 190 14 246 190 14 246 190 14 246 190 14
38064 -246 186 14 230 174 11 210 150 10 163 110 8
38065 -104 69 6 10 10 10 2 2 6 2 2 6
38066 - 2 2 6 2 2 6 2 2 6 2 2 6
38067 - 2 2 6 2 2 6 2 2 6 2 2 6
38068 - 2 2 6 2 2 6 2 2 6 2 2 6
38069 - 2 2 6 6 6 6 91 60 6 167 114 7
38070 -206 145 10 230 174 11 242 186 14 246 190 14
38071 -246 190 14 246 190 14 246 186 14 242 186 14
38072 -239 182 13 230 174 11 224 166 10 213 154 11
38073 -180 133 36 124 112 88 86 86 86 58 58 58
38074 - 38 38 38 22 22 22 10 10 10 6 6 6
38075 - 0 0 0 0 0 0 0 0 0 0 0 0
38076 - 0 0 0 0 0 0 0 0 0 0 0 0
38077 - 0 0 0 0 0 0 0 0 0 0 0 0
38078 - 0 0 0 0 0 0 0 0 0 14 14 14
38079 - 34 34 34 70 70 70 138 110 50 158 118 36
38080 -167 114 7 180 123 7 192 133 9 197 138 11
38081 -200 144 11 206 145 10 213 154 11 219 162 10
38082 -224 166 10 230 174 11 239 182 13 242 186 14
38083 -246 186 14 246 186 14 246 186 14 246 186 14
38084 -239 182 13 216 158 10 185 133 11 152 99 6
38085 -104 69 6 18 14 6 2 2 6 2 2 6
38086 - 2 2 6 2 2 6 2 2 6 2 2 6
38087 - 2 2 6 2 2 6 2 2 6 2 2 6
38088 - 2 2 6 2 2 6 2 2 6 2 2 6
38089 - 2 2 6 6 6 6 80 54 7 152 99 6
38090 -192 133 9 219 162 10 236 178 12 239 182 13
38091 -246 186 14 242 186 14 239 182 13 236 178 12
38092 -224 166 10 206 145 10 192 133 9 154 121 60
38093 - 94 94 94 62 62 62 42 42 42 22 22 22
38094 - 14 14 14 6 6 6 0 0 0 0 0 0
38095 - 0 0 0 0 0 0 0 0 0 0 0 0
38096 - 0 0 0 0 0 0 0 0 0 0 0 0
38097 - 0 0 0 0 0 0 0 0 0 0 0 0
38098 - 0 0 0 0 0 0 0 0 0 6 6 6
38099 - 18 18 18 34 34 34 58 58 58 78 78 78
38100 -101 98 89 124 112 88 142 110 46 156 107 11
38101 -163 110 8 167 114 7 175 118 6 180 123 7
38102 -185 133 11 197 138 11 210 150 10 219 162 10
38103 -226 170 11 236 178 12 236 178 12 234 174 13
38104 -219 162 10 197 138 11 163 110 8 130 83 6
38105 - 91 60 6 10 10 10 2 2 6 2 2 6
38106 - 18 18 18 38 38 38 38 38 38 38 38 38
38107 - 38 38 38 38 38 38 38 38 38 38 38 38
38108 - 38 38 38 38 38 38 26 26 26 2 2 6
38109 - 2 2 6 6 6 6 70 47 6 137 92 6
38110 -175 118 6 200 144 11 219 162 10 230 174 11
38111 -234 174 13 230 174 11 219 162 10 210 150 10
38112 -192 133 9 163 110 8 124 112 88 82 82 82
38113 - 50 50 50 30 30 30 14 14 14 6 6 6
38114 - 0 0 0 0 0 0 0 0 0 0 0 0
38115 - 0 0 0 0 0 0 0 0 0 0 0 0
38116 - 0 0 0 0 0 0 0 0 0 0 0 0
38117 - 0 0 0 0 0 0 0 0 0 0 0 0
38118 - 0 0 0 0 0 0 0 0 0 0 0 0
38119 - 6 6 6 14 14 14 22 22 22 34 34 34
38120 - 42 42 42 58 58 58 74 74 74 86 86 86
38121 -101 98 89 122 102 70 130 98 46 121 87 25
38122 -137 92 6 152 99 6 163 110 8 180 123 7
38123 -185 133 11 197 138 11 206 145 10 200 144 11
38124 -180 123 7 156 107 11 130 83 6 104 69 6
38125 - 50 34 6 54 54 54 110 110 110 101 98 89
38126 - 86 86 86 82 82 82 78 78 78 78 78 78
38127 - 78 78 78 78 78 78 78 78 78 78 78 78
38128 - 78 78 78 82 82 82 86 86 86 94 94 94
38129 -106 106 106 101 101 101 86 66 34 124 80 6
38130 -156 107 11 180 123 7 192 133 9 200 144 11
38131 -206 145 10 200 144 11 192 133 9 175 118 6
38132 -139 102 15 109 106 95 70 70 70 42 42 42
38133 - 22 22 22 10 10 10 0 0 0 0 0 0
38134 - 0 0 0 0 0 0 0 0 0 0 0 0
38135 - 0 0 0 0 0 0 0 0 0 0 0 0
38136 - 0 0 0 0 0 0 0 0 0 0 0 0
38137 - 0 0 0 0 0 0 0 0 0 0 0 0
38138 - 0 0 0 0 0 0 0 0 0 0 0 0
38139 - 0 0 0 0 0 0 6 6 6 10 10 10
38140 - 14 14 14 22 22 22 30 30 30 38 38 38
38141 - 50 50 50 62 62 62 74 74 74 90 90 90
38142 -101 98 89 112 100 78 121 87 25 124 80 6
38143 -137 92 6 152 99 6 152 99 6 152 99 6
38144 -138 86 6 124 80 6 98 70 6 86 66 30
38145 -101 98 89 82 82 82 58 58 58 46 46 46
38146 - 38 38 38 34 34 34 34 34 34 34 34 34
38147 - 34 34 34 34 34 34 34 34 34 34 34 34
38148 - 34 34 34 34 34 34 38 38 38 42 42 42
38149 - 54 54 54 82 82 82 94 86 76 91 60 6
38150 -134 86 6 156 107 11 167 114 7 175 118 6
38151 -175 118 6 167 114 7 152 99 6 121 87 25
38152 -101 98 89 62 62 62 34 34 34 18 18 18
38153 - 6 6 6 0 0 0 0 0 0 0 0 0
38154 - 0 0 0 0 0 0 0 0 0 0 0 0
38155 - 0 0 0 0 0 0 0 0 0 0 0 0
38156 - 0 0 0 0 0 0 0 0 0 0 0 0
38157 - 0 0 0 0 0 0 0 0 0 0 0 0
38158 - 0 0 0 0 0 0 0 0 0 0 0 0
38159 - 0 0 0 0 0 0 0 0 0 0 0 0
38160 - 0 0 0 6 6 6 6 6 6 10 10 10
38161 - 18 18 18 22 22 22 30 30 30 42 42 42
38162 - 50 50 50 66 66 66 86 86 86 101 98 89
38163 -106 86 58 98 70 6 104 69 6 104 69 6
38164 -104 69 6 91 60 6 82 62 34 90 90 90
38165 - 62 62 62 38 38 38 22 22 22 14 14 14
38166 - 10 10 10 10 10 10 10 10 10 10 10 10
38167 - 10 10 10 10 10 10 6 6 6 10 10 10
38168 - 10 10 10 10 10 10 10 10 10 14 14 14
38169 - 22 22 22 42 42 42 70 70 70 89 81 66
38170 - 80 54 7 104 69 6 124 80 6 137 92 6
38171 -134 86 6 116 81 8 100 82 52 86 86 86
38172 - 58 58 58 30 30 30 14 14 14 6 6 6
38173 - 0 0 0 0 0 0 0 0 0 0 0 0
38174 - 0 0 0 0 0 0 0 0 0 0 0 0
38175 - 0 0 0 0 0 0 0 0 0 0 0 0
38176 - 0 0 0 0 0 0 0 0 0 0 0 0
38177 - 0 0 0 0 0 0 0 0 0 0 0 0
38178 - 0 0 0 0 0 0 0 0 0 0 0 0
38179 - 0 0 0 0 0 0 0 0 0 0 0 0
38180 - 0 0 0 0 0 0 0 0 0 0 0 0
38181 - 0 0 0 6 6 6 10 10 10 14 14 14
38182 - 18 18 18 26 26 26 38 38 38 54 54 54
38183 - 70 70 70 86 86 86 94 86 76 89 81 66
38184 - 89 81 66 86 86 86 74 74 74 50 50 50
38185 - 30 30 30 14 14 14 6 6 6 0 0 0
38186 - 0 0 0 0 0 0 0 0 0 0 0 0
38187 - 0 0 0 0 0 0 0 0 0 0 0 0
38188 - 0 0 0 0 0 0 0 0 0 0 0 0
38189 - 6 6 6 18 18 18 34 34 34 58 58 58
38190 - 82 82 82 89 81 66 89 81 66 89 81 66
38191 - 94 86 66 94 86 76 74 74 74 50 50 50
38192 - 26 26 26 14 14 14 6 6 6 0 0 0
38193 - 0 0 0 0 0 0 0 0 0 0 0 0
38194 - 0 0 0 0 0 0 0 0 0 0 0 0
38195 - 0 0 0 0 0 0 0 0 0 0 0 0
38196 - 0 0 0 0 0 0 0 0 0 0 0 0
38197 - 0 0 0 0 0 0 0 0 0 0 0 0
38198 - 0 0 0 0 0 0 0 0 0 0 0 0
38199 - 0 0 0 0 0 0 0 0 0 0 0 0
38200 - 0 0 0 0 0 0 0 0 0 0 0 0
38201 - 0 0 0 0 0 0 0 0 0 0 0 0
38202 - 6 6 6 6 6 6 14 14 14 18 18 18
38203 - 30 30 30 38 38 38 46 46 46 54 54 54
38204 - 50 50 50 42 42 42 30 30 30 18 18 18
38205 - 10 10 10 0 0 0 0 0 0 0 0 0
38206 - 0 0 0 0 0 0 0 0 0 0 0 0
38207 - 0 0 0 0 0 0 0 0 0 0 0 0
38208 - 0 0 0 0 0 0 0 0 0 0 0 0
38209 - 0 0 0 6 6 6 14 14 14 26 26 26
38210 - 38 38 38 50 50 50 58 58 58 58 58 58
38211 - 54 54 54 42 42 42 30 30 30 18 18 18
38212 - 10 10 10 0 0 0 0 0 0 0 0 0
38213 - 0 0 0 0 0 0 0 0 0 0 0 0
38214 - 0 0 0 0 0 0 0 0 0 0 0 0
38215 - 0 0 0 0 0 0 0 0 0 0 0 0
38216 - 0 0 0 0 0 0 0 0 0 0 0 0
38217 - 0 0 0 0 0 0 0 0 0 0 0 0
38218 - 0 0 0 0 0 0 0 0 0 0 0 0
38219 - 0 0 0 0 0 0 0 0 0 0 0 0
38220 - 0 0 0 0 0 0 0 0 0 0 0 0
38221 - 0 0 0 0 0 0 0 0 0 0 0 0
38222 - 0 0 0 0 0 0 0 0 0 6 6 6
38223 - 6 6 6 10 10 10 14 14 14 18 18 18
38224 - 18 18 18 14 14 14 10 10 10 6 6 6
38225 - 0 0 0 0 0 0 0 0 0 0 0 0
38226 - 0 0 0 0 0 0 0 0 0 0 0 0
38227 - 0 0 0 0 0 0 0 0 0 0 0 0
38228 - 0 0 0 0 0 0 0 0 0 0 0 0
38229 - 0 0 0 0 0 0 0 0 0 6 6 6
38230 - 14 14 14 18 18 18 22 22 22 22 22 22
38231 - 18 18 18 14 14 14 10 10 10 6 6 6
38232 - 0 0 0 0 0 0 0 0 0 0 0 0
38233 - 0 0 0 0 0 0 0 0 0 0 0 0
38234 - 0 0 0 0 0 0 0 0 0 0 0 0
38235 - 0 0 0 0 0 0 0 0 0 0 0 0
38236 - 0 0 0 0 0 0 0 0 0 0 0 0
38237 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38238 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38239 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38240 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38244 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38245 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38246 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38247 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38250 +4 4 4 4 4 4
38251 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38258 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38259 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38260 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38264 +4 4 4 4 4 4
38265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38274 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38278 +4 4 4 4 4 4
38279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38286 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38288 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38292 +4 4 4 4 4 4
38293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38302 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38306 +4 4 4 4 4 4
38307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38316 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38320 +4 4 4 4 4 4
38321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38325 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38326 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38329 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38330 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38331 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38332 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38334 +4 4 4 4 4 4
38335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38339 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38340 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38341 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38342 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38343 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38344 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38345 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38346 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38347 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38348 +4 4 4 4 4 4
38349 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38353 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38354 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38355 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38356 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38357 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38358 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38359 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38360 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38361 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38362 +4 4 4 4 4 4
38363 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38366 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38367 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38368 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38369 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38370 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38371 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38372 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38373 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38374 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38375 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38376 +4 4 4 4 4 4
38377 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38379 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38380 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38381 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38382 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38383 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38384 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38385 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38386 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38387 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38388 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38389 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38390 +4 4 4 4 4 4
38391 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38392 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38393 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38394 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38395 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38396 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38397 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38398 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38399 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38400 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38401 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38402 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38403 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38404 +4 4 4 4 4 4
38405 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38406 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38407 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38408 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38409 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38410 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38411 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38412 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38413 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38414 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38415 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38416 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38417 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38418 +4 4 4 4 4 4
38419 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38420 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38421 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38422 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38423 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38424 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38425 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38426 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38427 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38428 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38429 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38430 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38431 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38432 +4 4 4 4 4 4
38433 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38435 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38436 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38437 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38438 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38439 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38440 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38441 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38442 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38443 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38444 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38445 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38446 +4 4 4 4 4 4
38447 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38449 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38450 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38451 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38452 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38453 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38454 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38455 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38456 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38457 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38458 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38459 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38460 +4 4 4 4 4 4
38461 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38462 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38463 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38464 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38465 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38466 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38467 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38468 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38469 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38470 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38471 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38472 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38473 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38474 +4 4 4 4 4 4
38475 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38476 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38477 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38478 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38479 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38480 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38481 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38482 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38483 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38484 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38485 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38486 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38487 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38488 +0 0 0 4 4 4
38489 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38490 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38491 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38492 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38493 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38494 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38495 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38496 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38497 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38498 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38499 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38500 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38501 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38502 +2 0 0 0 0 0
38503 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38504 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38505 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38506 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38507 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38508 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38509 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38510 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38511 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38512 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38513 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38514 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38515 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38516 +37 38 37 0 0 0
38517 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38518 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38519 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38520 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38521 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38522 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38523 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38524 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38525 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38526 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38527 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38528 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38529 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38530 +85 115 134 4 0 0
38531 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38532 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38533 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38534 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38535 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38536 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38537 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38538 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38539 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38540 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38541 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38542 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38543 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38544 +60 73 81 4 0 0
38545 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38546 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38547 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38548 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38549 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38550 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38551 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38552 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38553 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38554 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38555 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38556 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38557 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38558 +16 19 21 4 0 0
38559 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38560 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38561 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38562 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38563 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38564 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38565 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38566 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38567 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38568 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38569 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38570 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38571 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38572 +4 0 0 4 3 3
38573 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38574 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38575 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38576 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38577 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38578 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38579 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38580 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38581 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38582 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38583 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38584 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38585 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38586 +3 2 2 4 4 4
38587 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38588 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38589 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38590 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38591 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38592 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38593 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38594 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38595 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38596 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38597 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38598 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38599 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38600 +4 4 4 4 4 4
38601 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38602 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38603 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38604 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38605 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38606 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38607 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38608 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38609 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38610 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38611 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38612 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38613 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38614 +4 4 4 4 4 4
38615 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38616 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38617 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38618 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38619 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38620 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38621 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38622 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38623 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38624 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38625 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38626 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38627 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38628 +5 5 5 5 5 5
38629 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38630 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38631 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38632 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38633 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38634 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38635 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38636 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38637 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38638 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38639 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38640 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38641 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38642 +5 5 5 4 4 4
38643 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38644 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38645 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38646 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38647 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38648 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38649 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38650 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38651 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38652 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38653 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38654 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38655 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38656 +4 4 4 4 4 4
38657 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38658 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38659 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38660 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38661 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38662 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38663 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38664 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38665 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38666 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38667 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38668 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38669 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38670 +4 4 4 4 4 4
38671 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38672 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38673 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38674 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38675 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38676 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38677 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38678 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38679 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38680 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38681 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38682 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38683 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38684 +4 4 4 4 4 4
38685 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38686 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38687 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38688 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38689 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38690 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38691 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38692 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38693 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38694 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38695 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38696 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38697 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38698 +4 4 4 4 4 4
38699 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38700 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38701 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38702 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38703 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38704 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38705 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38706 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38707 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38708 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38709 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38710 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38712 +4 4 4 4 4 4
38713 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38714 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38715 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38716 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38717 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38718 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38719 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38720 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38721 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38722 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38723 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38724 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38726 +4 4 4 4 4 4
38727 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38728 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38729 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38730 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38731 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38732 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38733 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38734 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38735 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38736 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38737 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38738 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38740 +4 4 4 4 4 4
38741 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38742 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38743 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38744 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38745 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38746 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38747 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38748 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38749 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38750 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38751 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38752 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38754 +4 4 4 4 4 4
38755 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38756 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38757 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38758 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38759 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38760 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38761 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38762 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38763 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38764 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38765 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38766 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38767 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38768 +4 4 4 4 4 4
38769 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38770 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38771 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38772 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38773 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38774 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38775 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38776 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38777 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38778 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38779 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38780 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38781 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38782 +4 4 4 4 4 4
38783 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38784 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38785 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38786 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38787 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38788 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38789 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38790 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38791 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38792 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38793 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38794 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38795 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38796 +4 4 4 4 4 4
38797 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38798 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38799 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38800 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38801 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38802 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38803 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38804 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38805 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38806 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38807 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38808 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38809 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38810 +4 4 4 4 4 4
38811 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38812 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38813 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38814 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38815 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38816 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38817 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38818 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38819 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38820 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38821 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38822 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38823 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38824 +4 4 4 4 4 4
38825 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38826 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38827 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38828 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38829 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38830 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38831 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38832 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38833 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38834 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38835 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38836 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38837 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38838 +4 4 4 4 4 4
38839 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38840 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38841 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38842 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38843 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38844 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38845 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38846 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38847 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38848 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38849 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38850 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38851 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38852 +4 4 4 4 4 4
38853 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38854 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38855 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38856 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38857 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38858 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38859 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38860 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38861 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38862 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38863 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38864 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38865 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38866 +4 4 4 4 4 4
38867 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38868 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38869 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38870 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38871 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38872 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38873 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38874 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38875 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38876 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38877 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38878 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38879 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38880 +4 4 4 4 4 4
38881 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38882 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38883 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38884 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38885 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38886 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38887 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38888 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38889 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38890 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38891 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38893 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38894 +4 4 4 4 4 4
38895 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38896 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38897 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38898 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38899 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38900 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38901 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38902 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38903 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38904 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38905 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38907 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38908 +4 4 4 4 4 4
38909 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38910 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38911 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38912 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38913 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38914 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38915 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38916 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38917 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38918 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38919 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38920 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38921 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38922 +4 4 4 4 4 4
38923 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38924 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38925 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38926 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38927 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38928 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38929 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38930 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38931 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38932 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38933 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38934 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38935 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38936 +4 4 4 4 4 4
38937 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38938 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38939 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38940 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38941 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38942 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38943 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38944 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38945 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38946 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38947 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38948 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38949 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38950 +4 4 4 4 4 4
38951 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38952 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38953 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38954 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38955 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38956 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38957 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38958 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38959 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38960 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38961 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38962 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38963 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38964 +4 4 4 4 4 4
38965 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38966 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38967 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38968 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38969 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38970 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38971 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38972 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38973 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38974 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38975 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38976 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38977 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38978 +4 4 4 4 4 4
38979 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38980 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38981 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38982 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38983 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38984 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38985 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38986 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38987 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38988 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38989 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38990 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38991 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38992 +4 4 4 4 4 4
38993 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38994 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38995 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38996 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38997 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38998 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38999 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
39000 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
39001 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
39002 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
39003 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39006 +4 4 4 4 4 4
39007 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
39008 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
39009 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
39010 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
39011 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
39012 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
39013 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
39014 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
39015 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
39016 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
39017 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39019 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39020 +4 4 4 4 4 4
39021 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
39022 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
39023 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
39024 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
39025 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
39026 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
39027 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39028 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
39029 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
39030 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
39031 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39032 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39034 +4 4 4 4 4 4
39035 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
39036 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
39037 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
39038 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
39039 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
39040 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
39041 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
39042 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
39043 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
39044 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
39045 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39046 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39048 +4 4 4 4 4 4
39049 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
39050 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
39051 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
39052 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
39053 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
39054 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
39055 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
39056 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
39057 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
39058 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
39059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39060 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39062 +4 4 4 4 4 4
39063 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39064 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
39065 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
39066 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
39067 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
39068 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
39069 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
39070 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
39071 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
39072 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39074 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39076 +4 4 4 4 4 4
39077 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
39078 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
39079 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
39080 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
39081 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
39082 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
39083 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
39084 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
39085 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
39086 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
39087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39090 +4 4 4 4 4 4
39091 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
39092 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
39093 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
39094 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
39095 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
39096 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
39097 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
39098 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
39099 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
39100 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39102 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39104 +4 4 4 4 4 4
39105 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
39106 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39107 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
39108 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
39109 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
39110 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
39111 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
39112 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
39113 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
39114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39115 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39116 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39118 +4 4 4 4 4 4
39119 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
39120 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
39121 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
39122 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
39123 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
39124 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
39125 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
39126 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
39127 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
39128 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39129 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39130 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39132 +4 4 4 4 4 4
39133 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39134 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
39135 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
39136 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
39137 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
39138 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
39139 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
39140 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
39141 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39142 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39143 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39144 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39145 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39146 +4 4 4 4 4 4
39147 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
39148 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
39149 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39150 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
39151 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
39152 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
39153 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
39154 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
39155 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39156 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39157 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39158 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39159 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39160 +4 4 4 4 4 4
39161 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39162 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
39163 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
39164 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
39165 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
39166 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
39167 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
39168 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
39169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39170 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39171 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39172 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39173 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39174 +4 4 4 4 4 4
39175 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39176 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
39177 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39178 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
39179 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
39180 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
39181 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
39182 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
39183 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39184 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39185 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39186 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39187 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39188 +4 4 4 4 4 4
39189 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39190 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
39191 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
39192 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
39193 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
39194 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
39195 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
39196 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
39197 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39198 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39199 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39200 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39201 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39202 +4 4 4 4 4 4
39203 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39204 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39205 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
39206 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39207 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
39208 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
39209 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
39210 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39211 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39212 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39213 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39214 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39215 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39216 +4 4 4 4 4 4
39217 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39218 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39219 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39220 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
39221 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
39222 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
39223 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
39224 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39225 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39226 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39227 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39228 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39229 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39230 +4 4 4 4 4 4
39231 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39232 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39233 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39234 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39235 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
39236 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
39237 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
39238 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39239 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39240 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39244 +4 4 4 4 4 4
39245 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39246 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39247 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39248 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39249 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39250 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
39251 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
39252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39258 +4 4 4 4 4 4
39259 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39260 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39262 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39263 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39264 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39265 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
39266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39272 +4 4 4 4 4 4
39273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39274 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39276 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
39277 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
39278 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39279 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39286 +4 4 4 4 4 4
39287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39288 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39291 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39292 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39293 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39300 +4 4 4 4 4 4
39301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39302 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39305 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39306 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39307 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39314 +4 4 4 4 4 4
39315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39316 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39319 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39320 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39321 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39328 +4 4 4 4 4 4
39329 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39330 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39333 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39334 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39341 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39342 +4 4 4 4 4 4
39343 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39347 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39348 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39349 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39354 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39355 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39356 +4 4 4 4 4 4
39357 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
39358 index 3473e75..c930142 100644
39359 --- a/drivers/video/udlfb.c
39360 +++ b/drivers/video/udlfb.c
39361 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
39362 dlfb_urb_completion(urb);
39363
39364 error:
39365 - atomic_add(bytes_sent, &dev->bytes_sent);
39366 - atomic_add(bytes_identical, &dev->bytes_identical);
39367 - atomic_add(width*height*2, &dev->bytes_rendered);
39368 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39369 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39370 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39371 end_cycles = get_cycles();
39372 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39373 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39374 >> 10)), /* Kcycles */
39375 &dev->cpu_kcycles_used);
39376
39377 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
39378 dlfb_urb_completion(urb);
39379
39380 error:
39381 - atomic_add(bytes_sent, &dev->bytes_sent);
39382 - atomic_add(bytes_identical, &dev->bytes_identical);
39383 - atomic_add(bytes_rendered, &dev->bytes_rendered);
39384 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39385 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39386 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39387 end_cycles = get_cycles();
39388 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39389 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39390 >> 10)), /* Kcycles */
39391 &dev->cpu_kcycles_used);
39392 }
39393 @@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
39394 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39395 struct dlfb_data *dev = fb_info->par;
39396 return snprintf(buf, PAGE_SIZE, "%u\n",
39397 - atomic_read(&dev->bytes_rendered));
39398 + atomic_read_unchecked(&dev->bytes_rendered));
39399 }
39400
39401 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39402 @@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39403 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39404 struct dlfb_data *dev = fb_info->par;
39405 return snprintf(buf, PAGE_SIZE, "%u\n",
39406 - atomic_read(&dev->bytes_identical));
39407 + atomic_read_unchecked(&dev->bytes_identical));
39408 }
39409
39410 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39411 @@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39412 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39413 struct dlfb_data *dev = fb_info->par;
39414 return snprintf(buf, PAGE_SIZE, "%u\n",
39415 - atomic_read(&dev->bytes_sent));
39416 + atomic_read_unchecked(&dev->bytes_sent));
39417 }
39418
39419 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39420 @@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39421 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39422 struct dlfb_data *dev = fb_info->par;
39423 return snprintf(buf, PAGE_SIZE, "%u\n",
39424 - atomic_read(&dev->cpu_kcycles_used));
39425 + atomic_read_unchecked(&dev->cpu_kcycles_used));
39426 }
39427
39428 static ssize_t edid_show(
39429 @@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
39430 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39431 struct dlfb_data *dev = fb_info->par;
39432
39433 - atomic_set(&dev->bytes_rendered, 0);
39434 - atomic_set(&dev->bytes_identical, 0);
39435 - atomic_set(&dev->bytes_sent, 0);
39436 - atomic_set(&dev->cpu_kcycles_used, 0);
39437 + atomic_set_unchecked(&dev->bytes_rendered, 0);
39438 + atomic_set_unchecked(&dev->bytes_identical, 0);
39439 + atomic_set_unchecked(&dev->bytes_sent, 0);
39440 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39441
39442 return count;
39443 }
39444 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
39445 index 7f8472c..9842e87 100644
39446 --- a/drivers/video/uvesafb.c
39447 +++ b/drivers/video/uvesafb.c
39448 @@ -19,6 +19,7 @@
39449 #include <linux/io.h>
39450 #include <linux/mutex.h>
39451 #include <linux/slab.h>
39452 +#include <linux/moduleloader.h>
39453 #include <video/edid.h>
39454 #include <video/uvesafb.h>
39455 #ifdef CONFIG_X86
39456 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39457 NULL,
39458 };
39459
39460 - return call_usermodehelper(v86d_path, argv, envp, 1);
39461 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39462 }
39463
39464 /*
39465 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
39466 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39467 par->pmi_setpal = par->ypan = 0;
39468 } else {
39469 +
39470 +#ifdef CONFIG_PAX_KERNEXEC
39471 +#ifdef CONFIG_MODULES
39472 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39473 +#endif
39474 + if (!par->pmi_code) {
39475 + par->pmi_setpal = par->ypan = 0;
39476 + return 0;
39477 + }
39478 +#endif
39479 +
39480 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39481 + task->t.regs.edi);
39482 +
39483 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39484 + pax_open_kernel();
39485 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39486 + pax_close_kernel();
39487 +
39488 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39489 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39490 +#else
39491 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39492 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39493 +#endif
39494 +
39495 printk(KERN_INFO "uvesafb: protected mode interface info at "
39496 "%04x:%04x\n",
39497 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39498 @@ -1821,6 +1844,11 @@ out:
39499 if (par->vbe_modes)
39500 kfree(par->vbe_modes);
39501
39502 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39503 + if (par->pmi_code)
39504 + module_free_exec(NULL, par->pmi_code);
39505 +#endif
39506 +
39507 framebuffer_release(info);
39508 return err;
39509 }
39510 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
39511 kfree(par->vbe_state_orig);
39512 if (par->vbe_state_saved)
39513 kfree(par->vbe_state_saved);
39514 +
39515 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39516 + if (par->pmi_code)
39517 + module_free_exec(NULL, par->pmi_code);
39518 +#endif
39519 +
39520 }
39521
39522 framebuffer_release(info);
39523 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
39524 index 501b340..86bd4cf 100644
39525 --- a/drivers/video/vesafb.c
39526 +++ b/drivers/video/vesafb.c
39527 @@ -9,6 +9,7 @@
39528 */
39529
39530 #include <linux/module.h>
39531 +#include <linux/moduleloader.h>
39532 #include <linux/kernel.h>
39533 #include <linux/errno.h>
39534 #include <linux/string.h>
39535 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
39536 static int vram_total __initdata; /* Set total amount of memory */
39537 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39538 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39539 -static void (*pmi_start)(void) __read_mostly;
39540 -static void (*pmi_pal) (void) __read_mostly;
39541 +static void (*pmi_start)(void) __read_only;
39542 +static void (*pmi_pal) (void) __read_only;
39543 static int depth __read_mostly;
39544 static int vga_compat __read_mostly;
39545 /* --------------------------------------------------------------------- */
39546 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
39547 unsigned int size_vmode;
39548 unsigned int size_remap;
39549 unsigned int size_total;
39550 + void *pmi_code = NULL;
39551
39552 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39553 return -ENODEV;
39554 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
39555 size_remap = size_total;
39556 vesafb_fix.smem_len = size_remap;
39557
39558 -#ifndef __i386__
39559 - screen_info.vesapm_seg = 0;
39560 -#endif
39561 -
39562 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39563 printk(KERN_WARNING
39564 "vesafb: cannot reserve video memory at 0x%lx\n",
39565 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
39566 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39567 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39568
39569 +#ifdef __i386__
39570 +
39571 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39572 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
39573 + if (!pmi_code)
39574 +#elif !defined(CONFIG_PAX_KERNEXEC)
39575 + if (0)
39576 +#endif
39577 +
39578 +#endif
39579 + screen_info.vesapm_seg = 0;
39580 +
39581 if (screen_info.vesapm_seg) {
39582 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39583 - screen_info.vesapm_seg,screen_info.vesapm_off);
39584 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39585 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39586 }
39587
39588 if (screen_info.vesapm_seg < 0xc000)
39589 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
39590
39591 if (ypan || pmi_setpal) {
39592 unsigned short *pmi_base;
39593 +
39594 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39595 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39596 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39597 +
39598 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39599 + pax_open_kernel();
39600 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39601 +#else
39602 + pmi_code = pmi_base;
39603 +#endif
39604 +
39605 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39606 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39607 +
39608 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39609 + pmi_start = ktva_ktla(pmi_start);
39610 + pmi_pal = ktva_ktla(pmi_pal);
39611 + pax_close_kernel();
39612 +#endif
39613 +
39614 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39615 if (pmi_base[3]) {
39616 printk(KERN_INFO "vesafb: pmi: ports = ");
39617 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
39618 info->node, info->fix.id);
39619 return 0;
39620 err:
39621 +
39622 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39623 + module_free_exec(NULL, pmi_code);
39624 +#endif
39625 +
39626 if (info->screen_base)
39627 iounmap(info->screen_base);
39628 framebuffer_release(info);
39629 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
39630 index 88714ae..16c2e11 100644
39631 --- a/drivers/video/via/via_clock.h
39632 +++ b/drivers/video/via/via_clock.h
39633 @@ -56,7 +56,7 @@ struct via_clock {
39634
39635 void (*set_engine_pll_state)(u8 state);
39636 void (*set_engine_pll)(struct via_pll_config config);
39637 -};
39638 +} __no_const;
39639
39640
39641 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39642 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
39643 index e56c934..fc22f4b 100644
39644 --- a/drivers/xen/xen-pciback/conf_space.h
39645 +++ b/drivers/xen/xen-pciback/conf_space.h
39646 @@ -44,15 +44,15 @@ struct config_field {
39647 struct {
39648 conf_dword_write write;
39649 conf_dword_read read;
39650 - } dw;
39651 + } __no_const dw;
39652 struct {
39653 conf_word_write write;
39654 conf_word_read read;
39655 - } w;
39656 + } __no_const w;
39657 struct {
39658 conf_byte_write write;
39659 conf_byte_read read;
39660 - } b;
39661 + } __no_const b;
39662 } u;
39663 struct list_head list;
39664 };
39665 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
39666 index 879ed88..bc03a01 100644
39667 --- a/fs/9p/vfs_inode.c
39668 +++ b/fs/9p/vfs_inode.c
39669 @@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
39670 void
39671 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39672 {
39673 - char *s = nd_get_link(nd);
39674 + const char *s = nd_get_link(nd);
39675
39676 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39677 IS_ERR(s) ? "<error>" : s);
39678 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
39679 index 79e2ca7..5828ad1 100644
39680 --- a/fs/Kconfig.binfmt
39681 +++ b/fs/Kconfig.binfmt
39682 @@ -86,7 +86,7 @@ config HAVE_AOUT
39683
39684 config BINFMT_AOUT
39685 tristate "Kernel support for a.out and ECOFF binaries"
39686 - depends on HAVE_AOUT
39687 + depends on HAVE_AOUT && BROKEN
39688 ---help---
39689 A.out (Assembler.OUTput) is a set of formats for libraries and
39690 executables used in the earliest versions of UNIX. Linux used
39691 diff --git a/fs/aio.c b/fs/aio.c
39692 index 969beb0..09fab51 100644
39693 --- a/fs/aio.c
39694 +++ b/fs/aio.c
39695 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
39696 size += sizeof(struct io_event) * nr_events;
39697 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39698
39699 - if (nr_pages < 0)
39700 + if (nr_pages <= 0)
39701 return -EINVAL;
39702
39703 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39704 @@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
39705 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39706 {
39707 ssize_t ret;
39708 + struct iovec iovstack;
39709
39710 #ifdef CONFIG_COMPAT
39711 if (compat)
39712 ret = compat_rw_copy_check_uvector(type,
39713 (struct compat_iovec __user *)kiocb->ki_buf,
39714 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39715 + kiocb->ki_nbytes, 1, &iovstack,
39716 &kiocb->ki_iovec, 1);
39717 else
39718 #endif
39719 ret = rw_copy_check_uvector(type,
39720 (struct iovec __user *)kiocb->ki_buf,
39721 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39722 + kiocb->ki_nbytes, 1, &iovstack,
39723 &kiocb->ki_iovec, 1);
39724 if (ret < 0)
39725 goto out;
39726
39727 + if (kiocb->ki_iovec == &iovstack) {
39728 + kiocb->ki_inline_vec = iovstack;
39729 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
39730 + }
39731 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39732 kiocb->ki_cur_seg = 0;
39733 /* ki_nbytes/left now reflect bytes instead of segs */
39734 diff --git a/fs/attr.c b/fs/attr.c
39735 index 7ee7ba4..0c61a60 100644
39736 --- a/fs/attr.c
39737 +++ b/fs/attr.c
39738 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
39739 unsigned long limit;
39740
39741 limit = rlimit(RLIMIT_FSIZE);
39742 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39743 if (limit != RLIM_INFINITY && offset > limit)
39744 goto out_sig;
39745 if (offset > inode->i_sb->s_maxbytes)
39746 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
39747 index e1fbdee..cd5ea56 100644
39748 --- a/fs/autofs4/waitq.c
39749 +++ b/fs/autofs4/waitq.c
39750 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
39751 {
39752 unsigned long sigpipe, flags;
39753 mm_segment_t fs;
39754 - const char *data = (const char *)addr;
39755 + const char __user *data = (const char __force_user *)addr;
39756 ssize_t wr = 0;
39757
39758 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39759 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
39760 index 8342ca6..82fd192 100644
39761 --- a/fs/befs/linuxvfs.c
39762 +++ b/fs/befs/linuxvfs.c
39763 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39764 {
39765 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39766 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39767 - char *link = nd_get_link(nd);
39768 + const char *link = nd_get_link(nd);
39769 if (!IS_ERR(link))
39770 kfree(link);
39771 }
39772 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
39773 index a6395bd..a5b24c4 100644
39774 --- a/fs/binfmt_aout.c
39775 +++ b/fs/binfmt_aout.c
39776 @@ -16,6 +16,7 @@
39777 #include <linux/string.h>
39778 #include <linux/fs.h>
39779 #include <linux/file.h>
39780 +#include <linux/security.h>
39781 #include <linux/stat.h>
39782 #include <linux/fcntl.h>
39783 #include <linux/ptrace.h>
39784 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
39785 #endif
39786 # define START_STACK(u) ((void __user *)u.start_stack)
39787
39788 + memset(&dump, 0, sizeof(dump));
39789 +
39790 fs = get_fs();
39791 set_fs(KERNEL_DS);
39792 has_dumped = 1;
39793 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
39794
39795 /* If the size of the dump file exceeds the rlimit, then see what would happen
39796 if we wrote the stack, but not the data area. */
39797 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39798 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39799 dump.u_dsize = 0;
39800
39801 /* Make sure we have enough room to write the stack and data areas. */
39802 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39803 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39804 dump.u_ssize = 0;
39805
39806 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39807 rlim = rlimit(RLIMIT_DATA);
39808 if (rlim >= RLIM_INFINITY)
39809 rlim = ~0;
39810 +
39811 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39812 if (ex.a_data + ex.a_bss > rlim)
39813 return -ENOMEM;
39814
39815 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39816 install_exec_creds(bprm);
39817 current->flags &= ~PF_FORKNOEXEC;
39818
39819 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39820 + current->mm->pax_flags = 0UL;
39821 +#endif
39822 +
39823 +#ifdef CONFIG_PAX_PAGEEXEC
39824 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39825 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39826 +
39827 +#ifdef CONFIG_PAX_EMUTRAMP
39828 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39829 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39830 +#endif
39831 +
39832 +#ifdef CONFIG_PAX_MPROTECT
39833 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39834 + current->mm->pax_flags |= MF_PAX_MPROTECT;
39835 +#endif
39836 +
39837 + }
39838 +#endif
39839 +
39840 if (N_MAGIC(ex) == OMAGIC) {
39841 unsigned long text_addr, map_size;
39842 loff_t pos;
39843 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39844
39845 down_write(&current->mm->mmap_sem);
39846 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39847 - PROT_READ | PROT_WRITE | PROT_EXEC,
39848 + PROT_READ | PROT_WRITE,
39849 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39850 fd_offset + ex.a_text);
39851 up_write(&current->mm->mmap_sem);
39852 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
39853 index 21ac5ee..ca0d90f 100644
39854 --- a/fs/binfmt_elf.c
39855 +++ b/fs/binfmt_elf.c
39856 @@ -32,6 +32,7 @@
39857 #include <linux/elf.h>
39858 #include <linux/utsname.h>
39859 #include <linux/coredump.h>
39860 +#include <linux/xattr.h>
39861 #include <asm/uaccess.h>
39862 #include <asm/param.h>
39863 #include <asm/page.h>
39864 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
39865 #define elf_core_dump NULL
39866 #endif
39867
39868 +#ifdef CONFIG_PAX_MPROTECT
39869 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39870 +#endif
39871 +
39872 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39873 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39874 #else
39875 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
39876 .load_binary = load_elf_binary,
39877 .load_shlib = load_elf_library,
39878 .core_dump = elf_core_dump,
39879 +
39880 +#ifdef CONFIG_PAX_MPROTECT
39881 + .handle_mprotect= elf_handle_mprotect,
39882 +#endif
39883 +
39884 .min_coredump = ELF_EXEC_PAGESIZE,
39885 };
39886
39887 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
39888
39889 static int set_brk(unsigned long start, unsigned long end)
39890 {
39891 + unsigned long e = end;
39892 +
39893 start = ELF_PAGEALIGN(start);
39894 end = ELF_PAGEALIGN(end);
39895 if (end > start) {
39896 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
39897 if (BAD_ADDR(addr))
39898 return addr;
39899 }
39900 - current->mm->start_brk = current->mm->brk = end;
39901 + current->mm->start_brk = current->mm->brk = e;
39902 return 0;
39903 }
39904
39905 @@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39906 elf_addr_t __user *u_rand_bytes;
39907 const char *k_platform = ELF_PLATFORM;
39908 const char *k_base_platform = ELF_BASE_PLATFORM;
39909 - unsigned char k_rand_bytes[16];
39910 + u32 k_rand_bytes[4];
39911 int items;
39912 elf_addr_t *elf_info;
39913 int ei_index = 0;
39914 const struct cred *cred = current_cred();
39915 struct vm_area_struct *vma;
39916 + unsigned long saved_auxv[AT_VECTOR_SIZE];
39917
39918 /*
39919 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39920 @@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39921 * Generate 16 random bytes for userspace PRNG seeding.
39922 */
39923 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39924 - u_rand_bytes = (elf_addr_t __user *)
39925 - STACK_ALLOC(p, sizeof(k_rand_bytes));
39926 + srandom32(k_rand_bytes[0] ^ random32());
39927 + srandom32(k_rand_bytes[1] ^ random32());
39928 + srandom32(k_rand_bytes[2] ^ random32());
39929 + srandom32(k_rand_bytes[3] ^ random32());
39930 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
39931 + u_rand_bytes = (elf_addr_t __user *) p;
39932 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39933 return -EFAULT;
39934
39935 @@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39936 return -EFAULT;
39937 current->mm->env_end = p;
39938
39939 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39940 +
39941 /* Put the elf_info on the stack in the right place. */
39942 sp = (elf_addr_t __user *)envp + 1;
39943 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39944 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39945 return -EFAULT;
39946 return 0;
39947 }
39948 @@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39949 {
39950 struct elf_phdr *elf_phdata;
39951 struct elf_phdr *eppnt;
39952 - unsigned long load_addr = 0;
39953 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39954 int load_addr_set = 0;
39955 unsigned long last_bss = 0, elf_bss = 0;
39956 - unsigned long error = ~0UL;
39957 + unsigned long error = -EINVAL;
39958 unsigned long total_size;
39959 int retval, i, size;
39960
39961 @@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39962 goto out_close;
39963 }
39964
39965 +#ifdef CONFIG_PAX_SEGMEXEC
39966 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39967 + pax_task_size = SEGMEXEC_TASK_SIZE;
39968 +#endif
39969 +
39970 eppnt = elf_phdata;
39971 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39972 if (eppnt->p_type == PT_LOAD) {
39973 @@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39974 k = load_addr + eppnt->p_vaddr;
39975 if (BAD_ADDR(k) ||
39976 eppnt->p_filesz > eppnt->p_memsz ||
39977 - eppnt->p_memsz > TASK_SIZE ||
39978 - TASK_SIZE - eppnt->p_memsz < k) {
39979 + eppnt->p_memsz > pax_task_size ||
39980 + pax_task_size - eppnt->p_memsz < k) {
39981 error = -ENOMEM;
39982 goto out_close;
39983 }
39984 @@ -528,6 +552,351 @@ out:
39985 return error;
39986 }
39987
39988 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
39989 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
39990 +{
39991 + unsigned long pax_flags = 0UL;
39992 +
39993 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39994 +
39995 +#ifdef CONFIG_PAX_PAGEEXEC
39996 + if (elf_phdata->p_flags & PF_PAGEEXEC)
39997 + pax_flags |= MF_PAX_PAGEEXEC;
39998 +#endif
39999 +
40000 +#ifdef CONFIG_PAX_SEGMEXEC
40001 + if (elf_phdata->p_flags & PF_SEGMEXEC)
40002 + pax_flags |= MF_PAX_SEGMEXEC;
40003 +#endif
40004 +
40005 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40006 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40007 + if ((__supported_pte_mask & _PAGE_NX))
40008 + pax_flags &= ~MF_PAX_SEGMEXEC;
40009 + else
40010 + pax_flags &= ~MF_PAX_PAGEEXEC;
40011 + }
40012 +#endif
40013 +
40014 +#ifdef CONFIG_PAX_EMUTRAMP
40015 + if (elf_phdata->p_flags & PF_EMUTRAMP)
40016 + pax_flags |= MF_PAX_EMUTRAMP;
40017 +#endif
40018 +
40019 +#ifdef CONFIG_PAX_MPROTECT
40020 + if (elf_phdata->p_flags & PF_MPROTECT)
40021 + pax_flags |= MF_PAX_MPROTECT;
40022 +#endif
40023 +
40024 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40025 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
40026 + pax_flags |= MF_PAX_RANDMMAP;
40027 +#endif
40028 +
40029 +#endif
40030 +
40031 + return pax_flags;
40032 +}
40033 +
40034 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
40035 +{
40036 + unsigned long pax_flags = 0UL;
40037 +
40038 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
40039 +
40040 +#ifdef CONFIG_PAX_PAGEEXEC
40041 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
40042 + pax_flags |= MF_PAX_PAGEEXEC;
40043 +#endif
40044 +
40045 +#ifdef CONFIG_PAX_SEGMEXEC
40046 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
40047 + pax_flags |= MF_PAX_SEGMEXEC;
40048 +#endif
40049 +
40050 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40051 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40052 + if ((__supported_pte_mask & _PAGE_NX))
40053 + pax_flags &= ~MF_PAX_SEGMEXEC;
40054 + else
40055 + pax_flags &= ~MF_PAX_PAGEEXEC;
40056 + }
40057 +#endif
40058 +
40059 +#ifdef CONFIG_PAX_EMUTRAMP
40060 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
40061 + pax_flags |= MF_PAX_EMUTRAMP;
40062 +#endif
40063 +
40064 +#ifdef CONFIG_PAX_MPROTECT
40065 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
40066 + pax_flags |= MF_PAX_MPROTECT;
40067 +#endif
40068 +
40069 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40070 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
40071 + pax_flags |= MF_PAX_RANDMMAP;
40072 +#endif
40073 +
40074 +#endif
40075 +
40076 + return pax_flags;
40077 +}
40078 +
40079 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
40080 +{
40081 + unsigned long pax_flags = 0UL;
40082 +
40083 +#ifdef CONFIG_PAX_EI_PAX
40084 +
40085 +#ifdef CONFIG_PAX_PAGEEXEC
40086 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
40087 + pax_flags |= MF_PAX_PAGEEXEC;
40088 +#endif
40089 +
40090 +#ifdef CONFIG_PAX_SEGMEXEC
40091 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
40092 + pax_flags |= MF_PAX_SEGMEXEC;
40093 +#endif
40094 +
40095 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40096 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40097 + if ((__supported_pte_mask & _PAGE_NX))
40098 + pax_flags &= ~MF_PAX_SEGMEXEC;
40099 + else
40100 + pax_flags &= ~MF_PAX_PAGEEXEC;
40101 + }
40102 +#endif
40103 +
40104 +#ifdef CONFIG_PAX_EMUTRAMP
40105 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
40106 + pax_flags |= MF_PAX_EMUTRAMP;
40107 +#endif
40108 +
40109 +#ifdef CONFIG_PAX_MPROTECT
40110 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
40111 + pax_flags |= MF_PAX_MPROTECT;
40112 +#endif
40113 +
40114 +#ifdef CONFIG_PAX_ASLR
40115 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
40116 + pax_flags |= MF_PAX_RANDMMAP;
40117 +#endif
40118 +
40119 +#else
40120 +
40121 +#ifdef CONFIG_PAX_PAGEEXEC
40122 + pax_flags |= MF_PAX_PAGEEXEC;
40123 +#endif
40124 +
40125 +#ifdef CONFIG_PAX_MPROTECT
40126 + pax_flags |= MF_PAX_MPROTECT;
40127 +#endif
40128 +
40129 +#ifdef CONFIG_PAX_RANDMMAP
40130 + pax_flags |= MF_PAX_RANDMMAP;
40131 +#endif
40132 +
40133 +#ifdef CONFIG_PAX_SEGMEXEC
40134 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
40135 + pax_flags &= ~MF_PAX_PAGEEXEC;
40136 + pax_flags |= MF_PAX_SEGMEXEC;
40137 + }
40138 +#endif
40139 +
40140 +#endif
40141 +
40142 + return pax_flags;
40143 +}
40144 +
40145 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
40146 +{
40147 +
40148 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
40149 + unsigned long i;
40150 +
40151 + for (i = 0UL; i < elf_ex->e_phnum; i++)
40152 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
40153 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
40154 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
40155 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
40156 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
40157 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
40158 + return ~0UL;
40159 +
40160 +#ifdef CONFIG_PAX_SOFTMODE
40161 + if (pax_softmode)
40162 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
40163 + else
40164 +#endif
40165 +
40166 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
40167 + break;
40168 + }
40169 +#endif
40170 +
40171 + return ~0UL;
40172 +}
40173 +
40174 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40175 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
40176 +{
40177 + unsigned long pax_flags = 0UL;
40178 +
40179 +#ifdef CONFIG_PAX_PAGEEXEC
40180 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
40181 + pax_flags |= MF_PAX_PAGEEXEC;
40182 +#endif
40183 +
40184 +#ifdef CONFIG_PAX_SEGMEXEC
40185 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
40186 + pax_flags |= MF_PAX_SEGMEXEC;
40187 +#endif
40188 +
40189 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40190 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40191 + if ((__supported_pte_mask & _PAGE_NX))
40192 + pax_flags &= ~MF_PAX_SEGMEXEC;
40193 + else
40194 + pax_flags &= ~MF_PAX_PAGEEXEC;
40195 + }
40196 +#endif
40197 +
40198 +#ifdef CONFIG_PAX_EMUTRAMP
40199 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
40200 + pax_flags |= MF_PAX_EMUTRAMP;
40201 +#endif
40202 +
40203 +#ifdef CONFIG_PAX_MPROTECT
40204 + if (pax_flags_softmode & MF_PAX_MPROTECT)
40205 + pax_flags |= MF_PAX_MPROTECT;
40206 +#endif
40207 +
40208 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40209 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
40210 + pax_flags |= MF_PAX_RANDMMAP;
40211 +#endif
40212 +
40213 + return pax_flags;
40214 +}
40215 +
40216 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
40217 +{
40218 + unsigned long pax_flags = 0UL;
40219 +
40220 +#ifdef CONFIG_PAX_PAGEEXEC
40221 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
40222 + pax_flags |= MF_PAX_PAGEEXEC;
40223 +#endif
40224 +
40225 +#ifdef CONFIG_PAX_SEGMEXEC
40226 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
40227 + pax_flags |= MF_PAX_SEGMEXEC;
40228 +#endif
40229 +
40230 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40231 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40232 + if ((__supported_pte_mask & _PAGE_NX))
40233 + pax_flags &= ~MF_PAX_SEGMEXEC;
40234 + else
40235 + pax_flags &= ~MF_PAX_PAGEEXEC;
40236 + }
40237 +#endif
40238 +
40239 +#ifdef CONFIG_PAX_EMUTRAMP
40240 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
40241 + pax_flags |= MF_PAX_EMUTRAMP;
40242 +#endif
40243 +
40244 +#ifdef CONFIG_PAX_MPROTECT
40245 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
40246 + pax_flags |= MF_PAX_MPROTECT;
40247 +#endif
40248 +
40249 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40250 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
40251 + pax_flags |= MF_PAX_RANDMMAP;
40252 +#endif
40253 +
40254 + return pax_flags;
40255 +}
40256 +#endif
40257 +
40258 +static unsigned long pax_parse_xattr_pax(struct file * const file)
40259 +{
40260 +
40261 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40262 + ssize_t xattr_size, i;
40263 + unsigned char xattr_value[5];
40264 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
40265 +
40266 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
40267 + if (xattr_size <= 0)
40268 + return ~0UL;
40269 +
40270 + for (i = 0; i < xattr_size; i++)
40271 + switch (xattr_value[i]) {
40272 + default:
40273 + return ~0UL;
40274 +
40275 +#define parse_flag(option1, option2, flag) \
40276 + case option1: \
40277 + pax_flags_hardmode |= MF_PAX_##flag; \
40278 + break; \
40279 + case option2: \
40280 + pax_flags_softmode |= MF_PAX_##flag; \
40281 + break;
40282 +
40283 + parse_flag('p', 'P', PAGEEXEC);
40284 + parse_flag('e', 'E', EMUTRAMP);
40285 + parse_flag('m', 'M', MPROTECT);
40286 + parse_flag('r', 'R', RANDMMAP);
40287 + parse_flag('s', 'S', SEGMEXEC);
40288 +
40289 +#undef parse_flag
40290 + }
40291 +
40292 + if (pax_flags_hardmode & pax_flags_softmode)
40293 + return ~0UL;
40294 +
40295 +#ifdef CONFIG_PAX_SOFTMODE
40296 + if (pax_softmode)
40297 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
40298 + else
40299 +#endif
40300 +
40301 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
40302 +#else
40303 + return ~0UL;
40304 +#endif
40305 +
40306 +}
40307 +
40308 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
40309 +{
40310 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
40311 +
40312 + pax_flags = pax_parse_ei_pax(elf_ex);
40313 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
40314 + xattr_pax_flags = pax_parse_xattr_pax(file);
40315 +
40316 + if (pt_pax_flags == ~0UL)
40317 + pt_pax_flags = xattr_pax_flags;
40318 + else if (xattr_pax_flags == ~0UL)
40319 + xattr_pax_flags = pt_pax_flags;
40320 + if (pt_pax_flags != xattr_pax_flags)
40321 + return -EINVAL;
40322 + if (pt_pax_flags != ~0UL)
40323 + pax_flags = pt_pax_flags;
40324 +
40325 + if (0 > pax_check_flags(&pax_flags))
40326 + return -EINVAL;
40327 +
40328 + current->mm->pax_flags = pax_flags;
40329 + return 0;
40330 +}
40331 +#endif
40332 +
40333 /*
40334 * These are the functions used to load ELF style executables and shared
40335 * libraries. There is no binary dependent code anywhere else.
40336 @@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
40337 {
40338 unsigned int random_variable = 0;
40339
40340 +#ifdef CONFIG_PAX_RANDUSTACK
40341 + if (randomize_va_space)
40342 + return stack_top - current->mm->delta_stack;
40343 +#endif
40344 +
40345 if ((current->flags & PF_RANDOMIZE) &&
40346 !(current->personality & ADDR_NO_RANDOMIZE)) {
40347 random_variable = get_random_int() & STACK_RND_MASK;
40348 @@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40349 unsigned long load_addr = 0, load_bias = 0;
40350 int load_addr_set = 0;
40351 char * elf_interpreter = NULL;
40352 - unsigned long error;
40353 + unsigned long error = 0;
40354 struct elf_phdr *elf_ppnt, *elf_phdata;
40355 unsigned long elf_bss, elf_brk;
40356 int retval, i;
40357 @@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40358 unsigned long start_code, end_code, start_data, end_data;
40359 unsigned long reloc_func_desc __maybe_unused = 0;
40360 int executable_stack = EXSTACK_DEFAULT;
40361 - unsigned long def_flags = 0;
40362 struct {
40363 struct elfhdr elf_ex;
40364 struct elfhdr interp_elf_ex;
40365 } *loc;
40366 + unsigned long pax_task_size = TASK_SIZE;
40367
40368 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40369 if (!loc) {
40370 @@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40371
40372 /* OK, This is the point of no return */
40373 current->flags &= ~PF_FORKNOEXEC;
40374 - current->mm->def_flags = def_flags;
40375 +
40376 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40377 + current->mm->pax_flags = 0UL;
40378 +#endif
40379 +
40380 +#ifdef CONFIG_PAX_DLRESOLVE
40381 + current->mm->call_dl_resolve = 0UL;
40382 +#endif
40383 +
40384 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40385 + current->mm->call_syscall = 0UL;
40386 +#endif
40387 +
40388 +#ifdef CONFIG_PAX_ASLR
40389 + current->mm->delta_mmap = 0UL;
40390 + current->mm->delta_stack = 0UL;
40391 +#endif
40392 +
40393 + current->mm->def_flags = 0;
40394 +
40395 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40396 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
40397 + send_sig(SIGKILL, current, 0);
40398 + goto out_free_dentry;
40399 + }
40400 +#endif
40401 +
40402 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40403 + pax_set_initial_flags(bprm);
40404 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40405 + if (pax_set_initial_flags_func)
40406 + (pax_set_initial_flags_func)(bprm);
40407 +#endif
40408 +
40409 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40410 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40411 + current->mm->context.user_cs_limit = PAGE_SIZE;
40412 + current->mm->def_flags |= VM_PAGEEXEC;
40413 + }
40414 +#endif
40415 +
40416 +#ifdef CONFIG_PAX_SEGMEXEC
40417 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40418 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40419 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40420 + pax_task_size = SEGMEXEC_TASK_SIZE;
40421 + current->mm->def_flags |= VM_NOHUGEPAGE;
40422 + }
40423 +#endif
40424 +
40425 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40426 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40427 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40428 + put_cpu();
40429 + }
40430 +#endif
40431
40432 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40433 may depend on the personality. */
40434 SET_PERSONALITY(loc->elf_ex);
40435 +
40436 +#ifdef CONFIG_PAX_ASLR
40437 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40438 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40439 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40440 + }
40441 +#endif
40442 +
40443 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40444 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40445 + executable_stack = EXSTACK_DISABLE_X;
40446 + current->personality &= ~READ_IMPLIES_EXEC;
40447 + } else
40448 +#endif
40449 +
40450 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40451 current->personality |= READ_IMPLIES_EXEC;
40452
40453 @@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40454 #else
40455 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40456 #endif
40457 +
40458 +#ifdef CONFIG_PAX_RANDMMAP
40459 + /* PaX: randomize base address at the default exe base if requested */
40460 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40461 +#ifdef CONFIG_SPARC64
40462 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40463 +#else
40464 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40465 +#endif
40466 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40467 + elf_flags |= MAP_FIXED;
40468 + }
40469 +#endif
40470 +
40471 }
40472
40473 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40474 @@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40475 * allowed task size. Note that p_filesz must always be
40476 * <= p_memsz so it is only necessary to check p_memsz.
40477 */
40478 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40479 - elf_ppnt->p_memsz > TASK_SIZE ||
40480 - TASK_SIZE - elf_ppnt->p_memsz < k) {
40481 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40482 + elf_ppnt->p_memsz > pax_task_size ||
40483 + pax_task_size - elf_ppnt->p_memsz < k) {
40484 /* set_brk can never work. Avoid overflows. */
40485 send_sig(SIGKILL, current, 0);
40486 retval = -EINVAL;
40487 @@ -870,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40488 start_data += load_bias;
40489 end_data += load_bias;
40490
40491 +#ifdef CONFIG_PAX_RANDMMAP
40492 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40493 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40494 +#endif
40495 +
40496 /* Calling set_brk effectively mmaps the pages that we need
40497 * for the bss and break sections. We must do this before
40498 * mapping in the interpreter, to make sure it doesn't wind
40499 @@ -881,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40500 goto out_free_dentry;
40501 }
40502 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40503 - send_sig(SIGSEGV, current, 0);
40504 - retval = -EFAULT; /* Nobody gets to see this, but.. */
40505 - goto out_free_dentry;
40506 + /*
40507 + * This bss-zeroing can fail if the ELF
40508 + * file specifies odd protections. So
40509 + * we don't check the return value
40510 + */
40511 }
40512
40513 if (elf_interpreter) {
40514 @@ -1098,7 +1563,7 @@ out:
40515 * Decide what to dump of a segment, part, all or none.
40516 */
40517 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40518 - unsigned long mm_flags)
40519 + unsigned long mm_flags, long signr)
40520 {
40521 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40522
40523 @@ -1132,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
40524 if (vma->vm_file == NULL)
40525 return 0;
40526
40527 - if (FILTER(MAPPED_PRIVATE))
40528 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40529 goto whole;
40530
40531 /*
40532 @@ -1354,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
40533 {
40534 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40535 int i = 0;
40536 - do
40537 + do {
40538 i += 2;
40539 - while (auxv[i - 2] != AT_NULL);
40540 + } while (auxv[i - 2] != AT_NULL);
40541 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40542 }
40543
40544 @@ -1862,14 +2327,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
40545 }
40546
40547 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40548 - unsigned long mm_flags)
40549 + struct coredump_params *cprm)
40550 {
40551 struct vm_area_struct *vma;
40552 size_t size = 0;
40553
40554 for (vma = first_vma(current, gate_vma); vma != NULL;
40555 vma = next_vma(vma, gate_vma))
40556 - size += vma_dump_size(vma, mm_flags);
40557 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40558 return size;
40559 }
40560
40561 @@ -1963,7 +2428,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40562
40563 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40564
40565 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40566 + offset += elf_core_vma_data_size(gate_vma, cprm);
40567 offset += elf_core_extra_data_size();
40568 e_shoff = offset;
40569
40570 @@ -1977,10 +2442,12 @@ static int elf_core_dump(struct coredump_params *cprm)
40571 offset = dataoff;
40572
40573 size += sizeof(*elf);
40574 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40575 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40576 goto end_coredump;
40577
40578 size += sizeof(*phdr4note);
40579 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40580 if (size > cprm->limit
40581 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40582 goto end_coredump;
40583 @@ -1994,7 +2461,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40584 phdr.p_offset = offset;
40585 phdr.p_vaddr = vma->vm_start;
40586 phdr.p_paddr = 0;
40587 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40588 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40589 phdr.p_memsz = vma->vm_end - vma->vm_start;
40590 offset += phdr.p_filesz;
40591 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40592 @@ -2005,6 +2472,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40593 phdr.p_align = ELF_EXEC_PAGESIZE;
40594
40595 size += sizeof(phdr);
40596 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40597 if (size > cprm->limit
40598 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40599 goto end_coredump;
40600 @@ -2029,7 +2497,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40601 unsigned long addr;
40602 unsigned long end;
40603
40604 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40605 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40606
40607 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40608 struct page *page;
40609 @@ -2038,6 +2506,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40610 page = get_dump_page(addr);
40611 if (page) {
40612 void *kaddr = kmap(page);
40613 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40614 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40615 !dump_write(cprm->file, kaddr,
40616 PAGE_SIZE);
40617 @@ -2055,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40618
40619 if (e_phnum == PN_XNUM) {
40620 size += sizeof(*shdr4extnum);
40621 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40622 if (size > cprm->limit
40623 || !dump_write(cprm->file, shdr4extnum,
40624 sizeof(*shdr4extnum)))
40625 @@ -2075,6 +2545,97 @@ out:
40626
40627 #endif /* CONFIG_ELF_CORE */
40628
40629 +#ifdef CONFIG_PAX_MPROTECT
40630 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
40631 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40632 + * we'll remove VM_MAYWRITE for good on RELRO segments.
40633 + *
40634 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40635 + * basis because we want to allow the common case and not the special ones.
40636 + */
40637 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40638 +{
40639 + struct elfhdr elf_h;
40640 + struct elf_phdr elf_p;
40641 + unsigned long i;
40642 + unsigned long oldflags;
40643 + bool is_textrel_rw, is_textrel_rx, is_relro;
40644 +
40645 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40646 + return;
40647 +
40648 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40649 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40650 +
40651 +#ifdef CONFIG_PAX_ELFRELOCS
40652 + /* possible TEXTREL */
40653 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40654 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40655 +#else
40656 + is_textrel_rw = false;
40657 + is_textrel_rx = false;
40658 +#endif
40659 +
40660 + /* possible RELRO */
40661 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40662 +
40663 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40664 + return;
40665 +
40666 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40667 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40668 +
40669 +#ifdef CONFIG_PAX_ETEXECRELOCS
40670 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40671 +#else
40672 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40673 +#endif
40674 +
40675 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40676 + !elf_check_arch(&elf_h) ||
40677 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40678 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40679 + return;
40680 +
40681 + for (i = 0UL; i < elf_h.e_phnum; i++) {
40682 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40683 + return;
40684 + switch (elf_p.p_type) {
40685 + case PT_DYNAMIC:
40686 + if (!is_textrel_rw && !is_textrel_rx)
40687 + continue;
40688 + i = 0UL;
40689 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40690 + elf_dyn dyn;
40691 +
40692 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40693 + return;
40694 + if (dyn.d_tag == DT_NULL)
40695 + return;
40696 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40697 + gr_log_textrel(vma);
40698 + if (is_textrel_rw)
40699 + vma->vm_flags |= VM_MAYWRITE;
40700 + else
40701 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40702 + vma->vm_flags &= ~VM_MAYWRITE;
40703 + return;
40704 + }
40705 + i++;
40706 + }
40707 + return;
40708 +
40709 + case PT_GNU_RELRO:
40710 + if (!is_relro)
40711 + continue;
40712 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40713 + vma->vm_flags &= ~VM_MAYWRITE;
40714 + return;
40715 + }
40716 + }
40717 +}
40718 +#endif
40719 +
40720 static int __init init_elf_binfmt(void)
40721 {
40722 return register_binfmt(&elf_format);
40723 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
40724 index 1bffbe0..c8c283e 100644
40725 --- a/fs/binfmt_flat.c
40726 +++ b/fs/binfmt_flat.c
40727 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
40728 realdatastart = (unsigned long) -ENOMEM;
40729 printk("Unable to allocate RAM for process data, errno %d\n",
40730 (int)-realdatastart);
40731 + down_write(&current->mm->mmap_sem);
40732 do_munmap(current->mm, textpos, text_len);
40733 + up_write(&current->mm->mmap_sem);
40734 ret = realdatastart;
40735 goto err;
40736 }
40737 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40738 }
40739 if (IS_ERR_VALUE(result)) {
40740 printk("Unable to read data+bss, errno %d\n", (int)-result);
40741 + down_write(&current->mm->mmap_sem);
40742 do_munmap(current->mm, textpos, text_len);
40743 do_munmap(current->mm, realdatastart, len);
40744 + up_write(&current->mm->mmap_sem);
40745 ret = result;
40746 goto err;
40747 }
40748 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40749 }
40750 if (IS_ERR_VALUE(result)) {
40751 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40752 + down_write(&current->mm->mmap_sem);
40753 do_munmap(current->mm, textpos, text_len + data_len + extra +
40754 MAX_SHARED_LIBS * sizeof(unsigned long));
40755 + up_write(&current->mm->mmap_sem);
40756 ret = result;
40757 goto err;
40758 }
40759 diff --git a/fs/bio.c b/fs/bio.c
40760 index b1fe82c..84da0a9 100644
40761 --- a/fs/bio.c
40762 +++ b/fs/bio.c
40763 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
40764 const int read = bio_data_dir(bio) == READ;
40765 struct bio_map_data *bmd = bio->bi_private;
40766 int i;
40767 - char *p = bmd->sgvecs[0].iov_base;
40768 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40769
40770 __bio_for_each_segment(bvec, bio, i, 0) {
40771 char *addr = page_address(bvec->bv_page);
40772 diff --git a/fs/block_dev.c b/fs/block_dev.c
40773 index b07f1da..9efcb92 100644
40774 --- a/fs/block_dev.c
40775 +++ b/fs/block_dev.c
40776 @@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
40777 else if (bdev->bd_contains == bdev)
40778 return true; /* is a whole device which isn't held */
40779
40780 - else if (whole->bd_holder == bd_may_claim)
40781 + else if (whole->bd_holder == (void *)bd_may_claim)
40782 return true; /* is a partition of a device that is being partitioned */
40783 else if (whole->bd_holder != NULL)
40784 return false; /* is a partition of a held device */
40785 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
40786 index dede441..f2a2507 100644
40787 --- a/fs/btrfs/ctree.c
40788 +++ b/fs/btrfs/ctree.c
40789 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
40790 free_extent_buffer(buf);
40791 add_root_to_dirty_list(root);
40792 } else {
40793 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40794 - parent_start = parent->start;
40795 - else
40796 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40797 + if (parent)
40798 + parent_start = parent->start;
40799 + else
40800 + parent_start = 0;
40801 + } else
40802 parent_start = 0;
40803
40804 WARN_ON(trans->transid != btrfs_header_generation(parent));
40805 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
40806 index fd1a06d..6e9033d 100644
40807 --- a/fs/btrfs/inode.c
40808 +++ b/fs/btrfs/inode.c
40809 @@ -6895,7 +6895,7 @@ fail:
40810 return -ENOMEM;
40811 }
40812
40813 -static int btrfs_getattr(struct vfsmount *mnt,
40814 +int btrfs_getattr(struct vfsmount *mnt,
40815 struct dentry *dentry, struct kstat *stat)
40816 {
40817 struct inode *inode = dentry->d_inode;
40818 @@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
40819 return 0;
40820 }
40821
40822 +EXPORT_SYMBOL(btrfs_getattr);
40823 +
40824 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
40825 +{
40826 + return BTRFS_I(inode)->root->anon_dev;
40827 +}
40828 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40829 +
40830 /*
40831 * If a file is moved, it will inherit the cow and compression flags of the new
40832 * directory.
40833 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
40834 index c04f02c..f5c9e2e 100644
40835 --- a/fs/btrfs/ioctl.c
40836 +++ b/fs/btrfs/ioctl.c
40837 @@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40838 for (i = 0; i < num_types; i++) {
40839 struct btrfs_space_info *tmp;
40840
40841 + /* Don't copy in more than we allocated */
40842 if (!slot_count)
40843 break;
40844
40845 + slot_count--;
40846 +
40847 info = NULL;
40848 rcu_read_lock();
40849 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40850 @@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40851 memcpy(dest, &space, sizeof(space));
40852 dest++;
40853 space_args.total_spaces++;
40854 - slot_count--;
40855 }
40856 - if (!slot_count)
40857 - break;
40858 }
40859 up_read(&info->groups_sem);
40860 }
40861
40862 - user_dest = (struct btrfs_ioctl_space_info *)
40863 + user_dest = (struct btrfs_ioctl_space_info __user *)
40864 (arg + sizeof(struct btrfs_ioctl_space_args));
40865
40866 if (copy_to_user(user_dest, dest_orig, alloc_size))
40867 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
40868 index cfb5543..1ae7347 100644
40869 --- a/fs/btrfs/relocation.c
40870 +++ b/fs/btrfs/relocation.c
40871 @@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
40872 }
40873 spin_unlock(&rc->reloc_root_tree.lock);
40874
40875 - BUG_ON((struct btrfs_root *)node->data != root);
40876 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
40877
40878 if (!del) {
40879 spin_lock(&rc->reloc_root_tree.lock);
40880 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
40881 index 622f469..e8d2d55 100644
40882 --- a/fs/cachefiles/bind.c
40883 +++ b/fs/cachefiles/bind.c
40884 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
40885 args);
40886
40887 /* start by checking things over */
40888 - ASSERT(cache->fstop_percent >= 0 &&
40889 - cache->fstop_percent < cache->fcull_percent &&
40890 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
40891 cache->fcull_percent < cache->frun_percent &&
40892 cache->frun_percent < 100);
40893
40894 - ASSERT(cache->bstop_percent >= 0 &&
40895 - cache->bstop_percent < cache->bcull_percent &&
40896 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
40897 cache->bcull_percent < cache->brun_percent &&
40898 cache->brun_percent < 100);
40899
40900 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
40901 index 0a1467b..6a53245 100644
40902 --- a/fs/cachefiles/daemon.c
40903 +++ b/fs/cachefiles/daemon.c
40904 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
40905 if (n > buflen)
40906 return -EMSGSIZE;
40907
40908 - if (copy_to_user(_buffer, buffer, n) != 0)
40909 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40910 return -EFAULT;
40911
40912 return n;
40913 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
40914 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40915 return -EIO;
40916
40917 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
40918 + if (datalen > PAGE_SIZE - 1)
40919 return -EOPNOTSUPP;
40920
40921 /* drag the command string into the kernel so we can parse it */
40922 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
40923 if (args[0] != '%' || args[1] != '\0')
40924 return -EINVAL;
40925
40926 - if (fstop < 0 || fstop >= cache->fcull_percent)
40927 + if (fstop >= cache->fcull_percent)
40928 return cachefiles_daemon_range_error(cache, args);
40929
40930 cache->fstop_percent = fstop;
40931 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
40932 if (args[0] != '%' || args[1] != '\0')
40933 return -EINVAL;
40934
40935 - if (bstop < 0 || bstop >= cache->bcull_percent)
40936 + if (bstop >= cache->bcull_percent)
40937 return cachefiles_daemon_range_error(cache, args);
40938
40939 cache->bstop_percent = bstop;
40940 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
40941 index bd6bc1b..b627b53 100644
40942 --- a/fs/cachefiles/internal.h
40943 +++ b/fs/cachefiles/internal.h
40944 @@ -57,7 +57,7 @@ struct cachefiles_cache {
40945 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40946 struct rb_root active_nodes; /* active nodes (can't be culled) */
40947 rwlock_t active_lock; /* lock for active_nodes */
40948 - atomic_t gravecounter; /* graveyard uniquifier */
40949 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40950 unsigned frun_percent; /* when to stop culling (% files) */
40951 unsigned fcull_percent; /* when to start culling (% files) */
40952 unsigned fstop_percent; /* when to stop allocating (% files) */
40953 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
40954 * proc.c
40955 */
40956 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40957 -extern atomic_t cachefiles_lookup_histogram[HZ];
40958 -extern atomic_t cachefiles_mkdir_histogram[HZ];
40959 -extern atomic_t cachefiles_create_histogram[HZ];
40960 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40961 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40962 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40963
40964 extern int __init cachefiles_proc_init(void);
40965 extern void cachefiles_proc_cleanup(void);
40966 static inline
40967 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40968 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40969 {
40970 unsigned long jif = jiffies - start_jif;
40971 if (jif >= HZ)
40972 jif = HZ - 1;
40973 - atomic_inc(&histogram[jif]);
40974 + atomic_inc_unchecked(&histogram[jif]);
40975 }
40976
40977 #else
40978 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
40979 index a0358c2..d6137f2 100644
40980 --- a/fs/cachefiles/namei.c
40981 +++ b/fs/cachefiles/namei.c
40982 @@ -318,7 +318,7 @@ try_again:
40983 /* first step is to make up a grave dentry in the graveyard */
40984 sprintf(nbuffer, "%08x%08x",
40985 (uint32_t) get_seconds(),
40986 - (uint32_t) atomic_inc_return(&cache->gravecounter));
40987 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40988
40989 /* do the multiway lock magic */
40990 trap = lock_rename(cache->graveyard, dir);
40991 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
40992 index eccd339..4c1d995 100644
40993 --- a/fs/cachefiles/proc.c
40994 +++ b/fs/cachefiles/proc.c
40995 @@ -14,9 +14,9 @@
40996 #include <linux/seq_file.h>
40997 #include "internal.h"
40998
40999 -atomic_t cachefiles_lookup_histogram[HZ];
41000 -atomic_t cachefiles_mkdir_histogram[HZ];
41001 -atomic_t cachefiles_create_histogram[HZ];
41002 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
41003 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
41004 +atomic_unchecked_t cachefiles_create_histogram[HZ];
41005
41006 /*
41007 * display the latency histogram
41008 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
41009 return 0;
41010 default:
41011 index = (unsigned long) v - 3;
41012 - x = atomic_read(&cachefiles_lookup_histogram[index]);
41013 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
41014 - z = atomic_read(&cachefiles_create_histogram[index]);
41015 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
41016 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
41017 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
41018 if (x == 0 && y == 0 && z == 0)
41019 return 0;
41020
41021 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
41022 index 0e3c092..818480e 100644
41023 --- a/fs/cachefiles/rdwr.c
41024 +++ b/fs/cachefiles/rdwr.c
41025 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
41026 old_fs = get_fs();
41027 set_fs(KERNEL_DS);
41028 ret = file->f_op->write(
41029 - file, (const void __user *) data, len, &pos);
41030 + file, (const void __force_user *) data, len, &pos);
41031 set_fs(old_fs);
41032 kunmap(page);
41033 if (ret != len)
41034 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
41035 index 9895400..fa40a7d 100644
41036 --- a/fs/ceph/dir.c
41037 +++ b/fs/ceph/dir.c
41038 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
41039 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
41040 struct ceph_mds_client *mdsc = fsc->mdsc;
41041 unsigned frag = fpos_frag(filp->f_pos);
41042 - int off = fpos_off(filp->f_pos);
41043 + unsigned int off = fpos_off(filp->f_pos);
41044 int err;
41045 u32 ftype;
41046 struct ceph_mds_reply_info_parsed *rinfo;
41047 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
41048 index 84e8c07..6170d31 100644
41049 --- a/fs/cifs/cifs_debug.c
41050 +++ b/fs/cifs/cifs_debug.c
41051 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
41052
41053 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
41054 #ifdef CONFIG_CIFS_STATS2
41055 - atomic_set(&totBufAllocCount, 0);
41056 - atomic_set(&totSmBufAllocCount, 0);
41057 + atomic_set_unchecked(&totBufAllocCount, 0);
41058 + atomic_set_unchecked(&totSmBufAllocCount, 0);
41059 #endif /* CONFIG_CIFS_STATS2 */
41060 spin_lock(&cifs_tcp_ses_lock);
41061 list_for_each(tmp1, &cifs_tcp_ses_list) {
41062 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
41063 tcon = list_entry(tmp3,
41064 struct cifs_tcon,
41065 tcon_list);
41066 - atomic_set(&tcon->num_smbs_sent, 0);
41067 - atomic_set(&tcon->num_writes, 0);
41068 - atomic_set(&tcon->num_reads, 0);
41069 - atomic_set(&tcon->num_oplock_brks, 0);
41070 - atomic_set(&tcon->num_opens, 0);
41071 - atomic_set(&tcon->num_posixopens, 0);
41072 - atomic_set(&tcon->num_posixmkdirs, 0);
41073 - atomic_set(&tcon->num_closes, 0);
41074 - atomic_set(&tcon->num_deletes, 0);
41075 - atomic_set(&tcon->num_mkdirs, 0);
41076 - atomic_set(&tcon->num_rmdirs, 0);
41077 - atomic_set(&tcon->num_renames, 0);
41078 - atomic_set(&tcon->num_t2renames, 0);
41079 - atomic_set(&tcon->num_ffirst, 0);
41080 - atomic_set(&tcon->num_fnext, 0);
41081 - atomic_set(&tcon->num_fclose, 0);
41082 - atomic_set(&tcon->num_hardlinks, 0);
41083 - atomic_set(&tcon->num_symlinks, 0);
41084 - atomic_set(&tcon->num_locks, 0);
41085 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
41086 + atomic_set_unchecked(&tcon->num_writes, 0);
41087 + atomic_set_unchecked(&tcon->num_reads, 0);
41088 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
41089 + atomic_set_unchecked(&tcon->num_opens, 0);
41090 + atomic_set_unchecked(&tcon->num_posixopens, 0);
41091 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
41092 + atomic_set_unchecked(&tcon->num_closes, 0);
41093 + atomic_set_unchecked(&tcon->num_deletes, 0);
41094 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
41095 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
41096 + atomic_set_unchecked(&tcon->num_renames, 0);
41097 + atomic_set_unchecked(&tcon->num_t2renames, 0);
41098 + atomic_set_unchecked(&tcon->num_ffirst, 0);
41099 + atomic_set_unchecked(&tcon->num_fnext, 0);
41100 + atomic_set_unchecked(&tcon->num_fclose, 0);
41101 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
41102 + atomic_set_unchecked(&tcon->num_symlinks, 0);
41103 + atomic_set_unchecked(&tcon->num_locks, 0);
41104 }
41105 }
41106 }
41107 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
41108 smBufAllocCount.counter, cifs_min_small);
41109 #ifdef CONFIG_CIFS_STATS2
41110 seq_printf(m, "Total Large %d Small %d Allocations\n",
41111 - atomic_read(&totBufAllocCount),
41112 - atomic_read(&totSmBufAllocCount));
41113 + atomic_read_unchecked(&totBufAllocCount),
41114 + atomic_read_unchecked(&totSmBufAllocCount));
41115 #endif /* CONFIG_CIFS_STATS2 */
41116
41117 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
41118 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
41119 if (tcon->need_reconnect)
41120 seq_puts(m, "\tDISCONNECTED ");
41121 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
41122 - atomic_read(&tcon->num_smbs_sent),
41123 - atomic_read(&tcon->num_oplock_brks));
41124 + atomic_read_unchecked(&tcon->num_smbs_sent),
41125 + atomic_read_unchecked(&tcon->num_oplock_brks));
41126 seq_printf(m, "\nReads: %d Bytes: %lld",
41127 - atomic_read(&tcon->num_reads),
41128 + atomic_read_unchecked(&tcon->num_reads),
41129 (long long)(tcon->bytes_read));
41130 seq_printf(m, "\nWrites: %d Bytes: %lld",
41131 - atomic_read(&tcon->num_writes),
41132 + atomic_read_unchecked(&tcon->num_writes),
41133 (long long)(tcon->bytes_written));
41134 seq_printf(m, "\nFlushes: %d",
41135 - atomic_read(&tcon->num_flushes));
41136 + atomic_read_unchecked(&tcon->num_flushes));
41137 seq_printf(m, "\nLocks: %d HardLinks: %d "
41138 "Symlinks: %d",
41139 - atomic_read(&tcon->num_locks),
41140 - atomic_read(&tcon->num_hardlinks),
41141 - atomic_read(&tcon->num_symlinks));
41142 + atomic_read_unchecked(&tcon->num_locks),
41143 + atomic_read_unchecked(&tcon->num_hardlinks),
41144 + atomic_read_unchecked(&tcon->num_symlinks));
41145 seq_printf(m, "\nOpens: %d Closes: %d "
41146 "Deletes: %d",
41147 - atomic_read(&tcon->num_opens),
41148 - atomic_read(&tcon->num_closes),
41149 - atomic_read(&tcon->num_deletes));
41150 + atomic_read_unchecked(&tcon->num_opens),
41151 + atomic_read_unchecked(&tcon->num_closes),
41152 + atomic_read_unchecked(&tcon->num_deletes));
41153 seq_printf(m, "\nPosix Opens: %d "
41154 "Posix Mkdirs: %d",
41155 - atomic_read(&tcon->num_posixopens),
41156 - atomic_read(&tcon->num_posixmkdirs));
41157 + atomic_read_unchecked(&tcon->num_posixopens),
41158 + atomic_read_unchecked(&tcon->num_posixmkdirs));
41159 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
41160 - atomic_read(&tcon->num_mkdirs),
41161 - atomic_read(&tcon->num_rmdirs));
41162 + atomic_read_unchecked(&tcon->num_mkdirs),
41163 + atomic_read_unchecked(&tcon->num_rmdirs));
41164 seq_printf(m, "\nRenames: %d T2 Renames %d",
41165 - atomic_read(&tcon->num_renames),
41166 - atomic_read(&tcon->num_t2renames));
41167 + atomic_read_unchecked(&tcon->num_renames),
41168 + atomic_read_unchecked(&tcon->num_t2renames));
41169 seq_printf(m, "\nFindFirst: %d FNext %d "
41170 "FClose %d",
41171 - atomic_read(&tcon->num_ffirst),
41172 - atomic_read(&tcon->num_fnext),
41173 - atomic_read(&tcon->num_fclose));
41174 + atomic_read_unchecked(&tcon->num_ffirst),
41175 + atomic_read_unchecked(&tcon->num_fnext),
41176 + atomic_read_unchecked(&tcon->num_fclose));
41177 }
41178 }
41179 }
41180 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
41181 index 8f1fe32..38f9e27 100644
41182 --- a/fs/cifs/cifsfs.c
41183 +++ b/fs/cifs/cifsfs.c
41184 @@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
41185 cifs_req_cachep = kmem_cache_create("cifs_request",
41186 CIFSMaxBufSize +
41187 MAX_CIFS_HDR_SIZE, 0,
41188 - SLAB_HWCACHE_ALIGN, NULL);
41189 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
41190 if (cifs_req_cachep == NULL)
41191 return -ENOMEM;
41192
41193 @@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
41194 efficient to alloc 1 per page off the slab compared to 17K (5page)
41195 alloc of large cifs buffers even when page debugging is on */
41196 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
41197 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
41198 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
41199 NULL);
41200 if (cifs_sm_req_cachep == NULL) {
41201 mempool_destroy(cifs_req_poolp);
41202 @@ -1101,8 +1101,8 @@ init_cifs(void)
41203 atomic_set(&bufAllocCount, 0);
41204 atomic_set(&smBufAllocCount, 0);
41205 #ifdef CONFIG_CIFS_STATS2
41206 - atomic_set(&totBufAllocCount, 0);
41207 - atomic_set(&totSmBufAllocCount, 0);
41208 + atomic_set_unchecked(&totBufAllocCount, 0);
41209 + atomic_set_unchecked(&totSmBufAllocCount, 0);
41210 #endif /* CONFIG_CIFS_STATS2 */
41211
41212 atomic_set(&midCount, 0);
41213 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
41214 index 8238aa1..0347196 100644
41215 --- a/fs/cifs/cifsglob.h
41216 +++ b/fs/cifs/cifsglob.h
41217 @@ -392,28 +392,28 @@ struct cifs_tcon {
41218 __u16 Flags; /* optional support bits */
41219 enum statusEnum tidStatus;
41220 #ifdef CONFIG_CIFS_STATS
41221 - atomic_t num_smbs_sent;
41222 - atomic_t num_writes;
41223 - atomic_t num_reads;
41224 - atomic_t num_flushes;
41225 - atomic_t num_oplock_brks;
41226 - atomic_t num_opens;
41227 - atomic_t num_closes;
41228 - atomic_t num_deletes;
41229 - atomic_t num_mkdirs;
41230 - atomic_t num_posixopens;
41231 - atomic_t num_posixmkdirs;
41232 - atomic_t num_rmdirs;
41233 - atomic_t num_renames;
41234 - atomic_t num_t2renames;
41235 - atomic_t num_ffirst;
41236 - atomic_t num_fnext;
41237 - atomic_t num_fclose;
41238 - atomic_t num_hardlinks;
41239 - atomic_t num_symlinks;
41240 - atomic_t num_locks;
41241 - atomic_t num_acl_get;
41242 - atomic_t num_acl_set;
41243 + atomic_unchecked_t num_smbs_sent;
41244 + atomic_unchecked_t num_writes;
41245 + atomic_unchecked_t num_reads;
41246 + atomic_unchecked_t num_flushes;
41247 + atomic_unchecked_t num_oplock_brks;
41248 + atomic_unchecked_t num_opens;
41249 + atomic_unchecked_t num_closes;
41250 + atomic_unchecked_t num_deletes;
41251 + atomic_unchecked_t num_mkdirs;
41252 + atomic_unchecked_t num_posixopens;
41253 + atomic_unchecked_t num_posixmkdirs;
41254 + atomic_unchecked_t num_rmdirs;
41255 + atomic_unchecked_t num_renames;
41256 + atomic_unchecked_t num_t2renames;
41257 + atomic_unchecked_t num_ffirst;
41258 + atomic_unchecked_t num_fnext;
41259 + atomic_unchecked_t num_fclose;
41260 + atomic_unchecked_t num_hardlinks;
41261 + atomic_unchecked_t num_symlinks;
41262 + atomic_unchecked_t num_locks;
41263 + atomic_unchecked_t num_acl_get;
41264 + atomic_unchecked_t num_acl_set;
41265 #ifdef CONFIG_CIFS_STATS2
41266 unsigned long long time_writes;
41267 unsigned long long time_reads;
41268 @@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
41269 }
41270
41271 #ifdef CONFIG_CIFS_STATS
41272 -#define cifs_stats_inc atomic_inc
41273 +#define cifs_stats_inc atomic_inc_unchecked
41274
41275 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
41276 unsigned int bytes)
41277 @@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
41278 /* Various Debug counters */
41279 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
41280 #ifdef CONFIG_CIFS_STATS2
41281 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
41282 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
41283 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
41284 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
41285 #endif
41286 GLOBAL_EXTERN atomic_t smBufAllocCount;
41287 GLOBAL_EXTERN atomic_t midCount;
41288 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
41289 index 6b0e064..94e6c3c 100644
41290 --- a/fs/cifs/link.c
41291 +++ b/fs/cifs/link.c
41292 @@ -600,7 +600,7 @@ symlink_exit:
41293
41294 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41295 {
41296 - char *p = nd_get_link(nd);
41297 + const char *p = nd_get_link(nd);
41298 if (!IS_ERR(p))
41299 kfree(p);
41300 }
41301 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
41302 index 703ef5c..2a44ed5 100644
41303 --- a/fs/cifs/misc.c
41304 +++ b/fs/cifs/misc.c
41305 @@ -156,7 +156,7 @@ cifs_buf_get(void)
41306 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41307 atomic_inc(&bufAllocCount);
41308 #ifdef CONFIG_CIFS_STATS2
41309 - atomic_inc(&totBufAllocCount);
41310 + atomic_inc_unchecked(&totBufAllocCount);
41311 #endif /* CONFIG_CIFS_STATS2 */
41312 }
41313
41314 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41315 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41316 atomic_inc(&smBufAllocCount);
41317 #ifdef CONFIG_CIFS_STATS2
41318 - atomic_inc(&totSmBufAllocCount);
41319 + atomic_inc_unchecked(&totSmBufAllocCount);
41320 #endif /* CONFIG_CIFS_STATS2 */
41321
41322 }
41323 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
41324 index 6901578..d402eb5 100644
41325 --- a/fs/coda/cache.c
41326 +++ b/fs/coda/cache.c
41327 @@ -24,7 +24,7 @@
41328 #include "coda_linux.h"
41329 #include "coda_cache.h"
41330
41331 -static atomic_t permission_epoch = ATOMIC_INIT(0);
41332 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41333
41334 /* replace or extend an acl cache hit */
41335 void coda_cache_enter(struct inode *inode, int mask)
41336 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
41337 struct coda_inode_info *cii = ITOC(inode);
41338
41339 spin_lock(&cii->c_lock);
41340 - cii->c_cached_epoch = atomic_read(&permission_epoch);
41341 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41342 if (cii->c_uid != current_fsuid()) {
41343 cii->c_uid = current_fsuid();
41344 cii->c_cached_perm = mask;
41345 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
41346 {
41347 struct coda_inode_info *cii = ITOC(inode);
41348 spin_lock(&cii->c_lock);
41349 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41350 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41351 spin_unlock(&cii->c_lock);
41352 }
41353
41354 /* remove all acl caches */
41355 void coda_cache_clear_all(struct super_block *sb)
41356 {
41357 - atomic_inc(&permission_epoch);
41358 + atomic_inc_unchecked(&permission_epoch);
41359 }
41360
41361
41362 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
41363 spin_lock(&cii->c_lock);
41364 hit = (mask & cii->c_cached_perm) == mask &&
41365 cii->c_uid == current_fsuid() &&
41366 - cii->c_cached_epoch == atomic_read(&permission_epoch);
41367 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41368 spin_unlock(&cii->c_lock);
41369
41370 return hit;
41371 diff --git a/fs/compat.c b/fs/compat.c
41372 index c987875..08771ca 100644
41373 --- a/fs/compat.c
41374 +++ b/fs/compat.c
41375 @@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
41376 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41377 {
41378 compat_ino_t ino = stat->ino;
41379 - typeof(ubuf->st_uid) uid = 0;
41380 - typeof(ubuf->st_gid) gid = 0;
41381 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41382 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41383 int err;
41384
41385 SET_UID(uid, stat->uid);
41386 @@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
41387
41388 set_fs(KERNEL_DS);
41389 /* The __user pointer cast is valid because of the set_fs() */
41390 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41391 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41392 set_fs(oldfs);
41393 /* truncating is ok because it's a user address */
41394 if (!ret)
41395 @@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
41396 goto out;
41397
41398 ret = -EINVAL;
41399 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41400 + if (nr_segs > UIO_MAXIOV)
41401 goto out;
41402 if (nr_segs > fast_segs) {
41403 ret = -ENOMEM;
41404 @@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
41405
41406 struct compat_readdir_callback {
41407 struct compat_old_linux_dirent __user *dirent;
41408 + struct file * file;
41409 int result;
41410 };
41411
41412 @@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
41413 buf->result = -EOVERFLOW;
41414 return -EOVERFLOW;
41415 }
41416 +
41417 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41418 + return 0;
41419 +
41420 buf->result++;
41421 dirent = buf->dirent;
41422 if (!access_ok(VERIFY_WRITE, dirent,
41423 @@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
41424
41425 buf.result = 0;
41426 buf.dirent = dirent;
41427 + buf.file = file;
41428
41429 error = vfs_readdir(file, compat_fillonedir, &buf);
41430 if (buf.result)
41431 @@ -914,6 +920,7 @@ struct compat_linux_dirent {
41432 struct compat_getdents_callback {
41433 struct compat_linux_dirent __user *current_dir;
41434 struct compat_linux_dirent __user *previous;
41435 + struct file * file;
41436 int count;
41437 int error;
41438 };
41439 @@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
41440 buf->error = -EOVERFLOW;
41441 return -EOVERFLOW;
41442 }
41443 +
41444 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41445 + return 0;
41446 +
41447 dirent = buf->previous;
41448 if (dirent) {
41449 if (__put_user(offset, &dirent->d_off))
41450 @@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
41451 buf.previous = NULL;
41452 buf.count = count;
41453 buf.error = 0;
41454 + buf.file = file;
41455
41456 error = vfs_readdir(file, compat_filldir, &buf);
41457 if (error >= 0)
41458 @@ -1003,6 +1015,7 @@ out:
41459 struct compat_getdents_callback64 {
41460 struct linux_dirent64 __user *current_dir;
41461 struct linux_dirent64 __user *previous;
41462 + struct file * file;
41463 int count;
41464 int error;
41465 };
41466 @@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
41467 buf->error = -EINVAL; /* only used if we fail.. */
41468 if (reclen > buf->count)
41469 return -EINVAL;
41470 +
41471 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41472 + return 0;
41473 +
41474 dirent = buf->previous;
41475
41476 if (dirent) {
41477 @@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
41478 buf.previous = NULL;
41479 buf.count = count;
41480 buf.error = 0;
41481 + buf.file = file;
41482
41483 error = vfs_readdir(file, compat_filldir64, &buf);
41484 if (error >= 0)
41485 error = buf.error;
41486 lastdirent = buf.previous;
41487 if (lastdirent) {
41488 - typeof(lastdirent->d_off) d_off = file->f_pos;
41489 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41490 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41491 error = -EFAULT;
41492 else
41493 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
41494 index 112e45a..b59845b 100644
41495 --- a/fs/compat_binfmt_elf.c
41496 +++ b/fs/compat_binfmt_elf.c
41497 @@ -30,11 +30,13 @@
41498 #undef elf_phdr
41499 #undef elf_shdr
41500 #undef elf_note
41501 +#undef elf_dyn
41502 #undef elf_addr_t
41503 #define elfhdr elf32_hdr
41504 #define elf_phdr elf32_phdr
41505 #define elf_shdr elf32_shdr
41506 #define elf_note elf32_note
41507 +#define elf_dyn Elf32_Dyn
41508 #define elf_addr_t Elf32_Addr
41509
41510 /*
41511 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
41512 index 51352de..93292ff 100644
41513 --- a/fs/compat_ioctl.c
41514 +++ b/fs/compat_ioctl.c
41515 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
41516
41517 err = get_user(palp, &up->palette);
41518 err |= get_user(length, &up->length);
41519 + if (err)
41520 + return -EFAULT;
41521
41522 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41523 err = put_user(compat_ptr(palp), &up_native->palette);
41524 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
41525 return -EFAULT;
41526 if (__get_user(udata, &ss32->iomem_base))
41527 return -EFAULT;
41528 - ss.iomem_base = compat_ptr(udata);
41529 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41530 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41531 __get_user(ss.port_high, &ss32->port_high))
41532 return -EFAULT;
41533 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
41534 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41535 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41536 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41537 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41538 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41539 return -EFAULT;
41540
41541 return ioctl_preallocate(file, p);
41542 @@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
41543 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41544 {
41545 unsigned int a, b;
41546 - a = *(unsigned int *)p;
41547 - b = *(unsigned int *)q;
41548 + a = *(const unsigned int *)p;
41549 + b = *(const unsigned int *)q;
41550 if (a > b)
41551 return 1;
41552 if (a < b)
41553 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
41554 index 9a37a9b..35792b6 100644
41555 --- a/fs/configfs/dir.c
41556 +++ b/fs/configfs/dir.c
41557 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41558 }
41559 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41560 struct configfs_dirent *next;
41561 - const char * name;
41562 + const unsigned char * name;
41563 + char d_name[sizeof(next->s_dentry->d_iname)];
41564 int len;
41565 struct inode *inode = NULL;
41566
41567 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41568 continue;
41569
41570 name = configfs_get_name(next);
41571 - len = strlen(name);
41572 + if (next->s_dentry && name == next->s_dentry->d_iname) {
41573 + len = next->s_dentry->d_name.len;
41574 + memcpy(d_name, name, len);
41575 + name = d_name;
41576 + } else
41577 + len = strlen(name);
41578
41579 /*
41580 * We'll have a dentry and an inode for
41581 diff --git a/fs/dcache.c b/fs/dcache.c
41582 index f7908ae..920a680 100644
41583 --- a/fs/dcache.c
41584 +++ b/fs/dcache.c
41585 @@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
41586 mempages -= reserve;
41587
41588 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41589 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41590 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41591
41592 dcache_init();
41593 inode_init();
41594 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
41595 index f3a257d..715ac0f 100644
41596 --- a/fs/debugfs/inode.c
41597 +++ b/fs/debugfs/inode.c
41598 @@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
41599 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
41600 {
41601 return debugfs_create_file(name,
41602 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41603 + S_IFDIR | S_IRWXU,
41604 +#else
41605 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41606 +#endif
41607 parent, NULL, NULL);
41608 }
41609 EXPORT_SYMBOL_GPL(debugfs_create_dir);
41610 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
41611 index af11098..81e3bbe 100644
41612 --- a/fs/ecryptfs/inode.c
41613 +++ b/fs/ecryptfs/inode.c
41614 @@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
41615 old_fs = get_fs();
41616 set_fs(get_ds());
41617 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41618 - (char __user *)lower_buf,
41619 + (char __force_user *)lower_buf,
41620 lower_bufsiz);
41621 set_fs(old_fs);
41622 if (rc < 0)
41623 @@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41624 }
41625 old_fs = get_fs();
41626 set_fs(get_ds());
41627 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41628 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41629 set_fs(old_fs);
41630 if (rc < 0) {
41631 kfree(buf);
41632 @@ -752,7 +752,7 @@ out:
41633 static void
41634 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41635 {
41636 - char *buf = nd_get_link(nd);
41637 + const char *buf = nd_get_link(nd);
41638 if (!IS_ERR(buf)) {
41639 /* Free the char* */
41640 kfree(buf);
41641 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
41642 index 0dc5a3d..d3cdeea 100644
41643 --- a/fs/ecryptfs/miscdev.c
41644 +++ b/fs/ecryptfs/miscdev.c
41645 @@ -328,7 +328,7 @@ check_list:
41646 goto out_unlock_msg_ctx;
41647 i = 5;
41648 if (msg_ctx->msg) {
41649 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
41650 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41651 goto out_unlock_msg_ctx;
41652 i += packet_length_size;
41653 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41654 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
41655 index 608c1c3..7d040a8 100644
41656 --- a/fs/ecryptfs/read_write.c
41657 +++ b/fs/ecryptfs/read_write.c
41658 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
41659 return -EIO;
41660 fs_save = get_fs();
41661 set_fs(get_ds());
41662 - rc = vfs_write(lower_file, data, size, &offset);
41663 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41664 set_fs(fs_save);
41665 mark_inode_dirty_sync(ecryptfs_inode);
41666 return rc;
41667 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
41668 return -EIO;
41669 fs_save = get_fs();
41670 set_fs(get_ds());
41671 - rc = vfs_read(lower_file, data, size, &offset);
41672 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41673 set_fs(fs_save);
41674 return rc;
41675 }
41676 diff --git a/fs/exec.c b/fs/exec.c
41677 index 3625464..04855f9 100644
41678 --- a/fs/exec.c
41679 +++ b/fs/exec.c
41680 @@ -55,12 +55,28 @@
41681 #include <linux/pipe_fs_i.h>
41682 #include <linux/oom.h>
41683 #include <linux/compat.h>
41684 +#include <linux/random.h>
41685 +#include <linux/seq_file.h>
41686 +
41687 +#ifdef CONFIG_PAX_REFCOUNT
41688 +#include <linux/kallsyms.h>
41689 +#include <linux/kdebug.h>
41690 +#endif
41691
41692 #include <asm/uaccess.h>
41693 #include <asm/mmu_context.h>
41694 #include <asm/tlb.h>
41695 #include "internal.h"
41696
41697 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
41698 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
41699 +#endif
41700 +
41701 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41702 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41703 +EXPORT_SYMBOL(pax_set_initial_flags_func);
41704 +#endif
41705 +
41706 int core_uses_pid;
41707 char core_pattern[CORENAME_MAX_SIZE] = "core";
41708 unsigned int core_pipe_limit;
41709 @@ -70,7 +86,7 @@ struct core_name {
41710 char *corename;
41711 int used, size;
41712 };
41713 -static atomic_t call_count = ATOMIC_INIT(1);
41714 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41715
41716 /* The maximal length of core_pattern is also specified in sysctl.c */
41717
41718 @@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41719 int write)
41720 {
41721 struct page *page;
41722 - int ret;
41723
41724 -#ifdef CONFIG_STACK_GROWSUP
41725 - if (write) {
41726 - ret = expand_downwards(bprm->vma, pos);
41727 - if (ret < 0)
41728 - return NULL;
41729 - }
41730 -#endif
41731 - ret = get_user_pages(current, bprm->mm, pos,
41732 - 1, write, 1, &page, NULL);
41733 - if (ret <= 0)
41734 + if (0 > expand_downwards(bprm->vma, pos))
41735 + return NULL;
41736 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41737 return NULL;
41738
41739 if (write) {
41740 @@ -215,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41741 if (size <= ARG_MAX)
41742 return page;
41743
41744 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41745 + // only allow 1MB for argv+env on suid/sgid binaries
41746 + // to prevent easy ASLR exhaustion
41747 + if (((bprm->cred->euid != current_euid()) ||
41748 + (bprm->cred->egid != current_egid())) &&
41749 + (size > (1024 * 1024))) {
41750 + put_page(page);
41751 + return NULL;
41752 + }
41753 +#endif
41754 +
41755 /*
41756 * Limit to 1/4-th the stack size for the argv+env strings.
41757 * This ensures that:
41758 @@ -274,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41759 vma->vm_end = STACK_TOP_MAX;
41760 vma->vm_start = vma->vm_end - PAGE_SIZE;
41761 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41762 +
41763 +#ifdef CONFIG_PAX_SEGMEXEC
41764 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41765 +#endif
41766 +
41767 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41768 INIT_LIST_HEAD(&vma->anon_vma_chain);
41769
41770 @@ -288,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41771 mm->stack_vm = mm->total_vm = 1;
41772 up_write(&mm->mmap_sem);
41773 bprm->p = vma->vm_end - sizeof(void *);
41774 +
41775 +#ifdef CONFIG_PAX_RANDUSTACK
41776 + if (randomize_va_space)
41777 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41778 +#endif
41779 +
41780 return 0;
41781 err:
41782 up_write(&mm->mmap_sem);
41783 @@ -396,19 +426,7 @@ err:
41784 return err;
41785 }
41786
41787 -struct user_arg_ptr {
41788 -#ifdef CONFIG_COMPAT
41789 - bool is_compat;
41790 -#endif
41791 - union {
41792 - const char __user *const __user *native;
41793 -#ifdef CONFIG_COMPAT
41794 - compat_uptr_t __user *compat;
41795 -#endif
41796 - } ptr;
41797 -};
41798 -
41799 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41800 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41801 {
41802 const char __user *native;
41803
41804 @@ -417,14 +435,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41805 compat_uptr_t compat;
41806
41807 if (get_user(compat, argv.ptr.compat + nr))
41808 - return ERR_PTR(-EFAULT);
41809 + return (const char __force_user *)ERR_PTR(-EFAULT);
41810
41811 return compat_ptr(compat);
41812 }
41813 #endif
41814
41815 if (get_user(native, argv.ptr.native + nr))
41816 - return ERR_PTR(-EFAULT);
41817 + return (const char __force_user *)ERR_PTR(-EFAULT);
41818
41819 return native;
41820 }
41821 @@ -443,7 +461,7 @@ static int count(struct user_arg_ptr argv, int max)
41822 if (!p)
41823 break;
41824
41825 - if (IS_ERR(p))
41826 + if (IS_ERR((const char __force_kernel *)p))
41827 return -EFAULT;
41828
41829 if (i++ >= max)
41830 @@ -477,7 +495,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
41831
41832 ret = -EFAULT;
41833 str = get_user_arg_ptr(argv, argc);
41834 - if (IS_ERR(str))
41835 + if (IS_ERR((const char __force_kernel *)str))
41836 goto out;
41837
41838 len = strnlen_user(str, MAX_ARG_STRLEN);
41839 @@ -559,7 +577,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
41840 int r;
41841 mm_segment_t oldfs = get_fs();
41842 struct user_arg_ptr argv = {
41843 - .ptr.native = (const char __user *const __user *)__argv,
41844 + .ptr.native = (const char __force_user *const __force_user *)__argv,
41845 };
41846
41847 set_fs(KERNEL_DS);
41848 @@ -594,7 +612,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41849 unsigned long new_end = old_end - shift;
41850 struct mmu_gather tlb;
41851
41852 - BUG_ON(new_start > new_end);
41853 + if (new_start >= new_end || new_start < mmap_min_addr)
41854 + return -ENOMEM;
41855
41856 /*
41857 * ensure there are no vmas between where we want to go
41858 @@ -603,6 +622,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41859 if (vma != find_vma(mm, new_start))
41860 return -EFAULT;
41861
41862 +#ifdef CONFIG_PAX_SEGMEXEC
41863 + BUG_ON(pax_find_mirror_vma(vma));
41864 +#endif
41865 +
41866 /*
41867 * cover the whole range: [new_start, old_end)
41868 */
41869 @@ -683,10 +706,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41870 stack_top = arch_align_stack(stack_top);
41871 stack_top = PAGE_ALIGN(stack_top);
41872
41873 - if (unlikely(stack_top < mmap_min_addr) ||
41874 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41875 - return -ENOMEM;
41876 -
41877 stack_shift = vma->vm_end - stack_top;
41878
41879 bprm->p -= stack_shift;
41880 @@ -698,8 +717,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
41881 bprm->exec -= stack_shift;
41882
41883 down_write(&mm->mmap_sem);
41884 +
41885 + /* Move stack pages down in memory. */
41886 + if (stack_shift) {
41887 + ret = shift_arg_pages(vma, stack_shift);
41888 + if (ret)
41889 + goto out_unlock;
41890 + }
41891 +
41892 vm_flags = VM_STACK_FLAGS;
41893
41894 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41895 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41896 + vm_flags &= ~VM_EXEC;
41897 +
41898 +#ifdef CONFIG_PAX_MPROTECT
41899 + if (mm->pax_flags & MF_PAX_MPROTECT)
41900 + vm_flags &= ~VM_MAYEXEC;
41901 +#endif
41902 +
41903 + }
41904 +#endif
41905 +
41906 /*
41907 * Adjust stack execute permissions; explicitly enable for
41908 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41909 @@ -718,13 +757,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41910 goto out_unlock;
41911 BUG_ON(prev != vma);
41912
41913 - /* Move stack pages down in memory. */
41914 - if (stack_shift) {
41915 - ret = shift_arg_pages(vma, stack_shift);
41916 - if (ret)
41917 - goto out_unlock;
41918 - }
41919 -
41920 /* mprotect_fixup is overkill to remove the temporary stack flags */
41921 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41922
41923 @@ -805,7 +837,7 @@ int kernel_read(struct file *file, loff_t offset,
41924 old_fs = get_fs();
41925 set_fs(get_ds());
41926 /* The cast to a user pointer is valid due to the set_fs() */
41927 - result = vfs_read(file, (void __user *)addr, count, &pos);
41928 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
41929 set_fs(old_fs);
41930 return result;
41931 }
41932 @@ -1067,6 +1099,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
41933 perf_event_comm(tsk);
41934 }
41935
41936 +static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
41937 +{
41938 + int i, ch;
41939 +
41940 + /* Copies the binary name from after last slash */
41941 + for (i = 0; (ch = *(fn++)) != '\0';) {
41942 + if (ch == '/')
41943 + i = 0; /* overwrite what we wrote */
41944 + else
41945 + if (i < len - 1)
41946 + tcomm[i++] = ch;
41947 + }
41948 + tcomm[i] = '\0';
41949 +}
41950 +
41951 int flush_old_exec(struct linux_binprm * bprm)
41952 {
41953 int retval;
41954 @@ -1081,6 +1128,7 @@ int flush_old_exec(struct linux_binprm * bprm)
41955
41956 set_mm_exe_file(bprm->mm, bprm->file);
41957
41958 + filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
41959 /*
41960 * Release all of the old mmap stuff
41961 */
41962 @@ -1112,10 +1160,6 @@ EXPORT_SYMBOL(would_dump);
41963
41964 void setup_new_exec(struct linux_binprm * bprm)
41965 {
41966 - int i, ch;
41967 - const char *name;
41968 - char tcomm[sizeof(current->comm)];
41969 -
41970 arch_pick_mmap_layout(current->mm);
41971
41972 /* This is the point of no return */
41973 @@ -1126,18 +1170,7 @@ void setup_new_exec(struct linux_binprm * bprm)
41974 else
41975 set_dumpable(current->mm, suid_dumpable);
41976
41977 - name = bprm->filename;
41978 -
41979 - /* Copies the binary name from after last slash */
41980 - for (i=0; (ch = *(name++)) != '\0';) {
41981 - if (ch == '/')
41982 - i = 0; /* overwrite what we wrote */
41983 - else
41984 - if (i < (sizeof(tcomm) - 1))
41985 - tcomm[i++] = ch;
41986 - }
41987 - tcomm[i] = '\0';
41988 - set_task_comm(current, tcomm);
41989 + set_task_comm(current, bprm->tcomm);
41990
41991 /* Set the new mm task size. We have to do that late because it may
41992 * depend on TIF_32BIT which is only updated in flush_thread() on
41993 @@ -1247,7 +1280,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
41994 }
41995 rcu_read_unlock();
41996
41997 - if (p->fs->users > n_fs) {
41998 + if (atomic_read(&p->fs->users) > n_fs) {
41999 bprm->unsafe |= LSM_UNSAFE_SHARE;
42000 } else {
42001 res = -EAGAIN;
42002 @@ -1442,6 +1475,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
42003
42004 EXPORT_SYMBOL(search_binary_handler);
42005
42006 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42007 +static atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
42008 +#endif
42009 +
42010 /*
42011 * sys_execve() executes a new program.
42012 */
42013 @@ -1450,6 +1487,11 @@ static int do_execve_common(const char *filename,
42014 struct user_arg_ptr envp,
42015 struct pt_regs *regs)
42016 {
42017 +#ifdef CONFIG_GRKERNSEC
42018 + struct file *old_exec_file;
42019 + struct acl_subject_label *old_acl;
42020 + struct rlimit old_rlim[RLIM_NLIMITS];
42021 +#endif
42022 struct linux_binprm *bprm;
42023 struct file *file;
42024 struct files_struct *displaced;
42025 @@ -1457,6 +1499,8 @@ static int do_execve_common(const char *filename,
42026 int retval;
42027 const struct cred *cred = current_cred();
42028
42029 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
42030 +
42031 /*
42032 * We move the actual failure in case of RLIMIT_NPROC excess from
42033 * set*uid() to execve() because too many poorly written programs
42034 @@ -1497,12 +1541,27 @@ static int do_execve_common(const char *filename,
42035 if (IS_ERR(file))
42036 goto out_unmark;
42037
42038 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
42039 + retval = -EPERM;
42040 + goto out_file;
42041 + }
42042 +
42043 sched_exec();
42044
42045 bprm->file = file;
42046 bprm->filename = filename;
42047 bprm->interp = filename;
42048
42049 + if (gr_process_user_ban()) {
42050 + retval = -EPERM;
42051 + goto out_file;
42052 + }
42053 +
42054 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
42055 + retval = -EACCES;
42056 + goto out_file;
42057 + }
42058 +
42059 retval = bprm_mm_init(bprm);
42060 if (retval)
42061 goto out_file;
42062 @@ -1532,11 +1591,46 @@ static int do_execve_common(const char *filename,
42063 if (retval < 0)
42064 goto out;
42065
42066 + if (!gr_tpe_allow(file)) {
42067 + retval = -EACCES;
42068 + goto out;
42069 + }
42070 +
42071 + if (gr_check_crash_exec(file)) {
42072 + retval = -EACCES;
42073 + goto out;
42074 + }
42075 +
42076 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
42077 +
42078 + gr_handle_exec_args(bprm, argv);
42079 +
42080 +#ifdef CONFIG_GRKERNSEC
42081 + old_acl = current->acl;
42082 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
42083 + old_exec_file = current->exec_file;
42084 + get_file(file);
42085 + current->exec_file = file;
42086 +#endif
42087 +
42088 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
42089 + bprm->unsafe);
42090 + if (retval < 0)
42091 + goto out_fail;
42092 +
42093 retval = search_binary_handler(bprm,regs);
42094 if (retval < 0)
42095 - goto out;
42096 + goto out_fail;
42097 +#ifdef CONFIG_GRKERNSEC
42098 + if (old_exec_file)
42099 + fput(old_exec_file);
42100 +#endif
42101
42102 /* execve succeeded */
42103 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42104 + current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
42105 +#endif
42106 +
42107 current->fs->in_exec = 0;
42108 current->in_execve = 0;
42109 acct_update_integrals(current);
42110 @@ -1545,6 +1639,14 @@ static int do_execve_common(const char *filename,
42111 put_files_struct(displaced);
42112 return retval;
42113
42114 +out_fail:
42115 +#ifdef CONFIG_GRKERNSEC
42116 + current->acl = old_acl;
42117 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
42118 + fput(current->exec_file);
42119 + current->exec_file = old_exec_file;
42120 +#endif
42121 +
42122 out:
42123 if (bprm->mm) {
42124 acct_arg_size(bprm, 0);
42125 @@ -1618,7 +1720,7 @@ static int expand_corename(struct core_name *cn)
42126 {
42127 char *old_corename = cn->corename;
42128
42129 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
42130 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
42131 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
42132
42133 if (!cn->corename) {
42134 @@ -1715,7 +1817,7 @@ static int format_corename(struct core_name *cn, long signr)
42135 int pid_in_pattern = 0;
42136 int err = 0;
42137
42138 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
42139 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
42140 cn->corename = kmalloc(cn->size, GFP_KERNEL);
42141 cn->used = 0;
42142
42143 @@ -1812,6 +1914,218 @@ out:
42144 return ispipe;
42145 }
42146
42147 +int pax_check_flags(unsigned long *flags)
42148 +{
42149 + int retval = 0;
42150 +
42151 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
42152 + if (*flags & MF_PAX_SEGMEXEC)
42153 + {
42154 + *flags &= ~MF_PAX_SEGMEXEC;
42155 + retval = -EINVAL;
42156 + }
42157 +#endif
42158 +
42159 + if ((*flags & MF_PAX_PAGEEXEC)
42160 +
42161 +#ifdef CONFIG_PAX_PAGEEXEC
42162 + && (*flags & MF_PAX_SEGMEXEC)
42163 +#endif
42164 +
42165 + )
42166 + {
42167 + *flags &= ~MF_PAX_PAGEEXEC;
42168 + retval = -EINVAL;
42169 + }
42170 +
42171 + if ((*flags & MF_PAX_MPROTECT)
42172 +
42173 +#ifdef CONFIG_PAX_MPROTECT
42174 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42175 +#endif
42176 +
42177 + )
42178 + {
42179 + *flags &= ~MF_PAX_MPROTECT;
42180 + retval = -EINVAL;
42181 + }
42182 +
42183 + if ((*flags & MF_PAX_EMUTRAMP)
42184 +
42185 +#ifdef CONFIG_PAX_EMUTRAMP
42186 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42187 +#endif
42188 +
42189 + )
42190 + {
42191 + *flags &= ~MF_PAX_EMUTRAMP;
42192 + retval = -EINVAL;
42193 + }
42194 +
42195 + return retval;
42196 +}
42197 +
42198 +EXPORT_SYMBOL(pax_check_flags);
42199 +
42200 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42201 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
42202 +{
42203 + struct task_struct *tsk = current;
42204 + struct mm_struct *mm = current->mm;
42205 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
42206 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
42207 + char *path_exec = NULL;
42208 + char *path_fault = NULL;
42209 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
42210 +
42211 + if (buffer_exec && buffer_fault) {
42212 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
42213 +
42214 + down_read(&mm->mmap_sem);
42215 + vma = mm->mmap;
42216 + while (vma && (!vma_exec || !vma_fault)) {
42217 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
42218 + vma_exec = vma;
42219 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
42220 + vma_fault = vma;
42221 + vma = vma->vm_next;
42222 + }
42223 + if (vma_exec) {
42224 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
42225 + if (IS_ERR(path_exec))
42226 + path_exec = "<path too long>";
42227 + else {
42228 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
42229 + if (path_exec) {
42230 + *path_exec = 0;
42231 + path_exec = buffer_exec;
42232 + } else
42233 + path_exec = "<path too long>";
42234 + }
42235 + }
42236 + if (vma_fault) {
42237 + start = vma_fault->vm_start;
42238 + end = vma_fault->vm_end;
42239 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
42240 + if (vma_fault->vm_file) {
42241 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
42242 + if (IS_ERR(path_fault))
42243 + path_fault = "<path too long>";
42244 + else {
42245 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
42246 + if (path_fault) {
42247 + *path_fault = 0;
42248 + path_fault = buffer_fault;
42249 + } else
42250 + path_fault = "<path too long>";
42251 + }
42252 + } else
42253 + path_fault = "<anonymous mapping>";
42254 + }
42255 + up_read(&mm->mmap_sem);
42256 + }
42257 + if (tsk->signal->curr_ip)
42258 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
42259 + else
42260 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
42261 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
42262 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
42263 + task_uid(tsk), task_euid(tsk), pc, sp);
42264 + free_page((unsigned long)buffer_exec);
42265 + free_page((unsigned long)buffer_fault);
42266 + pax_report_insns(regs, pc, sp);
42267 + do_coredump(SIGKILL, SIGKILL, regs);
42268 +}
42269 +#endif
42270 +
42271 +#ifdef CONFIG_PAX_REFCOUNT
42272 +void pax_report_refcount_overflow(struct pt_regs *regs)
42273 +{
42274 + if (current->signal->curr_ip)
42275 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42276 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
42277 + else
42278 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42279 + current->comm, task_pid_nr(current), current_uid(), current_euid());
42280 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
42281 + show_regs(regs);
42282 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
42283 +}
42284 +#endif
42285 +
42286 +#ifdef CONFIG_PAX_USERCOPY
42287 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
42288 +int object_is_on_stack(const void *obj, unsigned long len)
42289 +{
42290 + const void * const stack = task_stack_page(current);
42291 + const void * const stackend = stack + THREAD_SIZE;
42292 +
42293 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42294 + const void *frame = NULL;
42295 + const void *oldframe;
42296 +#endif
42297 +
42298 + if (obj + len < obj)
42299 + return -1;
42300 +
42301 + if (obj + len <= stack || stackend <= obj)
42302 + return 0;
42303 +
42304 + if (obj < stack || stackend < obj + len)
42305 + return -1;
42306 +
42307 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42308 + oldframe = __builtin_frame_address(1);
42309 + if (oldframe)
42310 + frame = __builtin_frame_address(2);
42311 + /*
42312 + low ----------------------------------------------> high
42313 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
42314 + ^----------------^
42315 + allow copies only within here
42316 + */
42317 + while (stack <= frame && frame < stackend) {
42318 + /* if obj + len extends past the last frame, this
42319 + check won't pass and the next frame will be 0,
42320 + causing us to bail out and correctly report
42321 + the copy as invalid
42322 + */
42323 + if (obj + len <= frame)
42324 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
42325 + oldframe = frame;
42326 + frame = *(const void * const *)frame;
42327 + }
42328 + return -1;
42329 +#else
42330 + return 1;
42331 +#endif
42332 +}
42333 +
42334 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
42335 +{
42336 + if (current->signal->curr_ip)
42337 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42338 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42339 + else
42340 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42341 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42342 + dump_stack();
42343 + gr_handle_kernel_exploit();
42344 + do_group_exit(SIGKILL);
42345 +}
42346 +#endif
42347 +
42348 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
42349 +void pax_track_stack(void)
42350 +{
42351 + unsigned long sp = (unsigned long)&sp;
42352 + if (sp < current_thread_info()->lowest_stack &&
42353 + sp > (unsigned long)task_stack_page(current))
42354 + current_thread_info()->lowest_stack = sp;
42355 +}
42356 +EXPORT_SYMBOL(pax_track_stack);
42357 +#endif
42358 +
42359 static int zap_process(struct task_struct *start, int exit_code)
42360 {
42361 struct task_struct *t;
42362 @@ -2023,17 +2337,17 @@ static void wait_for_dump_helpers(struct file *file)
42363 pipe = file->f_path.dentry->d_inode->i_pipe;
42364
42365 pipe_lock(pipe);
42366 - pipe->readers++;
42367 - pipe->writers--;
42368 + atomic_inc(&pipe->readers);
42369 + atomic_dec(&pipe->writers);
42370
42371 - while ((pipe->readers > 1) && (!signal_pending(current))) {
42372 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
42373 wake_up_interruptible_sync(&pipe->wait);
42374 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42375 pipe_wait(pipe);
42376 }
42377
42378 - pipe->readers--;
42379 - pipe->writers++;
42380 + atomic_dec(&pipe->readers);
42381 + atomic_inc(&pipe->writers);
42382 pipe_unlock(pipe);
42383
42384 }
42385 @@ -2094,7 +2408,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42386 int retval = 0;
42387 int flag = 0;
42388 int ispipe;
42389 - static atomic_t core_dump_count = ATOMIC_INIT(0);
42390 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
42391 struct coredump_params cprm = {
42392 .signr = signr,
42393 .regs = regs,
42394 @@ -2109,6 +2423,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42395
42396 audit_core_dumps(signr);
42397
42398 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
42399 + gr_handle_brute_attach(current, cprm.mm_flags);
42400 +
42401 binfmt = mm->binfmt;
42402 if (!binfmt || !binfmt->core_dump)
42403 goto fail;
42404 @@ -2176,7 +2493,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42405 }
42406 cprm.limit = RLIM_INFINITY;
42407
42408 - dump_count = atomic_inc_return(&core_dump_count);
42409 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
42410 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42411 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42412 task_tgid_vnr(current), current->comm);
42413 @@ -2203,6 +2520,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42414 } else {
42415 struct inode *inode;
42416
42417 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42418 +
42419 if (cprm.limit < binfmt->min_coredump)
42420 goto fail_unlock;
42421
42422 @@ -2246,7 +2565,7 @@ close_fail:
42423 filp_close(cprm.file, NULL);
42424 fail_dropcount:
42425 if (ispipe)
42426 - atomic_dec(&core_dump_count);
42427 + atomic_dec_unchecked(&core_dump_count);
42428 fail_unlock:
42429 kfree(cn.corename);
42430 fail_corename:
42431 @@ -2265,7 +2584,7 @@ fail:
42432 */
42433 int dump_write(struct file *file, const void *addr, int nr)
42434 {
42435 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42436 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42437 }
42438 EXPORT_SYMBOL(dump_write);
42439
42440 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
42441 index a8cbe1b..fed04cb 100644
42442 --- a/fs/ext2/balloc.c
42443 +++ b/fs/ext2/balloc.c
42444 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
42445
42446 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42447 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42448 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42449 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42450 sbi->s_resuid != current_fsuid() &&
42451 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42452 return 0;
42453 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
42454 index a203892..4e64db5 100644
42455 --- a/fs/ext3/balloc.c
42456 +++ b/fs/ext3/balloc.c
42457 @@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
42458
42459 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42460 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42461 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42462 + if (free_blocks < root_blocks + 1 &&
42463 !use_reservation && sbi->s_resuid != current_fsuid() &&
42464 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42465 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
42466 + !capable_nolog(CAP_SYS_RESOURCE)) {
42467 return 0;
42468 }
42469 return 1;
42470 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
42471 index 12ccacd..a6035fce0 100644
42472 --- a/fs/ext4/balloc.c
42473 +++ b/fs/ext4/balloc.c
42474 @@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
42475 /* Hm, nope. Are (enough) root reserved clusters available? */
42476 if (sbi->s_resuid == current_fsuid() ||
42477 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42478 - capable(CAP_SYS_RESOURCE) ||
42479 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42480 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42481 + capable_nolog(CAP_SYS_RESOURCE)) {
42482
42483 if (free_clusters >= (nclusters + dirty_clusters))
42484 return 1;
42485 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
42486 index 5b0e26a..0aa002d 100644
42487 --- a/fs/ext4/ext4.h
42488 +++ b/fs/ext4/ext4.h
42489 @@ -1208,19 +1208,19 @@ struct ext4_sb_info {
42490 unsigned long s_mb_last_start;
42491
42492 /* stats for buddy allocator */
42493 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42494 - atomic_t s_bal_success; /* we found long enough chunks */
42495 - atomic_t s_bal_allocated; /* in blocks */
42496 - atomic_t s_bal_ex_scanned; /* total extents scanned */
42497 - atomic_t s_bal_goals; /* goal hits */
42498 - atomic_t s_bal_breaks; /* too long searches */
42499 - atomic_t s_bal_2orders; /* 2^order hits */
42500 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42501 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42502 + atomic_unchecked_t s_bal_allocated; /* in blocks */
42503 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42504 + atomic_unchecked_t s_bal_goals; /* goal hits */
42505 + atomic_unchecked_t s_bal_breaks; /* too long searches */
42506 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42507 spinlock_t s_bal_lock;
42508 unsigned long s_mb_buddies_generated;
42509 unsigned long long s_mb_generation_time;
42510 - atomic_t s_mb_lost_chunks;
42511 - atomic_t s_mb_preallocated;
42512 - atomic_t s_mb_discarded;
42513 + atomic_unchecked_t s_mb_lost_chunks;
42514 + atomic_unchecked_t s_mb_preallocated;
42515 + atomic_unchecked_t s_mb_discarded;
42516 atomic_t s_lock_busy;
42517
42518 /* locality groups */
42519 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
42520 index e2d8be8..c7f0ce9 100644
42521 --- a/fs/ext4/mballoc.c
42522 +++ b/fs/ext4/mballoc.c
42523 @@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
42524 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42525
42526 if (EXT4_SB(sb)->s_mb_stats)
42527 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42528 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42529
42530 break;
42531 }
42532 @@ -2088,7 +2088,7 @@ repeat:
42533 ac->ac_status = AC_STATUS_CONTINUE;
42534 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42535 cr = 3;
42536 - atomic_inc(&sbi->s_mb_lost_chunks);
42537 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42538 goto repeat;
42539 }
42540 }
42541 @@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
42542 if (sbi->s_mb_stats) {
42543 ext4_msg(sb, KERN_INFO,
42544 "mballoc: %u blocks %u reqs (%u success)",
42545 - atomic_read(&sbi->s_bal_allocated),
42546 - atomic_read(&sbi->s_bal_reqs),
42547 - atomic_read(&sbi->s_bal_success));
42548 + atomic_read_unchecked(&sbi->s_bal_allocated),
42549 + atomic_read_unchecked(&sbi->s_bal_reqs),
42550 + atomic_read_unchecked(&sbi->s_bal_success));
42551 ext4_msg(sb, KERN_INFO,
42552 "mballoc: %u extents scanned, %u goal hits, "
42553 "%u 2^N hits, %u breaks, %u lost",
42554 - atomic_read(&sbi->s_bal_ex_scanned),
42555 - atomic_read(&sbi->s_bal_goals),
42556 - atomic_read(&sbi->s_bal_2orders),
42557 - atomic_read(&sbi->s_bal_breaks),
42558 - atomic_read(&sbi->s_mb_lost_chunks));
42559 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42560 + atomic_read_unchecked(&sbi->s_bal_goals),
42561 + atomic_read_unchecked(&sbi->s_bal_2orders),
42562 + atomic_read_unchecked(&sbi->s_bal_breaks),
42563 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42564 ext4_msg(sb, KERN_INFO,
42565 "mballoc: %lu generated and it took %Lu",
42566 sbi->s_mb_buddies_generated,
42567 sbi->s_mb_generation_time);
42568 ext4_msg(sb, KERN_INFO,
42569 "mballoc: %u preallocated, %u discarded",
42570 - atomic_read(&sbi->s_mb_preallocated),
42571 - atomic_read(&sbi->s_mb_discarded));
42572 + atomic_read_unchecked(&sbi->s_mb_preallocated),
42573 + atomic_read_unchecked(&sbi->s_mb_discarded));
42574 }
42575
42576 free_percpu(sbi->s_locality_groups);
42577 @@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
42578 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42579
42580 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42581 - atomic_inc(&sbi->s_bal_reqs);
42582 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42583 + atomic_inc_unchecked(&sbi->s_bal_reqs);
42584 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42585 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42586 - atomic_inc(&sbi->s_bal_success);
42587 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42588 + atomic_inc_unchecked(&sbi->s_bal_success);
42589 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42590 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42591 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42592 - atomic_inc(&sbi->s_bal_goals);
42593 + atomic_inc_unchecked(&sbi->s_bal_goals);
42594 if (ac->ac_found > sbi->s_mb_max_to_scan)
42595 - atomic_inc(&sbi->s_bal_breaks);
42596 + atomic_inc_unchecked(&sbi->s_bal_breaks);
42597 }
42598
42599 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42600 @@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
42601 trace_ext4_mb_new_inode_pa(ac, pa);
42602
42603 ext4_mb_use_inode_pa(ac, pa);
42604 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
42605 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
42606
42607 ei = EXT4_I(ac->ac_inode);
42608 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42609 @@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
42610 trace_ext4_mb_new_group_pa(ac, pa);
42611
42612 ext4_mb_use_group_pa(ac, pa);
42613 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42614 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42615
42616 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42617 lg = ac->ac_lg;
42618 @@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
42619 * from the bitmap and continue.
42620 */
42621 }
42622 - atomic_add(free, &sbi->s_mb_discarded);
42623 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
42624
42625 return err;
42626 }
42627 @@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
42628 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42629 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42630 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42631 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42632 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42633 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42634
42635 return 0;
42636 diff --git a/fs/fcntl.c b/fs/fcntl.c
42637 index 22764c7..86372c9 100644
42638 --- a/fs/fcntl.c
42639 +++ b/fs/fcntl.c
42640 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
42641 if (err)
42642 return err;
42643
42644 + if (gr_handle_chroot_fowner(pid, type))
42645 + return -ENOENT;
42646 + if (gr_check_protected_task_fowner(pid, type))
42647 + return -EACCES;
42648 +
42649 f_modown(filp, pid, type, force);
42650 return 0;
42651 }
42652 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42653
42654 static int f_setown_ex(struct file *filp, unsigned long arg)
42655 {
42656 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42657 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42658 struct f_owner_ex owner;
42659 struct pid *pid;
42660 int type;
42661 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
42662
42663 static int f_getown_ex(struct file *filp, unsigned long arg)
42664 {
42665 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42666 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42667 struct f_owner_ex owner;
42668 int ret = 0;
42669
42670 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
42671 switch (cmd) {
42672 case F_DUPFD:
42673 case F_DUPFD_CLOEXEC:
42674 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42675 if (arg >= rlimit(RLIMIT_NOFILE))
42676 break;
42677 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42678 diff --git a/fs/fifo.c b/fs/fifo.c
42679 index b1a524d..4ee270e 100644
42680 --- a/fs/fifo.c
42681 +++ b/fs/fifo.c
42682 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
42683 */
42684 filp->f_op = &read_pipefifo_fops;
42685 pipe->r_counter++;
42686 - if (pipe->readers++ == 0)
42687 + if (atomic_inc_return(&pipe->readers) == 1)
42688 wake_up_partner(inode);
42689
42690 - if (!pipe->writers) {
42691 + if (!atomic_read(&pipe->writers)) {
42692 if ((filp->f_flags & O_NONBLOCK)) {
42693 /* suppress POLLHUP until we have
42694 * seen a writer */
42695 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
42696 * errno=ENXIO when there is no process reading the FIFO.
42697 */
42698 ret = -ENXIO;
42699 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42700 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42701 goto err;
42702
42703 filp->f_op = &write_pipefifo_fops;
42704 pipe->w_counter++;
42705 - if (!pipe->writers++)
42706 + if (atomic_inc_return(&pipe->writers) == 1)
42707 wake_up_partner(inode);
42708
42709 - if (!pipe->readers) {
42710 + if (!atomic_read(&pipe->readers)) {
42711 wait_for_partner(inode, &pipe->r_counter);
42712 if (signal_pending(current))
42713 goto err_wr;
42714 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
42715 */
42716 filp->f_op = &rdwr_pipefifo_fops;
42717
42718 - pipe->readers++;
42719 - pipe->writers++;
42720 + atomic_inc(&pipe->readers);
42721 + atomic_inc(&pipe->writers);
42722 pipe->r_counter++;
42723 pipe->w_counter++;
42724 - if (pipe->readers == 1 || pipe->writers == 1)
42725 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42726 wake_up_partner(inode);
42727 break;
42728
42729 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
42730 return 0;
42731
42732 err_rd:
42733 - if (!--pipe->readers)
42734 + if (atomic_dec_and_test(&pipe->readers))
42735 wake_up_interruptible(&pipe->wait);
42736 ret = -ERESTARTSYS;
42737 goto err;
42738
42739 err_wr:
42740 - if (!--pipe->writers)
42741 + if (atomic_dec_and_test(&pipe->writers))
42742 wake_up_interruptible(&pipe->wait);
42743 ret = -ERESTARTSYS;
42744 goto err;
42745
42746 err:
42747 - if (!pipe->readers && !pipe->writers)
42748 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42749 free_pipe_info(inode);
42750
42751 err_nocleanup:
42752 diff --git a/fs/file.c b/fs/file.c
42753 index 4c6992d..104cdea 100644
42754 --- a/fs/file.c
42755 +++ b/fs/file.c
42756 @@ -15,6 +15,7 @@
42757 #include <linux/slab.h>
42758 #include <linux/vmalloc.h>
42759 #include <linux/file.h>
42760 +#include <linux/security.h>
42761 #include <linux/fdtable.h>
42762 #include <linux/bitops.h>
42763 #include <linux/interrupt.h>
42764 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
42765 * N.B. For clone tasks sharing a files structure, this test
42766 * will limit the total number of files that can be opened.
42767 */
42768 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42769 if (nr >= rlimit(RLIMIT_NOFILE))
42770 return -EMFILE;
42771
42772 diff --git a/fs/filesystems.c b/fs/filesystems.c
42773 index 0845f84..7b4ebef 100644
42774 --- a/fs/filesystems.c
42775 +++ b/fs/filesystems.c
42776 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
42777 int len = dot ? dot - name : strlen(name);
42778
42779 fs = __get_fs_type(name, len);
42780 +
42781 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
42782 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42783 +#else
42784 if (!fs && (request_module("%.*s", len, name) == 0))
42785 +#endif
42786 fs = __get_fs_type(name, len);
42787
42788 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42789 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
42790 index 78b519c..a8b4979 100644
42791 --- a/fs/fs_struct.c
42792 +++ b/fs/fs_struct.c
42793 @@ -4,6 +4,7 @@
42794 #include <linux/path.h>
42795 #include <linux/slab.h>
42796 #include <linux/fs_struct.h>
42797 +#include <linux/grsecurity.h>
42798 #include "internal.h"
42799
42800 static inline void path_get_longterm(struct path *path)
42801 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
42802 old_root = fs->root;
42803 fs->root = *path;
42804 path_get_longterm(path);
42805 + gr_set_chroot_entries(current, path);
42806 write_seqcount_end(&fs->seq);
42807 spin_unlock(&fs->lock);
42808 if (old_root.dentry)
42809 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
42810 && fs->root.mnt == old_root->mnt) {
42811 path_get_longterm(new_root);
42812 fs->root = *new_root;
42813 + gr_set_chroot_entries(p, new_root);
42814 count++;
42815 }
42816 if (fs->pwd.dentry == old_root->dentry
42817 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42818 spin_lock(&fs->lock);
42819 write_seqcount_begin(&fs->seq);
42820 tsk->fs = NULL;
42821 - kill = !--fs->users;
42822 + gr_clear_chroot_entries(tsk);
42823 + kill = !atomic_dec_return(&fs->users);
42824 write_seqcount_end(&fs->seq);
42825 spin_unlock(&fs->lock);
42826 task_unlock(tsk);
42827 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42828 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42829 /* We don't need to lock fs - think why ;-) */
42830 if (fs) {
42831 - fs->users = 1;
42832 + atomic_set(&fs->users, 1);
42833 fs->in_exec = 0;
42834 spin_lock_init(&fs->lock);
42835 seqcount_init(&fs->seq);
42836 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42837 spin_lock(&old->lock);
42838 fs->root = old->root;
42839 path_get_longterm(&fs->root);
42840 + /* instead of calling gr_set_chroot_entries here,
42841 + we call it from every caller of this function
42842 + */
42843 fs->pwd = old->pwd;
42844 path_get_longterm(&fs->pwd);
42845 spin_unlock(&old->lock);
42846 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42847
42848 task_lock(current);
42849 spin_lock(&fs->lock);
42850 - kill = !--fs->users;
42851 + kill = !atomic_dec_return(&fs->users);
42852 current->fs = new_fs;
42853 + gr_set_chroot_entries(current, &new_fs->root);
42854 spin_unlock(&fs->lock);
42855 task_unlock(current);
42856
42857 @@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
42858
42859 int current_umask(void)
42860 {
42861 - return current->fs->umask;
42862 + return current->fs->umask | gr_acl_umask();
42863 }
42864 EXPORT_SYMBOL(current_umask);
42865
42866 /* to be mentioned only in INIT_TASK */
42867 struct fs_struct init_fs = {
42868 - .users = 1,
42869 + .users = ATOMIC_INIT(1),
42870 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42871 .seq = SEQCNT_ZERO,
42872 .umask = 0022,
42873 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42874 task_lock(current);
42875
42876 spin_lock(&init_fs.lock);
42877 - init_fs.users++;
42878 + atomic_inc(&init_fs.users);
42879 spin_unlock(&init_fs.lock);
42880
42881 spin_lock(&fs->lock);
42882 current->fs = &init_fs;
42883 - kill = !--fs->users;
42884 + gr_set_chroot_entries(current, &current->fs->root);
42885 + kill = !atomic_dec_return(&fs->users);
42886 spin_unlock(&fs->lock);
42887
42888 task_unlock(current);
42889 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
42890 index 9905350..02eaec4 100644
42891 --- a/fs/fscache/cookie.c
42892 +++ b/fs/fscache/cookie.c
42893 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
42894 parent ? (char *) parent->def->name : "<no-parent>",
42895 def->name, netfs_data);
42896
42897 - fscache_stat(&fscache_n_acquires);
42898 + fscache_stat_unchecked(&fscache_n_acquires);
42899
42900 /* if there's no parent cookie, then we don't create one here either */
42901 if (!parent) {
42902 - fscache_stat(&fscache_n_acquires_null);
42903 + fscache_stat_unchecked(&fscache_n_acquires_null);
42904 _leave(" [no parent]");
42905 return NULL;
42906 }
42907 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
42908 /* allocate and initialise a cookie */
42909 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42910 if (!cookie) {
42911 - fscache_stat(&fscache_n_acquires_oom);
42912 + fscache_stat_unchecked(&fscache_n_acquires_oom);
42913 _leave(" [ENOMEM]");
42914 return NULL;
42915 }
42916 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42917
42918 switch (cookie->def->type) {
42919 case FSCACHE_COOKIE_TYPE_INDEX:
42920 - fscache_stat(&fscache_n_cookie_index);
42921 + fscache_stat_unchecked(&fscache_n_cookie_index);
42922 break;
42923 case FSCACHE_COOKIE_TYPE_DATAFILE:
42924 - fscache_stat(&fscache_n_cookie_data);
42925 + fscache_stat_unchecked(&fscache_n_cookie_data);
42926 break;
42927 default:
42928 - fscache_stat(&fscache_n_cookie_special);
42929 + fscache_stat_unchecked(&fscache_n_cookie_special);
42930 break;
42931 }
42932
42933 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42934 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42935 atomic_dec(&parent->n_children);
42936 __fscache_cookie_put(cookie);
42937 - fscache_stat(&fscache_n_acquires_nobufs);
42938 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42939 _leave(" = NULL");
42940 return NULL;
42941 }
42942 }
42943
42944 - fscache_stat(&fscache_n_acquires_ok);
42945 + fscache_stat_unchecked(&fscache_n_acquires_ok);
42946 _leave(" = %p", cookie);
42947 return cookie;
42948 }
42949 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
42950 cache = fscache_select_cache_for_object(cookie->parent);
42951 if (!cache) {
42952 up_read(&fscache_addremove_sem);
42953 - fscache_stat(&fscache_n_acquires_no_cache);
42954 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42955 _leave(" = -ENOMEDIUM [no cache]");
42956 return -ENOMEDIUM;
42957 }
42958 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
42959 object = cache->ops->alloc_object(cache, cookie);
42960 fscache_stat_d(&fscache_n_cop_alloc_object);
42961 if (IS_ERR(object)) {
42962 - fscache_stat(&fscache_n_object_no_alloc);
42963 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
42964 ret = PTR_ERR(object);
42965 goto error;
42966 }
42967
42968 - fscache_stat(&fscache_n_object_alloc);
42969 + fscache_stat_unchecked(&fscache_n_object_alloc);
42970
42971 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42972
42973 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
42974 struct fscache_object *object;
42975 struct hlist_node *_p;
42976
42977 - fscache_stat(&fscache_n_updates);
42978 + fscache_stat_unchecked(&fscache_n_updates);
42979
42980 if (!cookie) {
42981 - fscache_stat(&fscache_n_updates_null);
42982 + fscache_stat_unchecked(&fscache_n_updates_null);
42983 _leave(" [no cookie]");
42984 return;
42985 }
42986 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42987 struct fscache_object *object;
42988 unsigned long event;
42989
42990 - fscache_stat(&fscache_n_relinquishes);
42991 + fscache_stat_unchecked(&fscache_n_relinquishes);
42992 if (retire)
42993 - fscache_stat(&fscache_n_relinquishes_retire);
42994 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42995
42996 if (!cookie) {
42997 - fscache_stat(&fscache_n_relinquishes_null);
42998 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
42999 _leave(" [no cookie]");
43000 return;
43001 }
43002 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
43003
43004 /* wait for the cookie to finish being instantiated (or to fail) */
43005 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
43006 - fscache_stat(&fscache_n_relinquishes_waitcrt);
43007 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
43008 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
43009 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
43010 }
43011 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
43012 index f6aad48..88dcf26 100644
43013 --- a/fs/fscache/internal.h
43014 +++ b/fs/fscache/internal.h
43015 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
43016 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
43017 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
43018
43019 -extern atomic_t fscache_n_op_pend;
43020 -extern atomic_t fscache_n_op_run;
43021 -extern atomic_t fscache_n_op_enqueue;
43022 -extern atomic_t fscache_n_op_deferred_release;
43023 -extern atomic_t fscache_n_op_release;
43024 -extern atomic_t fscache_n_op_gc;
43025 -extern atomic_t fscache_n_op_cancelled;
43026 -extern atomic_t fscache_n_op_rejected;
43027 +extern atomic_unchecked_t fscache_n_op_pend;
43028 +extern atomic_unchecked_t fscache_n_op_run;
43029 +extern atomic_unchecked_t fscache_n_op_enqueue;
43030 +extern atomic_unchecked_t fscache_n_op_deferred_release;
43031 +extern atomic_unchecked_t fscache_n_op_release;
43032 +extern atomic_unchecked_t fscache_n_op_gc;
43033 +extern atomic_unchecked_t fscache_n_op_cancelled;
43034 +extern atomic_unchecked_t fscache_n_op_rejected;
43035
43036 -extern atomic_t fscache_n_attr_changed;
43037 -extern atomic_t fscache_n_attr_changed_ok;
43038 -extern atomic_t fscache_n_attr_changed_nobufs;
43039 -extern atomic_t fscache_n_attr_changed_nomem;
43040 -extern atomic_t fscache_n_attr_changed_calls;
43041 +extern atomic_unchecked_t fscache_n_attr_changed;
43042 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
43043 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
43044 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
43045 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
43046
43047 -extern atomic_t fscache_n_allocs;
43048 -extern atomic_t fscache_n_allocs_ok;
43049 -extern atomic_t fscache_n_allocs_wait;
43050 -extern atomic_t fscache_n_allocs_nobufs;
43051 -extern atomic_t fscache_n_allocs_intr;
43052 -extern atomic_t fscache_n_allocs_object_dead;
43053 -extern atomic_t fscache_n_alloc_ops;
43054 -extern atomic_t fscache_n_alloc_op_waits;
43055 +extern atomic_unchecked_t fscache_n_allocs;
43056 +extern atomic_unchecked_t fscache_n_allocs_ok;
43057 +extern atomic_unchecked_t fscache_n_allocs_wait;
43058 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
43059 +extern atomic_unchecked_t fscache_n_allocs_intr;
43060 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
43061 +extern atomic_unchecked_t fscache_n_alloc_ops;
43062 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
43063
43064 -extern atomic_t fscache_n_retrievals;
43065 -extern atomic_t fscache_n_retrievals_ok;
43066 -extern atomic_t fscache_n_retrievals_wait;
43067 -extern atomic_t fscache_n_retrievals_nodata;
43068 -extern atomic_t fscache_n_retrievals_nobufs;
43069 -extern atomic_t fscache_n_retrievals_intr;
43070 -extern atomic_t fscache_n_retrievals_nomem;
43071 -extern atomic_t fscache_n_retrievals_object_dead;
43072 -extern atomic_t fscache_n_retrieval_ops;
43073 -extern atomic_t fscache_n_retrieval_op_waits;
43074 +extern atomic_unchecked_t fscache_n_retrievals;
43075 +extern atomic_unchecked_t fscache_n_retrievals_ok;
43076 +extern atomic_unchecked_t fscache_n_retrievals_wait;
43077 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
43078 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
43079 +extern atomic_unchecked_t fscache_n_retrievals_intr;
43080 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
43081 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
43082 +extern atomic_unchecked_t fscache_n_retrieval_ops;
43083 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
43084
43085 -extern atomic_t fscache_n_stores;
43086 -extern atomic_t fscache_n_stores_ok;
43087 -extern atomic_t fscache_n_stores_again;
43088 -extern atomic_t fscache_n_stores_nobufs;
43089 -extern atomic_t fscache_n_stores_oom;
43090 -extern atomic_t fscache_n_store_ops;
43091 -extern atomic_t fscache_n_store_calls;
43092 -extern atomic_t fscache_n_store_pages;
43093 -extern atomic_t fscache_n_store_radix_deletes;
43094 -extern atomic_t fscache_n_store_pages_over_limit;
43095 +extern atomic_unchecked_t fscache_n_stores;
43096 +extern atomic_unchecked_t fscache_n_stores_ok;
43097 +extern atomic_unchecked_t fscache_n_stores_again;
43098 +extern atomic_unchecked_t fscache_n_stores_nobufs;
43099 +extern atomic_unchecked_t fscache_n_stores_oom;
43100 +extern atomic_unchecked_t fscache_n_store_ops;
43101 +extern atomic_unchecked_t fscache_n_store_calls;
43102 +extern atomic_unchecked_t fscache_n_store_pages;
43103 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
43104 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
43105
43106 -extern atomic_t fscache_n_store_vmscan_not_storing;
43107 -extern atomic_t fscache_n_store_vmscan_gone;
43108 -extern atomic_t fscache_n_store_vmscan_busy;
43109 -extern atomic_t fscache_n_store_vmscan_cancelled;
43110 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43111 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
43112 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
43113 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43114
43115 -extern atomic_t fscache_n_marks;
43116 -extern atomic_t fscache_n_uncaches;
43117 +extern atomic_unchecked_t fscache_n_marks;
43118 +extern atomic_unchecked_t fscache_n_uncaches;
43119
43120 -extern atomic_t fscache_n_acquires;
43121 -extern atomic_t fscache_n_acquires_null;
43122 -extern atomic_t fscache_n_acquires_no_cache;
43123 -extern atomic_t fscache_n_acquires_ok;
43124 -extern atomic_t fscache_n_acquires_nobufs;
43125 -extern atomic_t fscache_n_acquires_oom;
43126 +extern atomic_unchecked_t fscache_n_acquires;
43127 +extern atomic_unchecked_t fscache_n_acquires_null;
43128 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
43129 +extern atomic_unchecked_t fscache_n_acquires_ok;
43130 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
43131 +extern atomic_unchecked_t fscache_n_acquires_oom;
43132
43133 -extern atomic_t fscache_n_updates;
43134 -extern atomic_t fscache_n_updates_null;
43135 -extern atomic_t fscache_n_updates_run;
43136 +extern atomic_unchecked_t fscache_n_updates;
43137 +extern atomic_unchecked_t fscache_n_updates_null;
43138 +extern atomic_unchecked_t fscache_n_updates_run;
43139
43140 -extern atomic_t fscache_n_relinquishes;
43141 -extern atomic_t fscache_n_relinquishes_null;
43142 -extern atomic_t fscache_n_relinquishes_waitcrt;
43143 -extern atomic_t fscache_n_relinquishes_retire;
43144 +extern atomic_unchecked_t fscache_n_relinquishes;
43145 +extern atomic_unchecked_t fscache_n_relinquishes_null;
43146 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43147 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
43148
43149 -extern atomic_t fscache_n_cookie_index;
43150 -extern atomic_t fscache_n_cookie_data;
43151 -extern atomic_t fscache_n_cookie_special;
43152 +extern atomic_unchecked_t fscache_n_cookie_index;
43153 +extern atomic_unchecked_t fscache_n_cookie_data;
43154 +extern atomic_unchecked_t fscache_n_cookie_special;
43155
43156 -extern atomic_t fscache_n_object_alloc;
43157 -extern atomic_t fscache_n_object_no_alloc;
43158 -extern atomic_t fscache_n_object_lookups;
43159 -extern atomic_t fscache_n_object_lookups_negative;
43160 -extern atomic_t fscache_n_object_lookups_positive;
43161 -extern atomic_t fscache_n_object_lookups_timed_out;
43162 -extern atomic_t fscache_n_object_created;
43163 -extern atomic_t fscache_n_object_avail;
43164 -extern atomic_t fscache_n_object_dead;
43165 +extern atomic_unchecked_t fscache_n_object_alloc;
43166 +extern atomic_unchecked_t fscache_n_object_no_alloc;
43167 +extern atomic_unchecked_t fscache_n_object_lookups;
43168 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
43169 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
43170 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
43171 +extern atomic_unchecked_t fscache_n_object_created;
43172 +extern atomic_unchecked_t fscache_n_object_avail;
43173 +extern atomic_unchecked_t fscache_n_object_dead;
43174
43175 -extern atomic_t fscache_n_checkaux_none;
43176 -extern atomic_t fscache_n_checkaux_okay;
43177 -extern atomic_t fscache_n_checkaux_update;
43178 -extern atomic_t fscache_n_checkaux_obsolete;
43179 +extern atomic_unchecked_t fscache_n_checkaux_none;
43180 +extern atomic_unchecked_t fscache_n_checkaux_okay;
43181 +extern atomic_unchecked_t fscache_n_checkaux_update;
43182 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
43183
43184 extern atomic_t fscache_n_cop_alloc_object;
43185 extern atomic_t fscache_n_cop_lookup_object;
43186 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
43187 atomic_inc(stat);
43188 }
43189
43190 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
43191 +{
43192 + atomic_inc_unchecked(stat);
43193 +}
43194 +
43195 static inline void fscache_stat_d(atomic_t *stat)
43196 {
43197 atomic_dec(stat);
43198 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
43199
43200 #define __fscache_stat(stat) (NULL)
43201 #define fscache_stat(stat) do {} while (0)
43202 +#define fscache_stat_unchecked(stat) do {} while (0)
43203 #define fscache_stat_d(stat) do {} while (0)
43204 #endif
43205
43206 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
43207 index b6b897c..0ffff9c 100644
43208 --- a/fs/fscache/object.c
43209 +++ b/fs/fscache/object.c
43210 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43211 /* update the object metadata on disk */
43212 case FSCACHE_OBJECT_UPDATING:
43213 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
43214 - fscache_stat(&fscache_n_updates_run);
43215 + fscache_stat_unchecked(&fscache_n_updates_run);
43216 fscache_stat(&fscache_n_cop_update_object);
43217 object->cache->ops->update_object(object);
43218 fscache_stat_d(&fscache_n_cop_update_object);
43219 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43220 spin_lock(&object->lock);
43221 object->state = FSCACHE_OBJECT_DEAD;
43222 spin_unlock(&object->lock);
43223 - fscache_stat(&fscache_n_object_dead);
43224 + fscache_stat_unchecked(&fscache_n_object_dead);
43225 goto terminal_transit;
43226
43227 /* handle the parent cache of this object being withdrawn from
43228 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43229 spin_lock(&object->lock);
43230 object->state = FSCACHE_OBJECT_DEAD;
43231 spin_unlock(&object->lock);
43232 - fscache_stat(&fscache_n_object_dead);
43233 + fscache_stat_unchecked(&fscache_n_object_dead);
43234 goto terminal_transit;
43235
43236 /* complain about the object being woken up once it is
43237 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43238 parent->cookie->def->name, cookie->def->name,
43239 object->cache->tag->name);
43240
43241 - fscache_stat(&fscache_n_object_lookups);
43242 + fscache_stat_unchecked(&fscache_n_object_lookups);
43243 fscache_stat(&fscache_n_cop_lookup_object);
43244 ret = object->cache->ops->lookup_object(object);
43245 fscache_stat_d(&fscache_n_cop_lookup_object);
43246 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43247 if (ret == -ETIMEDOUT) {
43248 /* probably stuck behind another object, so move this one to
43249 * the back of the queue */
43250 - fscache_stat(&fscache_n_object_lookups_timed_out);
43251 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
43252 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43253 }
43254
43255 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
43256
43257 spin_lock(&object->lock);
43258 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43259 - fscache_stat(&fscache_n_object_lookups_negative);
43260 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
43261
43262 /* transit here to allow write requests to begin stacking up
43263 * and read requests to begin returning ENODATA */
43264 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
43265 * result, in which case there may be data available */
43266 spin_lock(&object->lock);
43267 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43268 - fscache_stat(&fscache_n_object_lookups_positive);
43269 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
43270
43271 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
43272
43273 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
43274 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43275 } else {
43276 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
43277 - fscache_stat(&fscache_n_object_created);
43278 + fscache_stat_unchecked(&fscache_n_object_created);
43279
43280 object->state = FSCACHE_OBJECT_AVAILABLE;
43281 spin_unlock(&object->lock);
43282 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
43283 fscache_enqueue_dependents(object);
43284
43285 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
43286 - fscache_stat(&fscache_n_object_avail);
43287 + fscache_stat_unchecked(&fscache_n_object_avail);
43288
43289 _leave("");
43290 }
43291 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43292 enum fscache_checkaux result;
43293
43294 if (!object->cookie->def->check_aux) {
43295 - fscache_stat(&fscache_n_checkaux_none);
43296 + fscache_stat_unchecked(&fscache_n_checkaux_none);
43297 return FSCACHE_CHECKAUX_OKAY;
43298 }
43299
43300 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43301 switch (result) {
43302 /* entry okay as is */
43303 case FSCACHE_CHECKAUX_OKAY:
43304 - fscache_stat(&fscache_n_checkaux_okay);
43305 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
43306 break;
43307
43308 /* entry requires update */
43309 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
43310 - fscache_stat(&fscache_n_checkaux_update);
43311 + fscache_stat_unchecked(&fscache_n_checkaux_update);
43312 break;
43313
43314 /* entry requires deletion */
43315 case FSCACHE_CHECKAUX_OBSOLETE:
43316 - fscache_stat(&fscache_n_checkaux_obsolete);
43317 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
43318 break;
43319
43320 default:
43321 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
43322 index 30afdfa..2256596 100644
43323 --- a/fs/fscache/operation.c
43324 +++ b/fs/fscache/operation.c
43325 @@ -17,7 +17,7 @@
43326 #include <linux/slab.h>
43327 #include "internal.h"
43328
43329 -atomic_t fscache_op_debug_id;
43330 +atomic_unchecked_t fscache_op_debug_id;
43331 EXPORT_SYMBOL(fscache_op_debug_id);
43332
43333 /**
43334 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
43335 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
43336 ASSERTCMP(atomic_read(&op->usage), >, 0);
43337
43338 - fscache_stat(&fscache_n_op_enqueue);
43339 + fscache_stat_unchecked(&fscache_n_op_enqueue);
43340 switch (op->flags & FSCACHE_OP_TYPE) {
43341 case FSCACHE_OP_ASYNC:
43342 _debug("queue async");
43343 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
43344 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
43345 if (op->processor)
43346 fscache_enqueue_operation(op);
43347 - fscache_stat(&fscache_n_op_run);
43348 + fscache_stat_unchecked(&fscache_n_op_run);
43349 }
43350
43351 /*
43352 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43353 if (object->n_ops > 1) {
43354 atomic_inc(&op->usage);
43355 list_add_tail(&op->pend_link, &object->pending_ops);
43356 - fscache_stat(&fscache_n_op_pend);
43357 + fscache_stat_unchecked(&fscache_n_op_pend);
43358 } else if (!list_empty(&object->pending_ops)) {
43359 atomic_inc(&op->usage);
43360 list_add_tail(&op->pend_link, &object->pending_ops);
43361 - fscache_stat(&fscache_n_op_pend);
43362 + fscache_stat_unchecked(&fscache_n_op_pend);
43363 fscache_start_operations(object);
43364 } else {
43365 ASSERTCMP(object->n_in_progress, ==, 0);
43366 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43367 object->n_exclusive++; /* reads and writes must wait */
43368 atomic_inc(&op->usage);
43369 list_add_tail(&op->pend_link, &object->pending_ops);
43370 - fscache_stat(&fscache_n_op_pend);
43371 + fscache_stat_unchecked(&fscache_n_op_pend);
43372 ret = 0;
43373 } else {
43374 /* not allowed to submit ops in any other state */
43375 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
43376 if (object->n_exclusive > 0) {
43377 atomic_inc(&op->usage);
43378 list_add_tail(&op->pend_link, &object->pending_ops);
43379 - fscache_stat(&fscache_n_op_pend);
43380 + fscache_stat_unchecked(&fscache_n_op_pend);
43381 } else if (!list_empty(&object->pending_ops)) {
43382 atomic_inc(&op->usage);
43383 list_add_tail(&op->pend_link, &object->pending_ops);
43384 - fscache_stat(&fscache_n_op_pend);
43385 + fscache_stat_unchecked(&fscache_n_op_pend);
43386 fscache_start_operations(object);
43387 } else {
43388 ASSERTCMP(object->n_exclusive, ==, 0);
43389 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
43390 object->n_ops++;
43391 atomic_inc(&op->usage);
43392 list_add_tail(&op->pend_link, &object->pending_ops);
43393 - fscache_stat(&fscache_n_op_pend);
43394 + fscache_stat_unchecked(&fscache_n_op_pend);
43395 ret = 0;
43396 } else if (object->state == FSCACHE_OBJECT_DYING ||
43397 object->state == FSCACHE_OBJECT_LC_DYING ||
43398 object->state == FSCACHE_OBJECT_WITHDRAWING) {
43399 - fscache_stat(&fscache_n_op_rejected);
43400 + fscache_stat_unchecked(&fscache_n_op_rejected);
43401 ret = -ENOBUFS;
43402 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
43403 fscache_report_unexpected_submission(object, op, ostate);
43404 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
43405
43406 ret = -EBUSY;
43407 if (!list_empty(&op->pend_link)) {
43408 - fscache_stat(&fscache_n_op_cancelled);
43409 + fscache_stat_unchecked(&fscache_n_op_cancelled);
43410 list_del_init(&op->pend_link);
43411 object->n_ops--;
43412 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
43413 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
43414 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
43415 BUG();
43416
43417 - fscache_stat(&fscache_n_op_release);
43418 + fscache_stat_unchecked(&fscache_n_op_release);
43419
43420 if (op->release) {
43421 op->release(op);
43422 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
43423 * lock, and defer it otherwise */
43424 if (!spin_trylock(&object->lock)) {
43425 _debug("defer put");
43426 - fscache_stat(&fscache_n_op_deferred_release);
43427 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
43428
43429 cache = object->cache;
43430 spin_lock(&cache->op_gc_list_lock);
43431 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
43432
43433 _debug("GC DEFERRED REL OBJ%x OP%x",
43434 object->debug_id, op->debug_id);
43435 - fscache_stat(&fscache_n_op_gc);
43436 + fscache_stat_unchecked(&fscache_n_op_gc);
43437
43438 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43439
43440 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
43441 index 3f7a59b..cf196cc 100644
43442 --- a/fs/fscache/page.c
43443 +++ b/fs/fscache/page.c
43444 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43445 val = radix_tree_lookup(&cookie->stores, page->index);
43446 if (!val) {
43447 rcu_read_unlock();
43448 - fscache_stat(&fscache_n_store_vmscan_not_storing);
43449 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43450 __fscache_uncache_page(cookie, page);
43451 return true;
43452 }
43453 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43454 spin_unlock(&cookie->stores_lock);
43455
43456 if (xpage) {
43457 - fscache_stat(&fscache_n_store_vmscan_cancelled);
43458 - fscache_stat(&fscache_n_store_radix_deletes);
43459 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43460 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43461 ASSERTCMP(xpage, ==, page);
43462 } else {
43463 - fscache_stat(&fscache_n_store_vmscan_gone);
43464 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43465 }
43466
43467 wake_up_bit(&cookie->flags, 0);
43468 @@ -107,7 +107,7 @@ page_busy:
43469 /* we might want to wait here, but that could deadlock the allocator as
43470 * the work threads writing to the cache may all end up sleeping
43471 * on memory allocation */
43472 - fscache_stat(&fscache_n_store_vmscan_busy);
43473 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43474 return false;
43475 }
43476 EXPORT_SYMBOL(__fscache_maybe_release_page);
43477 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
43478 FSCACHE_COOKIE_STORING_TAG);
43479 if (!radix_tree_tag_get(&cookie->stores, page->index,
43480 FSCACHE_COOKIE_PENDING_TAG)) {
43481 - fscache_stat(&fscache_n_store_radix_deletes);
43482 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43483 xpage = radix_tree_delete(&cookie->stores, page->index);
43484 }
43485 spin_unlock(&cookie->stores_lock);
43486 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
43487
43488 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43489
43490 - fscache_stat(&fscache_n_attr_changed_calls);
43491 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43492
43493 if (fscache_object_is_active(object)) {
43494 fscache_stat(&fscache_n_cop_attr_changed);
43495 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43496
43497 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43498
43499 - fscache_stat(&fscache_n_attr_changed);
43500 + fscache_stat_unchecked(&fscache_n_attr_changed);
43501
43502 op = kzalloc(sizeof(*op), GFP_KERNEL);
43503 if (!op) {
43504 - fscache_stat(&fscache_n_attr_changed_nomem);
43505 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43506 _leave(" = -ENOMEM");
43507 return -ENOMEM;
43508 }
43509 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43510 if (fscache_submit_exclusive_op(object, op) < 0)
43511 goto nobufs;
43512 spin_unlock(&cookie->lock);
43513 - fscache_stat(&fscache_n_attr_changed_ok);
43514 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43515 fscache_put_operation(op);
43516 _leave(" = 0");
43517 return 0;
43518 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43519 nobufs:
43520 spin_unlock(&cookie->lock);
43521 kfree(op);
43522 - fscache_stat(&fscache_n_attr_changed_nobufs);
43523 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43524 _leave(" = %d", -ENOBUFS);
43525 return -ENOBUFS;
43526 }
43527 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
43528 /* allocate a retrieval operation and attempt to submit it */
43529 op = kzalloc(sizeof(*op), GFP_NOIO);
43530 if (!op) {
43531 - fscache_stat(&fscache_n_retrievals_nomem);
43532 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43533 return NULL;
43534 }
43535
43536 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43537 return 0;
43538 }
43539
43540 - fscache_stat(&fscache_n_retrievals_wait);
43541 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
43542
43543 jif = jiffies;
43544 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43545 fscache_wait_bit_interruptible,
43546 TASK_INTERRUPTIBLE) != 0) {
43547 - fscache_stat(&fscache_n_retrievals_intr);
43548 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43549 _leave(" = -ERESTARTSYS");
43550 return -ERESTARTSYS;
43551 }
43552 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43553 */
43554 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43555 struct fscache_retrieval *op,
43556 - atomic_t *stat_op_waits,
43557 - atomic_t *stat_object_dead)
43558 + atomic_unchecked_t *stat_op_waits,
43559 + atomic_unchecked_t *stat_object_dead)
43560 {
43561 int ret;
43562
43563 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43564 goto check_if_dead;
43565
43566 _debug(">>> WT");
43567 - fscache_stat(stat_op_waits);
43568 + fscache_stat_unchecked(stat_op_waits);
43569 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43570 fscache_wait_bit_interruptible,
43571 TASK_INTERRUPTIBLE) < 0) {
43572 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43573
43574 check_if_dead:
43575 if (unlikely(fscache_object_is_dead(object))) {
43576 - fscache_stat(stat_object_dead);
43577 + fscache_stat_unchecked(stat_object_dead);
43578 return -ENOBUFS;
43579 }
43580 return 0;
43581 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43582
43583 _enter("%p,%p,,,", cookie, page);
43584
43585 - fscache_stat(&fscache_n_retrievals);
43586 + fscache_stat_unchecked(&fscache_n_retrievals);
43587
43588 if (hlist_empty(&cookie->backing_objects))
43589 goto nobufs;
43590 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43591 goto nobufs_unlock;
43592 spin_unlock(&cookie->lock);
43593
43594 - fscache_stat(&fscache_n_retrieval_ops);
43595 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43596
43597 /* pin the netfs read context in case we need to do the actual netfs
43598 * read because we've encountered a cache read failure */
43599 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43600
43601 error:
43602 if (ret == -ENOMEM)
43603 - fscache_stat(&fscache_n_retrievals_nomem);
43604 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43605 else if (ret == -ERESTARTSYS)
43606 - fscache_stat(&fscache_n_retrievals_intr);
43607 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43608 else if (ret == -ENODATA)
43609 - fscache_stat(&fscache_n_retrievals_nodata);
43610 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43611 else if (ret < 0)
43612 - fscache_stat(&fscache_n_retrievals_nobufs);
43613 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43614 else
43615 - fscache_stat(&fscache_n_retrievals_ok);
43616 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43617
43618 fscache_put_retrieval(op);
43619 _leave(" = %d", ret);
43620 @@ -429,7 +429,7 @@ nobufs_unlock:
43621 spin_unlock(&cookie->lock);
43622 kfree(op);
43623 nobufs:
43624 - fscache_stat(&fscache_n_retrievals_nobufs);
43625 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43626 _leave(" = -ENOBUFS");
43627 return -ENOBUFS;
43628 }
43629 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43630
43631 _enter("%p,,%d,,,", cookie, *nr_pages);
43632
43633 - fscache_stat(&fscache_n_retrievals);
43634 + fscache_stat_unchecked(&fscache_n_retrievals);
43635
43636 if (hlist_empty(&cookie->backing_objects))
43637 goto nobufs;
43638 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43639 goto nobufs_unlock;
43640 spin_unlock(&cookie->lock);
43641
43642 - fscache_stat(&fscache_n_retrieval_ops);
43643 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43644
43645 /* pin the netfs read context in case we need to do the actual netfs
43646 * read because we've encountered a cache read failure */
43647 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43648
43649 error:
43650 if (ret == -ENOMEM)
43651 - fscache_stat(&fscache_n_retrievals_nomem);
43652 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43653 else if (ret == -ERESTARTSYS)
43654 - fscache_stat(&fscache_n_retrievals_intr);
43655 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43656 else if (ret == -ENODATA)
43657 - fscache_stat(&fscache_n_retrievals_nodata);
43658 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43659 else if (ret < 0)
43660 - fscache_stat(&fscache_n_retrievals_nobufs);
43661 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43662 else
43663 - fscache_stat(&fscache_n_retrievals_ok);
43664 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43665
43666 fscache_put_retrieval(op);
43667 _leave(" = %d", ret);
43668 @@ -545,7 +545,7 @@ nobufs_unlock:
43669 spin_unlock(&cookie->lock);
43670 kfree(op);
43671 nobufs:
43672 - fscache_stat(&fscache_n_retrievals_nobufs);
43673 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43674 _leave(" = -ENOBUFS");
43675 return -ENOBUFS;
43676 }
43677 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43678
43679 _enter("%p,%p,,,", cookie, page);
43680
43681 - fscache_stat(&fscache_n_allocs);
43682 + fscache_stat_unchecked(&fscache_n_allocs);
43683
43684 if (hlist_empty(&cookie->backing_objects))
43685 goto nobufs;
43686 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43687 goto nobufs_unlock;
43688 spin_unlock(&cookie->lock);
43689
43690 - fscache_stat(&fscache_n_alloc_ops);
43691 + fscache_stat_unchecked(&fscache_n_alloc_ops);
43692
43693 ret = fscache_wait_for_retrieval_activation(
43694 object, op,
43695 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43696
43697 error:
43698 if (ret == -ERESTARTSYS)
43699 - fscache_stat(&fscache_n_allocs_intr);
43700 + fscache_stat_unchecked(&fscache_n_allocs_intr);
43701 else if (ret < 0)
43702 - fscache_stat(&fscache_n_allocs_nobufs);
43703 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43704 else
43705 - fscache_stat(&fscache_n_allocs_ok);
43706 + fscache_stat_unchecked(&fscache_n_allocs_ok);
43707
43708 fscache_put_retrieval(op);
43709 _leave(" = %d", ret);
43710 @@ -625,7 +625,7 @@ nobufs_unlock:
43711 spin_unlock(&cookie->lock);
43712 kfree(op);
43713 nobufs:
43714 - fscache_stat(&fscache_n_allocs_nobufs);
43715 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43716 _leave(" = -ENOBUFS");
43717 return -ENOBUFS;
43718 }
43719 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43720
43721 spin_lock(&cookie->stores_lock);
43722
43723 - fscache_stat(&fscache_n_store_calls);
43724 + fscache_stat_unchecked(&fscache_n_store_calls);
43725
43726 /* find a page to store */
43727 page = NULL;
43728 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43729 page = results[0];
43730 _debug("gang %d [%lx]", n, page->index);
43731 if (page->index > op->store_limit) {
43732 - fscache_stat(&fscache_n_store_pages_over_limit);
43733 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43734 goto superseded;
43735 }
43736
43737 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43738 spin_unlock(&cookie->stores_lock);
43739 spin_unlock(&object->lock);
43740
43741 - fscache_stat(&fscache_n_store_pages);
43742 + fscache_stat_unchecked(&fscache_n_store_pages);
43743 fscache_stat(&fscache_n_cop_write_page);
43744 ret = object->cache->ops->write_page(op, page);
43745 fscache_stat_d(&fscache_n_cop_write_page);
43746 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43747 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43748 ASSERT(PageFsCache(page));
43749
43750 - fscache_stat(&fscache_n_stores);
43751 + fscache_stat_unchecked(&fscache_n_stores);
43752
43753 op = kzalloc(sizeof(*op), GFP_NOIO);
43754 if (!op)
43755 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43756 spin_unlock(&cookie->stores_lock);
43757 spin_unlock(&object->lock);
43758
43759 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43760 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43761 op->store_limit = object->store_limit;
43762
43763 if (fscache_submit_op(object, &op->op) < 0)
43764 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43765
43766 spin_unlock(&cookie->lock);
43767 radix_tree_preload_end();
43768 - fscache_stat(&fscache_n_store_ops);
43769 - fscache_stat(&fscache_n_stores_ok);
43770 + fscache_stat_unchecked(&fscache_n_store_ops);
43771 + fscache_stat_unchecked(&fscache_n_stores_ok);
43772
43773 /* the work queue now carries its own ref on the object */
43774 fscache_put_operation(&op->op);
43775 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43776 return 0;
43777
43778 already_queued:
43779 - fscache_stat(&fscache_n_stores_again);
43780 + fscache_stat_unchecked(&fscache_n_stores_again);
43781 already_pending:
43782 spin_unlock(&cookie->stores_lock);
43783 spin_unlock(&object->lock);
43784 spin_unlock(&cookie->lock);
43785 radix_tree_preload_end();
43786 kfree(op);
43787 - fscache_stat(&fscache_n_stores_ok);
43788 + fscache_stat_unchecked(&fscache_n_stores_ok);
43789 _leave(" = 0");
43790 return 0;
43791
43792 @@ -851,14 +851,14 @@ nobufs:
43793 spin_unlock(&cookie->lock);
43794 radix_tree_preload_end();
43795 kfree(op);
43796 - fscache_stat(&fscache_n_stores_nobufs);
43797 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
43798 _leave(" = -ENOBUFS");
43799 return -ENOBUFS;
43800
43801 nomem_free:
43802 kfree(op);
43803 nomem:
43804 - fscache_stat(&fscache_n_stores_oom);
43805 + fscache_stat_unchecked(&fscache_n_stores_oom);
43806 _leave(" = -ENOMEM");
43807 return -ENOMEM;
43808 }
43809 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
43810 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43811 ASSERTCMP(page, !=, NULL);
43812
43813 - fscache_stat(&fscache_n_uncaches);
43814 + fscache_stat_unchecked(&fscache_n_uncaches);
43815
43816 /* cache withdrawal may beat us to it */
43817 if (!PageFsCache(page))
43818 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
43819 unsigned long loop;
43820
43821 #ifdef CONFIG_FSCACHE_STATS
43822 - atomic_add(pagevec->nr, &fscache_n_marks);
43823 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43824 #endif
43825
43826 for (loop = 0; loop < pagevec->nr; loop++) {
43827 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
43828 index 4765190..2a067f2 100644
43829 --- a/fs/fscache/stats.c
43830 +++ b/fs/fscache/stats.c
43831 @@ -18,95 +18,95 @@
43832 /*
43833 * operation counters
43834 */
43835 -atomic_t fscache_n_op_pend;
43836 -atomic_t fscache_n_op_run;
43837 -atomic_t fscache_n_op_enqueue;
43838 -atomic_t fscache_n_op_requeue;
43839 -atomic_t fscache_n_op_deferred_release;
43840 -atomic_t fscache_n_op_release;
43841 -atomic_t fscache_n_op_gc;
43842 -atomic_t fscache_n_op_cancelled;
43843 -atomic_t fscache_n_op_rejected;
43844 +atomic_unchecked_t fscache_n_op_pend;
43845 +atomic_unchecked_t fscache_n_op_run;
43846 +atomic_unchecked_t fscache_n_op_enqueue;
43847 +atomic_unchecked_t fscache_n_op_requeue;
43848 +atomic_unchecked_t fscache_n_op_deferred_release;
43849 +atomic_unchecked_t fscache_n_op_release;
43850 +atomic_unchecked_t fscache_n_op_gc;
43851 +atomic_unchecked_t fscache_n_op_cancelled;
43852 +atomic_unchecked_t fscache_n_op_rejected;
43853
43854 -atomic_t fscache_n_attr_changed;
43855 -atomic_t fscache_n_attr_changed_ok;
43856 -atomic_t fscache_n_attr_changed_nobufs;
43857 -atomic_t fscache_n_attr_changed_nomem;
43858 -atomic_t fscache_n_attr_changed_calls;
43859 +atomic_unchecked_t fscache_n_attr_changed;
43860 +atomic_unchecked_t fscache_n_attr_changed_ok;
43861 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
43862 +atomic_unchecked_t fscache_n_attr_changed_nomem;
43863 +atomic_unchecked_t fscache_n_attr_changed_calls;
43864
43865 -atomic_t fscache_n_allocs;
43866 -atomic_t fscache_n_allocs_ok;
43867 -atomic_t fscache_n_allocs_wait;
43868 -atomic_t fscache_n_allocs_nobufs;
43869 -atomic_t fscache_n_allocs_intr;
43870 -atomic_t fscache_n_allocs_object_dead;
43871 -atomic_t fscache_n_alloc_ops;
43872 -atomic_t fscache_n_alloc_op_waits;
43873 +atomic_unchecked_t fscache_n_allocs;
43874 +atomic_unchecked_t fscache_n_allocs_ok;
43875 +atomic_unchecked_t fscache_n_allocs_wait;
43876 +atomic_unchecked_t fscache_n_allocs_nobufs;
43877 +atomic_unchecked_t fscache_n_allocs_intr;
43878 +atomic_unchecked_t fscache_n_allocs_object_dead;
43879 +atomic_unchecked_t fscache_n_alloc_ops;
43880 +atomic_unchecked_t fscache_n_alloc_op_waits;
43881
43882 -atomic_t fscache_n_retrievals;
43883 -atomic_t fscache_n_retrievals_ok;
43884 -atomic_t fscache_n_retrievals_wait;
43885 -atomic_t fscache_n_retrievals_nodata;
43886 -atomic_t fscache_n_retrievals_nobufs;
43887 -atomic_t fscache_n_retrievals_intr;
43888 -atomic_t fscache_n_retrievals_nomem;
43889 -atomic_t fscache_n_retrievals_object_dead;
43890 -atomic_t fscache_n_retrieval_ops;
43891 -atomic_t fscache_n_retrieval_op_waits;
43892 +atomic_unchecked_t fscache_n_retrievals;
43893 +atomic_unchecked_t fscache_n_retrievals_ok;
43894 +atomic_unchecked_t fscache_n_retrievals_wait;
43895 +atomic_unchecked_t fscache_n_retrievals_nodata;
43896 +atomic_unchecked_t fscache_n_retrievals_nobufs;
43897 +atomic_unchecked_t fscache_n_retrievals_intr;
43898 +atomic_unchecked_t fscache_n_retrievals_nomem;
43899 +atomic_unchecked_t fscache_n_retrievals_object_dead;
43900 +atomic_unchecked_t fscache_n_retrieval_ops;
43901 +atomic_unchecked_t fscache_n_retrieval_op_waits;
43902
43903 -atomic_t fscache_n_stores;
43904 -atomic_t fscache_n_stores_ok;
43905 -atomic_t fscache_n_stores_again;
43906 -atomic_t fscache_n_stores_nobufs;
43907 -atomic_t fscache_n_stores_oom;
43908 -atomic_t fscache_n_store_ops;
43909 -atomic_t fscache_n_store_calls;
43910 -atomic_t fscache_n_store_pages;
43911 -atomic_t fscache_n_store_radix_deletes;
43912 -atomic_t fscache_n_store_pages_over_limit;
43913 +atomic_unchecked_t fscache_n_stores;
43914 +atomic_unchecked_t fscache_n_stores_ok;
43915 +atomic_unchecked_t fscache_n_stores_again;
43916 +atomic_unchecked_t fscache_n_stores_nobufs;
43917 +atomic_unchecked_t fscache_n_stores_oom;
43918 +atomic_unchecked_t fscache_n_store_ops;
43919 +atomic_unchecked_t fscache_n_store_calls;
43920 +atomic_unchecked_t fscache_n_store_pages;
43921 +atomic_unchecked_t fscache_n_store_radix_deletes;
43922 +atomic_unchecked_t fscache_n_store_pages_over_limit;
43923
43924 -atomic_t fscache_n_store_vmscan_not_storing;
43925 -atomic_t fscache_n_store_vmscan_gone;
43926 -atomic_t fscache_n_store_vmscan_busy;
43927 -atomic_t fscache_n_store_vmscan_cancelled;
43928 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43929 +atomic_unchecked_t fscache_n_store_vmscan_gone;
43930 +atomic_unchecked_t fscache_n_store_vmscan_busy;
43931 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43932
43933 -atomic_t fscache_n_marks;
43934 -atomic_t fscache_n_uncaches;
43935 +atomic_unchecked_t fscache_n_marks;
43936 +atomic_unchecked_t fscache_n_uncaches;
43937
43938 -atomic_t fscache_n_acquires;
43939 -atomic_t fscache_n_acquires_null;
43940 -atomic_t fscache_n_acquires_no_cache;
43941 -atomic_t fscache_n_acquires_ok;
43942 -atomic_t fscache_n_acquires_nobufs;
43943 -atomic_t fscache_n_acquires_oom;
43944 +atomic_unchecked_t fscache_n_acquires;
43945 +atomic_unchecked_t fscache_n_acquires_null;
43946 +atomic_unchecked_t fscache_n_acquires_no_cache;
43947 +atomic_unchecked_t fscache_n_acquires_ok;
43948 +atomic_unchecked_t fscache_n_acquires_nobufs;
43949 +atomic_unchecked_t fscache_n_acquires_oom;
43950
43951 -atomic_t fscache_n_updates;
43952 -atomic_t fscache_n_updates_null;
43953 -atomic_t fscache_n_updates_run;
43954 +atomic_unchecked_t fscache_n_updates;
43955 +atomic_unchecked_t fscache_n_updates_null;
43956 +atomic_unchecked_t fscache_n_updates_run;
43957
43958 -atomic_t fscache_n_relinquishes;
43959 -atomic_t fscache_n_relinquishes_null;
43960 -atomic_t fscache_n_relinquishes_waitcrt;
43961 -atomic_t fscache_n_relinquishes_retire;
43962 +atomic_unchecked_t fscache_n_relinquishes;
43963 +atomic_unchecked_t fscache_n_relinquishes_null;
43964 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43965 +atomic_unchecked_t fscache_n_relinquishes_retire;
43966
43967 -atomic_t fscache_n_cookie_index;
43968 -atomic_t fscache_n_cookie_data;
43969 -atomic_t fscache_n_cookie_special;
43970 +atomic_unchecked_t fscache_n_cookie_index;
43971 +atomic_unchecked_t fscache_n_cookie_data;
43972 +atomic_unchecked_t fscache_n_cookie_special;
43973
43974 -atomic_t fscache_n_object_alloc;
43975 -atomic_t fscache_n_object_no_alloc;
43976 -atomic_t fscache_n_object_lookups;
43977 -atomic_t fscache_n_object_lookups_negative;
43978 -atomic_t fscache_n_object_lookups_positive;
43979 -atomic_t fscache_n_object_lookups_timed_out;
43980 -atomic_t fscache_n_object_created;
43981 -atomic_t fscache_n_object_avail;
43982 -atomic_t fscache_n_object_dead;
43983 +atomic_unchecked_t fscache_n_object_alloc;
43984 +atomic_unchecked_t fscache_n_object_no_alloc;
43985 +atomic_unchecked_t fscache_n_object_lookups;
43986 +atomic_unchecked_t fscache_n_object_lookups_negative;
43987 +atomic_unchecked_t fscache_n_object_lookups_positive;
43988 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
43989 +atomic_unchecked_t fscache_n_object_created;
43990 +atomic_unchecked_t fscache_n_object_avail;
43991 +atomic_unchecked_t fscache_n_object_dead;
43992
43993 -atomic_t fscache_n_checkaux_none;
43994 -atomic_t fscache_n_checkaux_okay;
43995 -atomic_t fscache_n_checkaux_update;
43996 -atomic_t fscache_n_checkaux_obsolete;
43997 +atomic_unchecked_t fscache_n_checkaux_none;
43998 +atomic_unchecked_t fscache_n_checkaux_okay;
43999 +atomic_unchecked_t fscache_n_checkaux_update;
44000 +atomic_unchecked_t fscache_n_checkaux_obsolete;
44001
44002 atomic_t fscache_n_cop_alloc_object;
44003 atomic_t fscache_n_cop_lookup_object;
44004 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
44005 seq_puts(m, "FS-Cache statistics\n");
44006
44007 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
44008 - atomic_read(&fscache_n_cookie_index),
44009 - atomic_read(&fscache_n_cookie_data),
44010 - atomic_read(&fscache_n_cookie_special));
44011 + atomic_read_unchecked(&fscache_n_cookie_index),
44012 + atomic_read_unchecked(&fscache_n_cookie_data),
44013 + atomic_read_unchecked(&fscache_n_cookie_special));
44014
44015 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
44016 - atomic_read(&fscache_n_object_alloc),
44017 - atomic_read(&fscache_n_object_no_alloc),
44018 - atomic_read(&fscache_n_object_avail),
44019 - atomic_read(&fscache_n_object_dead));
44020 + atomic_read_unchecked(&fscache_n_object_alloc),
44021 + atomic_read_unchecked(&fscache_n_object_no_alloc),
44022 + atomic_read_unchecked(&fscache_n_object_avail),
44023 + atomic_read_unchecked(&fscache_n_object_dead));
44024 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
44025 - atomic_read(&fscache_n_checkaux_none),
44026 - atomic_read(&fscache_n_checkaux_okay),
44027 - atomic_read(&fscache_n_checkaux_update),
44028 - atomic_read(&fscache_n_checkaux_obsolete));
44029 + atomic_read_unchecked(&fscache_n_checkaux_none),
44030 + atomic_read_unchecked(&fscache_n_checkaux_okay),
44031 + atomic_read_unchecked(&fscache_n_checkaux_update),
44032 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
44033
44034 seq_printf(m, "Pages : mrk=%u unc=%u\n",
44035 - atomic_read(&fscache_n_marks),
44036 - atomic_read(&fscache_n_uncaches));
44037 + atomic_read_unchecked(&fscache_n_marks),
44038 + atomic_read_unchecked(&fscache_n_uncaches));
44039
44040 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
44041 " oom=%u\n",
44042 - atomic_read(&fscache_n_acquires),
44043 - atomic_read(&fscache_n_acquires_null),
44044 - atomic_read(&fscache_n_acquires_no_cache),
44045 - atomic_read(&fscache_n_acquires_ok),
44046 - atomic_read(&fscache_n_acquires_nobufs),
44047 - atomic_read(&fscache_n_acquires_oom));
44048 + atomic_read_unchecked(&fscache_n_acquires),
44049 + atomic_read_unchecked(&fscache_n_acquires_null),
44050 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
44051 + atomic_read_unchecked(&fscache_n_acquires_ok),
44052 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
44053 + atomic_read_unchecked(&fscache_n_acquires_oom));
44054
44055 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
44056 - atomic_read(&fscache_n_object_lookups),
44057 - atomic_read(&fscache_n_object_lookups_negative),
44058 - atomic_read(&fscache_n_object_lookups_positive),
44059 - atomic_read(&fscache_n_object_created),
44060 - atomic_read(&fscache_n_object_lookups_timed_out));
44061 + atomic_read_unchecked(&fscache_n_object_lookups),
44062 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
44063 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
44064 + atomic_read_unchecked(&fscache_n_object_created),
44065 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
44066
44067 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
44068 - atomic_read(&fscache_n_updates),
44069 - atomic_read(&fscache_n_updates_null),
44070 - atomic_read(&fscache_n_updates_run));
44071 + atomic_read_unchecked(&fscache_n_updates),
44072 + atomic_read_unchecked(&fscache_n_updates_null),
44073 + atomic_read_unchecked(&fscache_n_updates_run));
44074
44075 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
44076 - atomic_read(&fscache_n_relinquishes),
44077 - atomic_read(&fscache_n_relinquishes_null),
44078 - atomic_read(&fscache_n_relinquishes_waitcrt),
44079 - atomic_read(&fscache_n_relinquishes_retire));
44080 + atomic_read_unchecked(&fscache_n_relinquishes),
44081 + atomic_read_unchecked(&fscache_n_relinquishes_null),
44082 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
44083 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
44084
44085 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
44086 - atomic_read(&fscache_n_attr_changed),
44087 - atomic_read(&fscache_n_attr_changed_ok),
44088 - atomic_read(&fscache_n_attr_changed_nobufs),
44089 - atomic_read(&fscache_n_attr_changed_nomem),
44090 - atomic_read(&fscache_n_attr_changed_calls));
44091 + atomic_read_unchecked(&fscache_n_attr_changed),
44092 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
44093 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
44094 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
44095 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
44096
44097 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
44098 - atomic_read(&fscache_n_allocs),
44099 - atomic_read(&fscache_n_allocs_ok),
44100 - atomic_read(&fscache_n_allocs_wait),
44101 - atomic_read(&fscache_n_allocs_nobufs),
44102 - atomic_read(&fscache_n_allocs_intr));
44103 + atomic_read_unchecked(&fscache_n_allocs),
44104 + atomic_read_unchecked(&fscache_n_allocs_ok),
44105 + atomic_read_unchecked(&fscache_n_allocs_wait),
44106 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
44107 + atomic_read_unchecked(&fscache_n_allocs_intr));
44108 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
44109 - atomic_read(&fscache_n_alloc_ops),
44110 - atomic_read(&fscache_n_alloc_op_waits),
44111 - atomic_read(&fscache_n_allocs_object_dead));
44112 + atomic_read_unchecked(&fscache_n_alloc_ops),
44113 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
44114 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
44115
44116 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
44117 " int=%u oom=%u\n",
44118 - atomic_read(&fscache_n_retrievals),
44119 - atomic_read(&fscache_n_retrievals_ok),
44120 - atomic_read(&fscache_n_retrievals_wait),
44121 - atomic_read(&fscache_n_retrievals_nodata),
44122 - atomic_read(&fscache_n_retrievals_nobufs),
44123 - atomic_read(&fscache_n_retrievals_intr),
44124 - atomic_read(&fscache_n_retrievals_nomem));
44125 + atomic_read_unchecked(&fscache_n_retrievals),
44126 + atomic_read_unchecked(&fscache_n_retrievals_ok),
44127 + atomic_read_unchecked(&fscache_n_retrievals_wait),
44128 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
44129 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
44130 + atomic_read_unchecked(&fscache_n_retrievals_intr),
44131 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
44132 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
44133 - atomic_read(&fscache_n_retrieval_ops),
44134 - atomic_read(&fscache_n_retrieval_op_waits),
44135 - atomic_read(&fscache_n_retrievals_object_dead));
44136 + atomic_read_unchecked(&fscache_n_retrieval_ops),
44137 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
44138 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
44139
44140 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
44141 - atomic_read(&fscache_n_stores),
44142 - atomic_read(&fscache_n_stores_ok),
44143 - atomic_read(&fscache_n_stores_again),
44144 - atomic_read(&fscache_n_stores_nobufs),
44145 - atomic_read(&fscache_n_stores_oom));
44146 + atomic_read_unchecked(&fscache_n_stores),
44147 + atomic_read_unchecked(&fscache_n_stores_ok),
44148 + atomic_read_unchecked(&fscache_n_stores_again),
44149 + atomic_read_unchecked(&fscache_n_stores_nobufs),
44150 + atomic_read_unchecked(&fscache_n_stores_oom));
44151 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
44152 - atomic_read(&fscache_n_store_ops),
44153 - atomic_read(&fscache_n_store_calls),
44154 - atomic_read(&fscache_n_store_pages),
44155 - atomic_read(&fscache_n_store_radix_deletes),
44156 - atomic_read(&fscache_n_store_pages_over_limit));
44157 + atomic_read_unchecked(&fscache_n_store_ops),
44158 + atomic_read_unchecked(&fscache_n_store_calls),
44159 + atomic_read_unchecked(&fscache_n_store_pages),
44160 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
44161 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
44162
44163 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
44164 - atomic_read(&fscache_n_store_vmscan_not_storing),
44165 - atomic_read(&fscache_n_store_vmscan_gone),
44166 - atomic_read(&fscache_n_store_vmscan_busy),
44167 - atomic_read(&fscache_n_store_vmscan_cancelled));
44168 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
44169 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
44170 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
44171 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
44172
44173 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
44174 - atomic_read(&fscache_n_op_pend),
44175 - atomic_read(&fscache_n_op_run),
44176 - atomic_read(&fscache_n_op_enqueue),
44177 - atomic_read(&fscache_n_op_cancelled),
44178 - atomic_read(&fscache_n_op_rejected));
44179 + atomic_read_unchecked(&fscache_n_op_pend),
44180 + atomic_read_unchecked(&fscache_n_op_run),
44181 + atomic_read_unchecked(&fscache_n_op_enqueue),
44182 + atomic_read_unchecked(&fscache_n_op_cancelled),
44183 + atomic_read_unchecked(&fscache_n_op_rejected));
44184 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
44185 - atomic_read(&fscache_n_op_deferred_release),
44186 - atomic_read(&fscache_n_op_release),
44187 - atomic_read(&fscache_n_op_gc));
44188 + atomic_read_unchecked(&fscache_n_op_deferred_release),
44189 + atomic_read_unchecked(&fscache_n_op_release),
44190 + atomic_read_unchecked(&fscache_n_op_gc));
44191
44192 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
44193 atomic_read(&fscache_n_cop_alloc_object),
44194 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
44195 index 3426521..3b75162 100644
44196 --- a/fs/fuse/cuse.c
44197 +++ b/fs/fuse/cuse.c
44198 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
44199 INIT_LIST_HEAD(&cuse_conntbl[i]);
44200
44201 /* inherit and extend fuse_dev_operations */
44202 - cuse_channel_fops = fuse_dev_operations;
44203 - cuse_channel_fops.owner = THIS_MODULE;
44204 - cuse_channel_fops.open = cuse_channel_open;
44205 - cuse_channel_fops.release = cuse_channel_release;
44206 + pax_open_kernel();
44207 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
44208 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
44209 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
44210 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
44211 + pax_close_kernel();
44212
44213 cuse_class = class_create(THIS_MODULE, "cuse");
44214 if (IS_ERR(cuse_class))
44215 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
44216 index 2aaf3ea..8e50863 100644
44217 --- a/fs/fuse/dev.c
44218 +++ b/fs/fuse/dev.c
44219 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
44220 ret = 0;
44221 pipe_lock(pipe);
44222
44223 - if (!pipe->readers) {
44224 + if (!atomic_read(&pipe->readers)) {
44225 send_sig(SIGPIPE, current, 0);
44226 if (!ret)
44227 ret = -EPIPE;
44228 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
44229 index 9f63e49..d8a64c0 100644
44230 --- a/fs/fuse/dir.c
44231 +++ b/fs/fuse/dir.c
44232 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
44233 return link;
44234 }
44235
44236 -static void free_link(char *link)
44237 +static void free_link(const char *link)
44238 {
44239 if (!IS_ERR(link))
44240 free_page((unsigned long) link);
44241 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
44242 index cfd4959..a780959 100644
44243 --- a/fs/gfs2/inode.c
44244 +++ b/fs/gfs2/inode.c
44245 @@ -1490,7 +1490,7 @@ out:
44246
44247 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44248 {
44249 - char *s = nd_get_link(nd);
44250 + const char *s = nd_get_link(nd);
44251 if (!IS_ERR(s))
44252 kfree(s);
44253 }
44254 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
44255 index 0be5a78..9cfb853 100644
44256 --- a/fs/hugetlbfs/inode.c
44257 +++ b/fs/hugetlbfs/inode.c
44258 @@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
44259 .kill_sb = kill_litter_super,
44260 };
44261
44262 -static struct vfsmount *hugetlbfs_vfsmount;
44263 +struct vfsmount *hugetlbfs_vfsmount;
44264
44265 static int can_do_hugetlb_shm(void)
44266 {
44267 diff --git a/fs/inode.c b/fs/inode.c
44268 index ee4e66b..0451521 100644
44269 --- a/fs/inode.c
44270 +++ b/fs/inode.c
44271 @@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
44272
44273 #ifdef CONFIG_SMP
44274 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
44275 - static atomic_t shared_last_ino;
44276 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
44277 + static atomic_unchecked_t shared_last_ino;
44278 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
44279
44280 res = next - LAST_INO_BATCH;
44281 }
44282 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
44283 index e513f19..2ab1351 100644
44284 --- a/fs/jffs2/erase.c
44285 +++ b/fs/jffs2/erase.c
44286 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
44287 struct jffs2_unknown_node marker = {
44288 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44289 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44290 - .totlen = cpu_to_je32(c->cleanmarker_size)
44291 + .totlen = cpu_to_je32(c->cleanmarker_size),
44292 + .hdr_crc = cpu_to_je32(0)
44293 };
44294
44295 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44296 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
44297 index b09e51d..e482afa 100644
44298 --- a/fs/jffs2/wbuf.c
44299 +++ b/fs/jffs2/wbuf.c
44300 @@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
44301 {
44302 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44303 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44304 - .totlen = constant_cpu_to_je32(8)
44305 + .totlen = constant_cpu_to_je32(8),
44306 + .hdr_crc = constant_cpu_to_je32(0)
44307 };
44308
44309 /*
44310 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
44311 index a44eff0..462e07d 100644
44312 --- a/fs/jfs/super.c
44313 +++ b/fs/jfs/super.c
44314 @@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
44315
44316 jfs_inode_cachep =
44317 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44318 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44319 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44320 init_once);
44321 if (jfs_inode_cachep == NULL)
44322 return -ENOMEM;
44323 diff --git a/fs/libfs.c b/fs/libfs.c
44324 index f6d411e..e82a08d 100644
44325 --- a/fs/libfs.c
44326 +++ b/fs/libfs.c
44327 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44328
44329 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44330 struct dentry *next;
44331 + char d_name[sizeof(next->d_iname)];
44332 + const unsigned char *name;
44333 +
44334 next = list_entry(p, struct dentry, d_u.d_child);
44335 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44336 if (!simple_positive(next)) {
44337 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44338
44339 spin_unlock(&next->d_lock);
44340 spin_unlock(&dentry->d_lock);
44341 - if (filldir(dirent, next->d_name.name,
44342 + name = next->d_name.name;
44343 + if (name == next->d_iname) {
44344 + memcpy(d_name, name, next->d_name.len);
44345 + name = d_name;
44346 + }
44347 + if (filldir(dirent, name,
44348 next->d_name.len, filp->f_pos,
44349 next->d_inode->i_ino,
44350 dt_type(next->d_inode)) < 0)
44351 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
44352 index 8392cb8..80d6193 100644
44353 --- a/fs/lockd/clntproc.c
44354 +++ b/fs/lockd/clntproc.c
44355 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
44356 /*
44357 * Cookie counter for NLM requests
44358 */
44359 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44360 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44361
44362 void nlmclnt_next_cookie(struct nlm_cookie *c)
44363 {
44364 - u32 cookie = atomic_inc_return(&nlm_cookie);
44365 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44366
44367 memcpy(c->data, &cookie, 4);
44368 c->len=4;
44369 diff --git a/fs/locks.c b/fs/locks.c
44370 index 637694b..f84a121 100644
44371 --- a/fs/locks.c
44372 +++ b/fs/locks.c
44373 @@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
44374 return;
44375
44376 if (filp->f_op && filp->f_op->flock) {
44377 - struct file_lock fl = {
44378 + struct file_lock flock = {
44379 .fl_pid = current->tgid,
44380 .fl_file = filp,
44381 .fl_flags = FL_FLOCK,
44382 .fl_type = F_UNLCK,
44383 .fl_end = OFFSET_MAX,
44384 };
44385 - filp->f_op->flock(filp, F_SETLKW, &fl);
44386 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
44387 - fl.fl_ops->fl_release_private(&fl);
44388 + filp->f_op->flock(filp, F_SETLKW, &flock);
44389 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
44390 + flock.fl_ops->fl_release_private(&flock);
44391 }
44392
44393 lock_flocks();
44394 diff --git a/fs/namei.c b/fs/namei.c
44395 index 744e942..24ef47f 100644
44396 --- a/fs/namei.c
44397 +++ b/fs/namei.c
44398 @@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
44399 if (ret != -EACCES)
44400 return ret;
44401
44402 +#ifdef CONFIG_GRKERNSEC
44403 + /* we'll block if we have to log due to a denied capability use */
44404 + if (mask & MAY_NOT_BLOCK)
44405 + return -ECHILD;
44406 +#endif
44407 +
44408 if (S_ISDIR(inode->i_mode)) {
44409 /* DACs are overridable for directories */
44410 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44411 - return 0;
44412 if (!(mask & MAY_WRITE))
44413 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44414 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44415 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44416 return 0;
44417 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44418 + return 0;
44419 return -EACCES;
44420 }
44421 /*
44422 + * Searching includes executable on directories, else just read.
44423 + */
44424 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44425 + if (mask == MAY_READ)
44426 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44427 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44428 + return 0;
44429 +
44430 + /*
44431 * Read/write DACs are always overridable.
44432 * Executable DACs are overridable when there is
44433 * at least one exec bit set.
44434 @@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
44435 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44436 return 0;
44437
44438 - /*
44439 - * Searching includes executable on directories, else just read.
44440 - */
44441 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44442 - if (mask == MAY_READ)
44443 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44444 - return 0;
44445 -
44446 return -EACCES;
44447 }
44448
44449 @@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
44450 return error;
44451 }
44452
44453 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
44454 + dentry->d_inode, dentry, nd->path.mnt)) {
44455 + error = -EACCES;
44456 + *p = ERR_PTR(error); /* no ->put_link(), please */
44457 + path_put(&nd->path);
44458 + return error;
44459 + }
44460 +
44461 nd->last_type = LAST_BIND;
44462 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44463 error = PTR_ERR(*p);
44464 if (!IS_ERR(*p)) {
44465 - char *s = nd_get_link(nd);
44466 + const char *s = nd_get_link(nd);
44467 error = 0;
44468 if (s)
44469 error = __vfs_follow_link(nd, s);
44470 @@ -1624,6 +1640,21 @@ static int path_lookupat(int dfd, const char *name,
44471 if (!err)
44472 err = complete_walk(nd);
44473
44474 + if (!(nd->flags & LOOKUP_PARENT)) {
44475 +#ifdef CONFIG_GRKERNSEC
44476 + if (flags & LOOKUP_RCU) {
44477 + if (!err)
44478 + path_put(&nd->path);
44479 + err = -ECHILD;
44480 + } else
44481 +#endif
44482 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44483 + if (!err)
44484 + path_put(&nd->path);
44485 + err = -ENOENT;
44486 + }
44487 + }
44488 +
44489 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44490 if (!nd->inode->i_op->lookup) {
44491 path_put(&nd->path);
44492 @@ -1651,6 +1682,15 @@ static int do_path_lookup(int dfd, const char *name,
44493 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44494
44495 if (likely(!retval)) {
44496 + if (*name != '/' && nd->path.dentry && nd->inode) {
44497 +#ifdef CONFIG_GRKERNSEC
44498 + if (flags & LOOKUP_RCU)
44499 + return -ECHILD;
44500 +#endif
44501 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44502 + return -ENOENT;
44503 + }
44504 +
44505 if (unlikely(!audit_dummy_context())) {
44506 if (nd->path.dentry && nd->inode)
44507 audit_inode(name, nd->path.dentry);
44508 @@ -2048,6 +2088,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
44509 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
44510 return -EPERM;
44511
44512 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
44513 + return -EPERM;
44514 + if (gr_handle_rawio(inode))
44515 + return -EPERM;
44516 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
44517 + return -EACCES;
44518 +
44519 return 0;
44520 }
44521
44522 @@ -2109,6 +2156,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44523 error = complete_walk(nd);
44524 if (error)
44525 return ERR_PTR(error);
44526 +#ifdef CONFIG_GRKERNSEC
44527 + if (nd->flags & LOOKUP_RCU) {
44528 + error = -ECHILD;
44529 + goto exit;
44530 + }
44531 +#endif
44532 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44533 + error = -ENOENT;
44534 + goto exit;
44535 + }
44536 audit_inode(pathname, nd->path.dentry);
44537 if (open_flag & O_CREAT) {
44538 error = -EISDIR;
44539 @@ -2119,6 +2176,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44540 error = complete_walk(nd);
44541 if (error)
44542 return ERR_PTR(error);
44543 +#ifdef CONFIG_GRKERNSEC
44544 + if (nd->flags & LOOKUP_RCU) {
44545 + error = -ECHILD;
44546 + goto exit;
44547 + }
44548 +#endif
44549 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44550 + error = -ENOENT;
44551 + goto exit;
44552 + }
44553 audit_inode(pathname, dir);
44554 goto ok;
44555 }
44556 @@ -2140,6 +2207,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44557 error = complete_walk(nd);
44558 if (error)
44559 return ERR_PTR(-ECHILD);
44560 +#ifdef CONFIG_GRKERNSEC
44561 + if (nd->flags & LOOKUP_RCU) {
44562 + error = -ECHILD;
44563 + goto exit;
44564 + }
44565 +#endif
44566 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44567 + error = -ENOENT;
44568 + goto exit;
44569 + }
44570
44571 error = -ENOTDIR;
44572 if (nd->flags & LOOKUP_DIRECTORY) {
44573 @@ -2180,6 +2257,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44574 /* Negative dentry, just create the file */
44575 if (!dentry->d_inode) {
44576 int mode = op->mode;
44577 +
44578 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44579 + error = -EACCES;
44580 + goto exit_mutex_unlock;
44581 + }
44582 +
44583 if (!IS_POSIXACL(dir->d_inode))
44584 mode &= ~current_umask();
44585 /*
44586 @@ -2203,6 +2286,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44587 error = vfs_create(dir->d_inode, dentry, mode, nd);
44588 if (error)
44589 goto exit_mutex_unlock;
44590 + else
44591 + gr_handle_create(path->dentry, path->mnt);
44592 mutex_unlock(&dir->d_inode->i_mutex);
44593 dput(nd->path.dentry);
44594 nd->path.dentry = dentry;
44595 @@ -2212,6 +2297,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44596 /*
44597 * It already exists.
44598 */
44599 +
44600 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44601 + error = -ENOENT;
44602 + goto exit_mutex_unlock;
44603 + }
44604 +
44605 + /* only check if O_CREAT is specified, all other checks need to go
44606 + into may_open */
44607 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44608 + error = -EACCES;
44609 + goto exit_mutex_unlock;
44610 + }
44611 +
44612 mutex_unlock(&dir->d_inode->i_mutex);
44613 audit_inode(pathname, path->dentry);
44614
44615 @@ -2424,6 +2522,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
44616 *path = nd.path;
44617 return dentry;
44618 eexist:
44619 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44620 + dput(dentry);
44621 + dentry = ERR_PTR(-ENOENT);
44622 + goto fail;
44623 + }
44624 dput(dentry);
44625 dentry = ERR_PTR(-EEXIST);
44626 fail:
44627 @@ -2446,6 +2549,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
44628 }
44629 EXPORT_SYMBOL(user_path_create);
44630
44631 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44632 +{
44633 + char *tmp = getname(pathname);
44634 + struct dentry *res;
44635 + if (IS_ERR(tmp))
44636 + return ERR_CAST(tmp);
44637 + res = kern_path_create(dfd, tmp, path, is_dir);
44638 + if (IS_ERR(res))
44639 + putname(tmp);
44640 + else
44641 + *to = tmp;
44642 + return res;
44643 +}
44644 +
44645 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44646 {
44647 int error = may_create(dir, dentry);
44648 @@ -2513,6 +2630,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44649 error = mnt_want_write(path.mnt);
44650 if (error)
44651 goto out_dput;
44652 +
44653 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44654 + error = -EPERM;
44655 + goto out_drop_write;
44656 + }
44657 +
44658 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44659 + error = -EACCES;
44660 + goto out_drop_write;
44661 + }
44662 +
44663 error = security_path_mknod(&path, dentry, mode, dev);
44664 if (error)
44665 goto out_drop_write;
44666 @@ -2530,6 +2658,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44667 }
44668 out_drop_write:
44669 mnt_drop_write(path.mnt);
44670 +
44671 + if (!error)
44672 + gr_handle_create(dentry, path.mnt);
44673 out_dput:
44674 dput(dentry);
44675 mutex_unlock(&path.dentry->d_inode->i_mutex);
44676 @@ -2579,12 +2710,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
44677 error = mnt_want_write(path.mnt);
44678 if (error)
44679 goto out_dput;
44680 +
44681 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44682 + error = -EACCES;
44683 + goto out_drop_write;
44684 + }
44685 +
44686 error = security_path_mkdir(&path, dentry, mode);
44687 if (error)
44688 goto out_drop_write;
44689 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44690 out_drop_write:
44691 mnt_drop_write(path.mnt);
44692 +
44693 + if (!error)
44694 + gr_handle_create(dentry, path.mnt);
44695 out_dput:
44696 dput(dentry);
44697 mutex_unlock(&path.dentry->d_inode->i_mutex);
44698 @@ -2664,6 +2804,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44699 char * name;
44700 struct dentry *dentry;
44701 struct nameidata nd;
44702 + ino_t saved_ino = 0;
44703 + dev_t saved_dev = 0;
44704
44705 error = user_path_parent(dfd, pathname, &nd, &name);
44706 if (error)
44707 @@ -2692,6 +2834,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
44708 error = -ENOENT;
44709 goto exit3;
44710 }
44711 +
44712 + saved_ino = dentry->d_inode->i_ino;
44713 + saved_dev = gr_get_dev_from_dentry(dentry);
44714 +
44715 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44716 + error = -EACCES;
44717 + goto exit3;
44718 + }
44719 +
44720 error = mnt_want_write(nd.path.mnt);
44721 if (error)
44722 goto exit3;
44723 @@ -2699,6 +2850,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44724 if (error)
44725 goto exit4;
44726 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44727 + if (!error && (saved_dev || saved_ino))
44728 + gr_handle_delete(saved_ino, saved_dev);
44729 exit4:
44730 mnt_drop_write(nd.path.mnt);
44731 exit3:
44732 @@ -2761,6 +2914,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44733 struct dentry *dentry;
44734 struct nameidata nd;
44735 struct inode *inode = NULL;
44736 + ino_t saved_ino = 0;
44737 + dev_t saved_dev = 0;
44738
44739 error = user_path_parent(dfd, pathname, &nd, &name);
44740 if (error)
44741 @@ -2783,6 +2938,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44742 if (!inode)
44743 goto slashes;
44744 ihold(inode);
44745 +
44746 + if (inode->i_nlink <= 1) {
44747 + saved_ino = inode->i_ino;
44748 + saved_dev = gr_get_dev_from_dentry(dentry);
44749 + }
44750 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44751 + error = -EACCES;
44752 + goto exit2;
44753 + }
44754 +
44755 error = mnt_want_write(nd.path.mnt);
44756 if (error)
44757 goto exit2;
44758 @@ -2790,6 +2955,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44759 if (error)
44760 goto exit3;
44761 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44762 + if (!error && (saved_ino || saved_dev))
44763 + gr_handle_delete(saved_ino, saved_dev);
44764 exit3:
44765 mnt_drop_write(nd.path.mnt);
44766 exit2:
44767 @@ -2865,10 +3032,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
44768 error = mnt_want_write(path.mnt);
44769 if (error)
44770 goto out_dput;
44771 +
44772 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44773 + error = -EACCES;
44774 + goto out_drop_write;
44775 + }
44776 +
44777 error = security_path_symlink(&path, dentry, from);
44778 if (error)
44779 goto out_drop_write;
44780 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44781 + if (!error)
44782 + gr_handle_create(dentry, path.mnt);
44783 out_drop_write:
44784 mnt_drop_write(path.mnt);
44785 out_dput:
44786 @@ -2940,6 +3115,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44787 {
44788 struct dentry *new_dentry;
44789 struct path old_path, new_path;
44790 + char *to = NULL;
44791 int how = 0;
44792 int error;
44793
44794 @@ -2963,7 +3139,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44795 if (error)
44796 return error;
44797
44798 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44799 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44800 error = PTR_ERR(new_dentry);
44801 if (IS_ERR(new_dentry))
44802 goto out;
44803 @@ -2974,13 +3150,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44804 error = mnt_want_write(new_path.mnt);
44805 if (error)
44806 goto out_dput;
44807 +
44808 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44809 + old_path.dentry->d_inode,
44810 + old_path.dentry->d_inode->i_mode, to)) {
44811 + error = -EACCES;
44812 + goto out_drop_write;
44813 + }
44814 +
44815 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44816 + old_path.dentry, old_path.mnt, to)) {
44817 + error = -EACCES;
44818 + goto out_drop_write;
44819 + }
44820 +
44821 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44822 if (error)
44823 goto out_drop_write;
44824 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44825 + if (!error)
44826 + gr_handle_create(new_dentry, new_path.mnt);
44827 out_drop_write:
44828 mnt_drop_write(new_path.mnt);
44829 out_dput:
44830 + putname(to);
44831 dput(new_dentry);
44832 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44833 path_put(&new_path);
44834 @@ -3208,6 +3401,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44835 if (new_dentry == trap)
44836 goto exit5;
44837
44838 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44839 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
44840 + to);
44841 + if (error)
44842 + goto exit5;
44843 +
44844 error = mnt_want_write(oldnd.path.mnt);
44845 if (error)
44846 goto exit5;
44847 @@ -3217,6 +3416,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44848 goto exit6;
44849 error = vfs_rename(old_dir->d_inode, old_dentry,
44850 new_dir->d_inode, new_dentry);
44851 + if (!error)
44852 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44853 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44854 exit6:
44855 mnt_drop_write(oldnd.path.mnt);
44856 exit5:
44857 @@ -3242,6 +3444,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
44858
44859 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44860 {
44861 + char tmpbuf[64];
44862 + const char *newlink;
44863 int len;
44864
44865 len = PTR_ERR(link);
44866 @@ -3251,7 +3455,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
44867 len = strlen(link);
44868 if (len > (unsigned) buflen)
44869 len = buflen;
44870 - if (copy_to_user(buffer, link, len))
44871 +
44872 + if (len < sizeof(tmpbuf)) {
44873 + memcpy(tmpbuf, link, len);
44874 + newlink = tmpbuf;
44875 + } else
44876 + newlink = link;
44877 +
44878 + if (copy_to_user(buffer, newlink, len))
44879 len = -EFAULT;
44880 out:
44881 return len;
44882 diff --git a/fs/namespace.c b/fs/namespace.c
44883 index cfc6d44..b4632a5 100644
44884 --- a/fs/namespace.c
44885 +++ b/fs/namespace.c
44886 @@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44887 if (!(sb->s_flags & MS_RDONLY))
44888 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44889 up_write(&sb->s_umount);
44890 +
44891 + gr_log_remount(mnt->mnt_devname, retval);
44892 +
44893 return retval;
44894 }
44895
44896 @@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44897 br_write_unlock(vfsmount_lock);
44898 up_write(&namespace_sem);
44899 release_mounts(&umount_list);
44900 +
44901 + gr_log_unmount(mnt->mnt_devname, retval);
44902 +
44903 return retval;
44904 }
44905
44906 @@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44907 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44908 MS_STRICTATIME);
44909
44910 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44911 + retval = -EPERM;
44912 + goto dput_out;
44913 + }
44914 +
44915 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44916 + retval = -EPERM;
44917 + goto dput_out;
44918 + }
44919 +
44920 if (flags & MS_REMOUNT)
44921 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44922 data_page);
44923 @@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44924 dev_name, data_page);
44925 dput_out:
44926 path_put(&path);
44927 +
44928 + gr_log_mount(dev_name, dir_name, retval);
44929 +
44930 return retval;
44931 }
44932
44933 @@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
44934 if (error)
44935 goto out2;
44936
44937 + if (gr_handle_chroot_pivot()) {
44938 + error = -EPERM;
44939 + goto out2;
44940 + }
44941 +
44942 get_fs_root(current->fs, &root);
44943 error = lock_mount(&old);
44944 if (error)
44945 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
44946 index 3db6b82..a57597e 100644
44947 --- a/fs/nfs/blocklayout/blocklayout.c
44948 +++ b/fs/nfs/blocklayout/blocklayout.c
44949 @@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
44950 */
44951 struct parallel_io {
44952 struct kref refcnt;
44953 - struct rpc_call_ops call_ops;
44954 + rpc_call_ops_no_const call_ops;
44955 void (*pnfs_callback) (void *data);
44956 void *data;
44957 };
44958 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
44959 index 50a15fa..ca113f9 100644
44960 --- a/fs/nfs/inode.c
44961 +++ b/fs/nfs/inode.c
44962 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
44963 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44964 nfsi->attrtimeo_timestamp = jiffies;
44965
44966 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44967 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44968 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44969 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44970 else
44971 @@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
44972 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44973 }
44974
44975 -static atomic_long_t nfs_attr_generation_counter;
44976 +static atomic_long_unchecked_t nfs_attr_generation_counter;
44977
44978 static unsigned long nfs_read_attr_generation_counter(void)
44979 {
44980 - return atomic_long_read(&nfs_attr_generation_counter);
44981 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44982 }
44983
44984 unsigned long nfs_inc_attr_generation_counter(void)
44985 {
44986 - return atomic_long_inc_return(&nfs_attr_generation_counter);
44987 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44988 }
44989
44990 void nfs_fattr_init(struct nfs_fattr *fattr)
44991 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
44992 index 7a2e442..8e544cc 100644
44993 --- a/fs/nfsd/vfs.c
44994 +++ b/fs/nfsd/vfs.c
44995 @@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44996 } else {
44997 oldfs = get_fs();
44998 set_fs(KERNEL_DS);
44999 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
45000 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
45001 set_fs(oldfs);
45002 }
45003
45004 @@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
45005
45006 /* Write the data. */
45007 oldfs = get_fs(); set_fs(KERNEL_DS);
45008 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
45009 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
45010 set_fs(oldfs);
45011 if (host_err < 0)
45012 goto out_nfserr;
45013 @@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
45014 */
45015
45016 oldfs = get_fs(); set_fs(KERNEL_DS);
45017 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
45018 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
45019 set_fs(oldfs);
45020
45021 if (host_err < 0)
45022 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
45023 index 9fde1c0..14e8827 100644
45024 --- a/fs/notify/fanotify/fanotify_user.c
45025 +++ b/fs/notify/fanotify/fanotify_user.c
45026 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
45027 goto out_close_fd;
45028
45029 ret = -EFAULT;
45030 - if (copy_to_user(buf, &fanotify_event_metadata,
45031 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
45032 + copy_to_user(buf, &fanotify_event_metadata,
45033 fanotify_event_metadata.event_len))
45034 goto out_kill_access_response;
45035
45036 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
45037 index ee18815..7aa5d01 100644
45038 --- a/fs/notify/notification.c
45039 +++ b/fs/notify/notification.c
45040 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
45041 * get set to 0 so it will never get 'freed'
45042 */
45043 static struct fsnotify_event *q_overflow_event;
45044 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45045 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45046
45047 /**
45048 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
45049 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45050 */
45051 u32 fsnotify_get_cookie(void)
45052 {
45053 - return atomic_inc_return(&fsnotify_sync_cookie);
45054 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
45055 }
45056 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
45057
45058 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
45059 index 99e3610..02c1068 100644
45060 --- a/fs/ntfs/dir.c
45061 +++ b/fs/ntfs/dir.c
45062 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
45063 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
45064 ~(s64)(ndir->itype.index.block_size - 1)));
45065 /* Bounds checks. */
45066 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45067 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45068 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
45069 "inode 0x%lx or driver bug.", vdir->i_ino);
45070 goto err_out;
45071 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
45072 index c587e2d..3641eaa 100644
45073 --- a/fs/ntfs/file.c
45074 +++ b/fs/ntfs/file.c
45075 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
45076 #endif /* NTFS_RW */
45077 };
45078
45079 -const struct file_operations ntfs_empty_file_ops = {};
45080 +const struct file_operations ntfs_empty_file_ops __read_only;
45081
45082 -const struct inode_operations ntfs_empty_inode_ops = {};
45083 +const struct inode_operations ntfs_empty_inode_ops __read_only;
45084 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
45085 index 210c352..a174f83 100644
45086 --- a/fs/ocfs2/localalloc.c
45087 +++ b/fs/ocfs2/localalloc.c
45088 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
45089 goto bail;
45090 }
45091
45092 - atomic_inc(&osb->alloc_stats.moves);
45093 + atomic_inc_unchecked(&osb->alloc_stats.moves);
45094
45095 bail:
45096 if (handle)
45097 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
45098 index d355e6e..578d905 100644
45099 --- a/fs/ocfs2/ocfs2.h
45100 +++ b/fs/ocfs2/ocfs2.h
45101 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
45102
45103 struct ocfs2_alloc_stats
45104 {
45105 - atomic_t moves;
45106 - atomic_t local_data;
45107 - atomic_t bitmap_data;
45108 - atomic_t bg_allocs;
45109 - atomic_t bg_extends;
45110 + atomic_unchecked_t moves;
45111 + atomic_unchecked_t local_data;
45112 + atomic_unchecked_t bitmap_data;
45113 + atomic_unchecked_t bg_allocs;
45114 + atomic_unchecked_t bg_extends;
45115 };
45116
45117 enum ocfs2_local_alloc_state
45118 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
45119 index ba5d97e..c77db25 100644
45120 --- a/fs/ocfs2/suballoc.c
45121 +++ b/fs/ocfs2/suballoc.c
45122 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
45123 mlog_errno(status);
45124 goto bail;
45125 }
45126 - atomic_inc(&osb->alloc_stats.bg_extends);
45127 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
45128
45129 /* You should never ask for this much metadata */
45130 BUG_ON(bits_wanted >
45131 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
45132 mlog_errno(status);
45133 goto bail;
45134 }
45135 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45136 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45137
45138 *suballoc_loc = res.sr_bg_blkno;
45139 *suballoc_bit_start = res.sr_bit_offset;
45140 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
45141 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
45142 res->sr_bits);
45143
45144 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45145 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45146
45147 BUG_ON(res->sr_bits != 1);
45148
45149 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
45150 mlog_errno(status);
45151 goto bail;
45152 }
45153 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45154 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45155
45156 BUG_ON(res.sr_bits != 1);
45157
45158 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45159 cluster_start,
45160 num_clusters);
45161 if (!status)
45162 - atomic_inc(&osb->alloc_stats.local_data);
45163 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
45164 } else {
45165 if (min_clusters > (osb->bitmap_cpg - 1)) {
45166 /* The only paths asking for contiguousness
45167 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45168 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
45169 res.sr_bg_blkno,
45170 res.sr_bit_offset);
45171 - atomic_inc(&osb->alloc_stats.bitmap_data);
45172 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
45173 *num_clusters = res.sr_bits;
45174 }
45175 }
45176 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
45177 index 4994f8b..eaab8eb 100644
45178 --- a/fs/ocfs2/super.c
45179 +++ b/fs/ocfs2/super.c
45180 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
45181 "%10s => GlobalAllocs: %d LocalAllocs: %d "
45182 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
45183 "Stats",
45184 - atomic_read(&osb->alloc_stats.bitmap_data),
45185 - atomic_read(&osb->alloc_stats.local_data),
45186 - atomic_read(&osb->alloc_stats.bg_allocs),
45187 - atomic_read(&osb->alloc_stats.moves),
45188 - atomic_read(&osb->alloc_stats.bg_extends));
45189 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
45190 + atomic_read_unchecked(&osb->alloc_stats.local_data),
45191 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
45192 + atomic_read_unchecked(&osb->alloc_stats.moves),
45193 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
45194
45195 out += snprintf(buf + out, len - out,
45196 "%10s => State: %u Descriptor: %llu Size: %u bits "
45197 @@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
45198 spin_lock_init(&osb->osb_xattr_lock);
45199 ocfs2_init_steal_slots(osb);
45200
45201 - atomic_set(&osb->alloc_stats.moves, 0);
45202 - atomic_set(&osb->alloc_stats.local_data, 0);
45203 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
45204 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
45205 - atomic_set(&osb->alloc_stats.bg_extends, 0);
45206 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45207 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45208 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45209 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45210 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45211
45212 /* Copy the blockcheck stats from the superblock probe */
45213 osb->osb_ecc_stats = *stats;
45214 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
45215 index 5d22872..523db20 100644
45216 --- a/fs/ocfs2/symlink.c
45217 +++ b/fs/ocfs2/symlink.c
45218 @@ -142,7 +142,7 @@ bail:
45219
45220 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45221 {
45222 - char *link = nd_get_link(nd);
45223 + const char *link = nd_get_link(nd);
45224 if (!IS_ERR(link))
45225 kfree(link);
45226 }
45227 diff --git a/fs/open.c b/fs/open.c
45228 index 22c41b5..78894cf 100644
45229 --- a/fs/open.c
45230 +++ b/fs/open.c
45231 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
45232 error = locks_verify_truncate(inode, NULL, length);
45233 if (!error)
45234 error = security_path_truncate(&path);
45235 +
45236 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45237 + error = -EACCES;
45238 +
45239 if (!error)
45240 error = do_truncate(path.dentry, length, 0, NULL);
45241
45242 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
45243 if (__mnt_is_readonly(path.mnt))
45244 res = -EROFS;
45245
45246 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45247 + res = -EACCES;
45248 +
45249 out_path_release:
45250 path_put(&path);
45251 out:
45252 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
45253 if (error)
45254 goto dput_and_out;
45255
45256 + gr_log_chdir(path.dentry, path.mnt);
45257 +
45258 set_fs_pwd(current->fs, &path);
45259
45260 dput_and_out:
45261 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
45262 goto out_putf;
45263
45264 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45265 +
45266 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45267 + error = -EPERM;
45268 +
45269 + if (!error)
45270 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45271 +
45272 if (!error)
45273 set_fs_pwd(current->fs, &file->f_path);
45274 out_putf:
45275 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
45276 if (error)
45277 goto dput_and_out;
45278
45279 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45280 + goto dput_and_out;
45281 +
45282 set_fs_root(current->fs, &path);
45283 +
45284 + gr_handle_chroot_chdir(&path);
45285 +
45286 error = 0;
45287 dput_and_out:
45288 path_put(&path);
45289 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
45290 if (error)
45291 return error;
45292 mutex_lock(&inode->i_mutex);
45293 +
45294 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
45295 + error = -EACCES;
45296 + goto out_unlock;
45297 + }
45298 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
45299 + error = -EACCES;
45300 + goto out_unlock;
45301 + }
45302 +
45303 error = security_path_chmod(path->dentry, path->mnt, mode);
45304 if (error)
45305 goto out_unlock;
45306 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
45307 int error;
45308 struct iattr newattrs;
45309
45310 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
45311 + return -EACCES;
45312 +
45313 newattrs.ia_valid = ATTR_CTIME;
45314 if (user != (uid_t) -1) {
45315 newattrs.ia_valid |= ATTR_UID;
45316 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
45317 index 6296b40..417c00f 100644
45318 --- a/fs/partitions/efi.c
45319 +++ b/fs/partitions/efi.c
45320 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
45321 if (!gpt)
45322 return NULL;
45323
45324 + if (!le32_to_cpu(gpt->num_partition_entries))
45325 + return NULL;
45326 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
45327 + if (!pte)
45328 + return NULL;
45329 +
45330 count = le32_to_cpu(gpt->num_partition_entries) *
45331 le32_to_cpu(gpt->sizeof_partition_entry);
45332 - if (!count)
45333 - return NULL;
45334 - pte = kzalloc(count, GFP_KERNEL);
45335 - if (!pte)
45336 - return NULL;
45337 -
45338 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
45339 (u8 *) pte,
45340 count) < count) {
45341 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
45342 index bd8ae78..539d250 100644
45343 --- a/fs/partitions/ldm.c
45344 +++ b/fs/partitions/ldm.c
45345 @@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
45346 goto found;
45347 }
45348
45349 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45350 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45351 if (!f) {
45352 ldm_crit ("Out of memory.");
45353 return false;
45354 diff --git a/fs/pipe.c b/fs/pipe.c
45355 index 4065f07..68c0706 100644
45356 --- a/fs/pipe.c
45357 +++ b/fs/pipe.c
45358 @@ -420,9 +420,9 @@ redo:
45359 }
45360 if (bufs) /* More to do? */
45361 continue;
45362 - if (!pipe->writers)
45363 + if (!atomic_read(&pipe->writers))
45364 break;
45365 - if (!pipe->waiting_writers) {
45366 + if (!atomic_read(&pipe->waiting_writers)) {
45367 /* syscall merging: Usually we must not sleep
45368 * if O_NONBLOCK is set, or if we got some data.
45369 * But if a writer sleeps in kernel space, then
45370 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
45371 mutex_lock(&inode->i_mutex);
45372 pipe = inode->i_pipe;
45373
45374 - if (!pipe->readers) {
45375 + if (!atomic_read(&pipe->readers)) {
45376 send_sig(SIGPIPE, current, 0);
45377 ret = -EPIPE;
45378 goto out;
45379 @@ -530,7 +530,7 @@ redo1:
45380 for (;;) {
45381 int bufs;
45382
45383 - if (!pipe->readers) {
45384 + if (!atomic_read(&pipe->readers)) {
45385 send_sig(SIGPIPE, current, 0);
45386 if (!ret)
45387 ret = -EPIPE;
45388 @@ -616,9 +616,9 @@ redo2:
45389 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45390 do_wakeup = 0;
45391 }
45392 - pipe->waiting_writers++;
45393 + atomic_inc(&pipe->waiting_writers);
45394 pipe_wait(pipe);
45395 - pipe->waiting_writers--;
45396 + atomic_dec(&pipe->waiting_writers);
45397 }
45398 out:
45399 mutex_unlock(&inode->i_mutex);
45400 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45401 mask = 0;
45402 if (filp->f_mode & FMODE_READ) {
45403 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45404 - if (!pipe->writers && filp->f_version != pipe->w_counter)
45405 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45406 mask |= POLLHUP;
45407 }
45408
45409 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45410 * Most Unices do not set POLLERR for FIFOs but on Linux they
45411 * behave exactly like pipes for poll().
45412 */
45413 - if (!pipe->readers)
45414 + if (!atomic_read(&pipe->readers))
45415 mask |= POLLERR;
45416 }
45417
45418 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
45419
45420 mutex_lock(&inode->i_mutex);
45421 pipe = inode->i_pipe;
45422 - pipe->readers -= decr;
45423 - pipe->writers -= decw;
45424 + atomic_sub(decr, &pipe->readers);
45425 + atomic_sub(decw, &pipe->writers);
45426
45427 - if (!pipe->readers && !pipe->writers) {
45428 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45429 free_pipe_info(inode);
45430 } else {
45431 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45432 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
45433
45434 if (inode->i_pipe) {
45435 ret = 0;
45436 - inode->i_pipe->readers++;
45437 + atomic_inc(&inode->i_pipe->readers);
45438 }
45439
45440 mutex_unlock(&inode->i_mutex);
45441 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
45442
45443 if (inode->i_pipe) {
45444 ret = 0;
45445 - inode->i_pipe->writers++;
45446 + atomic_inc(&inode->i_pipe->writers);
45447 }
45448
45449 mutex_unlock(&inode->i_mutex);
45450 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
45451 if (inode->i_pipe) {
45452 ret = 0;
45453 if (filp->f_mode & FMODE_READ)
45454 - inode->i_pipe->readers++;
45455 + atomic_inc(&inode->i_pipe->readers);
45456 if (filp->f_mode & FMODE_WRITE)
45457 - inode->i_pipe->writers++;
45458 + atomic_inc(&inode->i_pipe->writers);
45459 }
45460
45461 mutex_unlock(&inode->i_mutex);
45462 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45463 inode->i_pipe = NULL;
45464 }
45465
45466 -static struct vfsmount *pipe_mnt __read_mostly;
45467 +struct vfsmount *pipe_mnt __read_mostly;
45468
45469 /*
45470 * pipefs_dname() is called from d_path().
45471 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
45472 goto fail_iput;
45473 inode->i_pipe = pipe;
45474
45475 - pipe->readers = pipe->writers = 1;
45476 + atomic_set(&pipe->readers, 1);
45477 + atomic_set(&pipe->writers, 1);
45478 inode->i_fop = &rdwr_pipefifo_fops;
45479
45480 /*
45481 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
45482 index 15af622..0e9f4467 100644
45483 --- a/fs/proc/Kconfig
45484 +++ b/fs/proc/Kconfig
45485 @@ -30,12 +30,12 @@ config PROC_FS
45486
45487 config PROC_KCORE
45488 bool "/proc/kcore support" if !ARM
45489 - depends on PROC_FS && MMU
45490 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45491
45492 config PROC_VMCORE
45493 bool "/proc/vmcore support"
45494 - depends on PROC_FS && CRASH_DUMP
45495 - default y
45496 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45497 + default n
45498 help
45499 Exports the dump image of crashed kernel in ELF format.
45500
45501 @@ -59,8 +59,8 @@ config PROC_SYSCTL
45502 limited in memory.
45503
45504 config PROC_PAGE_MONITOR
45505 - default y
45506 - depends on PROC_FS && MMU
45507 + default n
45508 + depends on PROC_FS && MMU && !GRKERNSEC
45509 bool "Enable /proc page monitoring" if EXPERT
45510 help
45511 Various /proc files exist to monitor process memory utilization:
45512 diff --git a/fs/proc/array.c b/fs/proc/array.c
45513 index 3a1dafd..bf1bd84 100644
45514 --- a/fs/proc/array.c
45515 +++ b/fs/proc/array.c
45516 @@ -60,6 +60,7 @@
45517 #include <linux/tty.h>
45518 #include <linux/string.h>
45519 #include <linux/mman.h>
45520 +#include <linux/grsecurity.h>
45521 #include <linux/proc_fs.h>
45522 #include <linux/ioport.h>
45523 #include <linux/uaccess.h>
45524 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
45525 seq_putc(m, '\n');
45526 }
45527
45528 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45529 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
45530 +{
45531 + if (p->mm)
45532 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45533 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45534 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45535 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45536 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45537 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45538 + else
45539 + seq_printf(m, "PaX:\t-----\n");
45540 +}
45541 +#endif
45542 +
45543 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45544 struct pid *pid, struct task_struct *task)
45545 {
45546 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45547 task_cpus_allowed(m, task);
45548 cpuset_task_status_allowed(m, task);
45549 task_context_switch_counts(m, task);
45550 +
45551 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45552 + task_pax(m, task);
45553 +#endif
45554 +
45555 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45556 + task_grsec_rbac(m, task);
45557 +#endif
45558 +
45559 return 0;
45560 }
45561
45562 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45563 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45564 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45565 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45566 +#endif
45567 +
45568 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45569 struct pid *pid, struct task_struct *task, int whole)
45570 {
45571 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45572 char tcomm[sizeof(task->comm)];
45573 unsigned long flags;
45574
45575 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45576 + if (current->exec_id != m->exec_id) {
45577 + gr_log_badprocpid("stat");
45578 + return 0;
45579 + }
45580 +#endif
45581 +
45582 state = *get_task_state(task);
45583 vsize = eip = esp = 0;
45584 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45585 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45586 gtime = task->gtime;
45587 }
45588
45589 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45590 + if (PAX_RAND_FLAGS(mm)) {
45591 + eip = 0;
45592 + esp = 0;
45593 + wchan = 0;
45594 + }
45595 +#endif
45596 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45597 + wchan = 0;
45598 + eip =0;
45599 + esp =0;
45600 +#endif
45601 +
45602 /* scale priority and nice values from timeslices to -20..20 */
45603 /* to make it look like a "normal" Unix priority/nice value */
45604 priority = task_prio(task);
45605 @@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45606 vsize,
45607 mm ? get_mm_rss(mm) : 0,
45608 rsslim,
45609 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45610 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45611 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45612 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45613 +#else
45614 mm ? (permitted ? mm->start_code : 1) : 0,
45615 mm ? (permitted ? mm->end_code : 1) : 0,
45616 (permitted && mm) ? mm->start_stack : 0,
45617 +#endif
45618 esp,
45619 eip,
45620 /* The signal information here is obsolete.
45621 @@ -533,8 +590,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45622 struct pid *pid, struct task_struct *task)
45623 {
45624 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
45625 - struct mm_struct *mm = get_task_mm(task);
45626 + struct mm_struct *mm;
45627
45628 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45629 + if (current->exec_id != m->exec_id) {
45630 + gr_log_badprocpid("statm");
45631 + return 0;
45632 + }
45633 +#endif
45634 + mm = get_task_mm(task);
45635 if (mm) {
45636 size = task_statm(mm, &shared, &text, &data, &resident);
45637 mmput(mm);
45638 @@ -544,3 +608,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45639
45640 return 0;
45641 }
45642 +
45643 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45644 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45645 +{
45646 + u32 curr_ip = 0;
45647 + unsigned long flags;
45648 +
45649 + if (lock_task_sighand(task, &flags)) {
45650 + curr_ip = task->signal->curr_ip;
45651 + unlock_task_sighand(task, &flags);
45652 + }
45653 +
45654 + return sprintf(buffer, "%pI4\n", &curr_ip);
45655 +}
45656 +#endif
45657 diff --git a/fs/proc/base.c b/fs/proc/base.c
45658 index 1ace83d..f5e575d 100644
45659 --- a/fs/proc/base.c
45660 +++ b/fs/proc/base.c
45661 @@ -107,6 +107,22 @@ struct pid_entry {
45662 union proc_op op;
45663 };
45664
45665 +struct getdents_callback {
45666 + struct linux_dirent __user * current_dir;
45667 + struct linux_dirent __user * previous;
45668 + struct file * file;
45669 + int count;
45670 + int error;
45671 +};
45672 +
45673 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45674 + loff_t offset, u64 ino, unsigned int d_type)
45675 +{
45676 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
45677 + buf->error = -EINVAL;
45678 + return 0;
45679 +}
45680 +
45681 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45682 .name = (NAME), \
45683 .len = sizeof(NAME) - 1, \
45684 @@ -194,26 +210,6 @@ static int proc_root_link(struct inode *inode, struct path *path)
45685 return result;
45686 }
45687
45688 -static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
45689 -{
45690 - struct mm_struct *mm;
45691 - int err;
45692 -
45693 - err = mutex_lock_killable(&task->signal->cred_guard_mutex);
45694 - if (err)
45695 - return ERR_PTR(err);
45696 -
45697 - mm = get_task_mm(task);
45698 - if (mm && mm != current->mm &&
45699 - !ptrace_may_access(task, mode)) {
45700 - mmput(mm);
45701 - mm = ERR_PTR(-EACCES);
45702 - }
45703 - mutex_unlock(&task->signal->cred_guard_mutex);
45704 -
45705 - return mm;
45706 -}
45707 -
45708 struct mm_struct *mm_for_maps(struct task_struct *task)
45709 {
45710 return mm_access(task, PTRACE_MODE_READ);
45711 @@ -229,6 +225,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
45712 if (!mm->arg_end)
45713 goto out_mm; /* Shh! No looking before we're done */
45714
45715 + if (gr_acl_handle_procpidmem(task))
45716 + goto out_mm;
45717 +
45718 len = mm->arg_end - mm->arg_start;
45719
45720 if (len > PAGE_SIZE)
45721 @@ -256,12 +255,28 @@ out:
45722 return res;
45723 }
45724
45725 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45726 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45727 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45728 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45729 +#endif
45730 +
45731 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45732 {
45733 struct mm_struct *mm = mm_for_maps(task);
45734 int res = PTR_ERR(mm);
45735 if (mm && !IS_ERR(mm)) {
45736 unsigned int nwords = 0;
45737 +
45738 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45739 + /* allow if we're currently ptracing this task */
45740 + if (PAX_RAND_FLAGS(mm) &&
45741 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45742 + mmput(mm);
45743 + return 0;
45744 + }
45745 +#endif
45746 +
45747 do {
45748 nwords += 2;
45749 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45750 @@ -275,7 +290,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
45751 }
45752
45753
45754 -#ifdef CONFIG_KALLSYMS
45755 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45756 /*
45757 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45758 * Returns the resolved symbol. If that fails, simply return the address.
45759 @@ -314,7 +329,7 @@ static void unlock_trace(struct task_struct *task)
45760 mutex_unlock(&task->signal->cred_guard_mutex);
45761 }
45762
45763 -#ifdef CONFIG_STACKTRACE
45764 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45765
45766 #define MAX_STACK_TRACE_DEPTH 64
45767
45768 @@ -505,7 +520,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
45769 return count;
45770 }
45771
45772 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45773 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45774 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45775 {
45776 long nr;
45777 @@ -534,7 +549,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
45778 /************************************************************************/
45779
45780 /* permission checks */
45781 -static int proc_fd_access_allowed(struct inode *inode)
45782 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45783 {
45784 struct task_struct *task;
45785 int allowed = 0;
45786 @@ -544,7 +559,10 @@ static int proc_fd_access_allowed(struct inode *inode)
45787 */
45788 task = get_proc_task(inode);
45789 if (task) {
45790 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45791 + if (log)
45792 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45793 + else
45794 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45795 put_task_struct(task);
45796 }
45797 return allowed;
45798 @@ -786,6 +804,10 @@ static int mem_open(struct inode* inode, struct file* file)
45799 file->f_mode |= FMODE_UNSIGNED_OFFSET;
45800 file->private_data = mm;
45801
45802 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45803 + file->f_version = current->exec_id;
45804 +#endif
45805 +
45806 return 0;
45807 }
45808
45809 @@ -797,6 +819,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
45810 ssize_t copied;
45811 char *page;
45812
45813 +#ifdef CONFIG_GRKERNSEC
45814 + if (write)
45815 + return -EPERM;
45816 +#endif
45817 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45818 + if (file->f_version != current->exec_id) {
45819 + gr_log_badprocpid("mem");
45820 + return 0;
45821 + }
45822 +#endif
45823 +
45824 if (!mm)
45825 return 0;
45826
45827 @@ -897,6 +930,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
45828 if (!task)
45829 goto out_no_task;
45830
45831 + if (gr_acl_handle_procpidmem(task))
45832 + goto out;
45833 +
45834 ret = -ENOMEM;
45835 page = (char *)__get_free_page(GFP_TEMPORARY);
45836 if (!page)
45837 @@ -1519,7 +1555,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
45838 path_put(&nd->path);
45839
45840 /* Are we allowed to snoop on the tasks file descriptors? */
45841 - if (!proc_fd_access_allowed(inode))
45842 + if (!proc_fd_access_allowed(inode,0))
45843 goto out;
45844
45845 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45846 @@ -1558,8 +1594,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
45847 struct path path;
45848
45849 /* Are we allowed to snoop on the tasks file descriptors? */
45850 - if (!proc_fd_access_allowed(inode))
45851 - goto out;
45852 + /* logging this is needed for learning on chromium to work properly,
45853 + but we don't want to flood the logs from 'ps' which does a readlink
45854 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45855 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
45856 + */
45857 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45858 + if (!proc_fd_access_allowed(inode,0))
45859 + goto out;
45860 + } else {
45861 + if (!proc_fd_access_allowed(inode,1))
45862 + goto out;
45863 + }
45864
45865 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45866 if (error)
45867 @@ -1624,7 +1670,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
45868 rcu_read_lock();
45869 cred = __task_cred(task);
45870 inode->i_uid = cred->euid;
45871 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45872 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45873 +#else
45874 inode->i_gid = cred->egid;
45875 +#endif
45876 rcu_read_unlock();
45877 }
45878 security_task_to_inode(task, inode);
45879 @@ -1642,6 +1692,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45880 struct inode *inode = dentry->d_inode;
45881 struct task_struct *task;
45882 const struct cred *cred;
45883 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45884 + const struct cred *tmpcred = current_cred();
45885 +#endif
45886
45887 generic_fillattr(inode, stat);
45888
45889 @@ -1649,13 +1702,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45890 stat->uid = 0;
45891 stat->gid = 0;
45892 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45893 +
45894 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45895 + rcu_read_unlock();
45896 + return -ENOENT;
45897 + }
45898 +
45899 if (task) {
45900 + cred = __task_cred(task);
45901 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45902 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45903 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45904 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45905 +#endif
45906 + ) {
45907 +#endif
45908 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45909 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45910 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45911 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45912 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45913 +#endif
45914 task_dumpable(task)) {
45915 - cred = __task_cred(task);
45916 stat->uid = cred->euid;
45917 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45918 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45919 +#else
45920 stat->gid = cred->egid;
45921 +#endif
45922 }
45923 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45924 + } else {
45925 + rcu_read_unlock();
45926 + return -ENOENT;
45927 + }
45928 +#endif
45929 }
45930 rcu_read_unlock();
45931 return 0;
45932 @@ -1692,11 +1773,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
45933
45934 if (task) {
45935 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45936 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45937 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45938 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45939 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45940 +#endif
45941 task_dumpable(task)) {
45942 rcu_read_lock();
45943 cred = __task_cred(task);
45944 inode->i_uid = cred->euid;
45945 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45946 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45947 +#else
45948 inode->i_gid = cred->egid;
45949 +#endif
45950 rcu_read_unlock();
45951 } else {
45952 inode->i_uid = 0;
45953 @@ -1814,7 +1904,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
45954 int fd = proc_fd(inode);
45955
45956 if (task) {
45957 - files = get_files_struct(task);
45958 + if (!gr_acl_handle_procpidmem(task))
45959 + files = get_files_struct(task);
45960 put_task_struct(task);
45961 }
45962 if (files) {
45963 @@ -2082,11 +2173,21 @@ static const struct file_operations proc_fd_operations = {
45964 */
45965 static int proc_fd_permission(struct inode *inode, int mask)
45966 {
45967 + struct task_struct *task;
45968 int rv = generic_permission(inode, mask);
45969 - if (rv == 0)
45970 - return 0;
45971 +
45972 if (task_pid(current) == proc_pid(inode))
45973 rv = 0;
45974 +
45975 + task = get_proc_task(inode);
45976 + if (task == NULL)
45977 + return rv;
45978 +
45979 + if (gr_acl_handle_procpidmem(task))
45980 + rv = -EACCES;
45981 +
45982 + put_task_struct(task);
45983 +
45984 return rv;
45985 }
45986
45987 @@ -2196,6 +2297,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
45988 if (!task)
45989 goto out_no_task;
45990
45991 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45992 + goto out;
45993 +
45994 /*
45995 * Yes, it does not scale. And it should not. Don't add
45996 * new entries into /proc/<tgid>/ without very good reasons.
45997 @@ -2240,6 +2344,9 @@ static int proc_pident_readdir(struct file *filp,
45998 if (!task)
45999 goto out_no_task;
46000
46001 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46002 + goto out;
46003 +
46004 ret = 0;
46005 i = filp->f_pos;
46006 switch (i) {
46007 @@ -2510,7 +2617,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
46008 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
46009 void *cookie)
46010 {
46011 - char *s = nd_get_link(nd);
46012 + const char *s = nd_get_link(nd);
46013 if (!IS_ERR(s))
46014 __putname(s);
46015 }
46016 @@ -2708,7 +2815,7 @@ static const struct pid_entry tgid_base_stuff[] = {
46017 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
46018 #endif
46019 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46020 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46021 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46022 INF("syscall", S_IRUGO, proc_pid_syscall),
46023 #endif
46024 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46025 @@ -2733,10 +2840,10 @@ static const struct pid_entry tgid_base_stuff[] = {
46026 #ifdef CONFIG_SECURITY
46027 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46028 #endif
46029 -#ifdef CONFIG_KALLSYMS
46030 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46031 INF("wchan", S_IRUGO, proc_pid_wchan),
46032 #endif
46033 -#ifdef CONFIG_STACKTRACE
46034 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46035 ONE("stack", S_IRUGO, proc_pid_stack),
46036 #endif
46037 #ifdef CONFIG_SCHEDSTATS
46038 @@ -2770,6 +2877,9 @@ static const struct pid_entry tgid_base_stuff[] = {
46039 #ifdef CONFIG_HARDWALL
46040 INF("hardwall", S_IRUGO, proc_pid_hardwall),
46041 #endif
46042 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46043 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
46044 +#endif
46045 };
46046
46047 static int proc_tgid_base_readdir(struct file * filp,
46048 @@ -2895,7 +3005,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
46049 if (!inode)
46050 goto out;
46051
46052 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46053 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
46054 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46055 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46056 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
46057 +#else
46058 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
46059 +#endif
46060 inode->i_op = &proc_tgid_base_inode_operations;
46061 inode->i_fop = &proc_tgid_base_operations;
46062 inode->i_flags|=S_IMMUTABLE;
46063 @@ -2937,7 +3054,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
46064 if (!task)
46065 goto out;
46066
46067 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
46068 + goto out_put_task;
46069 +
46070 result = proc_pid_instantiate(dir, dentry, task, NULL);
46071 +out_put_task:
46072 put_task_struct(task);
46073 out:
46074 return result;
46075 @@ -3002,6 +3123,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
46076 {
46077 unsigned int nr;
46078 struct task_struct *reaper;
46079 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46080 + const struct cred *tmpcred = current_cred();
46081 + const struct cred *itercred;
46082 +#endif
46083 + filldir_t __filldir = filldir;
46084 struct tgid_iter iter;
46085 struct pid_namespace *ns;
46086
46087 @@ -3025,8 +3151,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
46088 for (iter = next_tgid(ns, iter);
46089 iter.task;
46090 iter.tgid += 1, iter = next_tgid(ns, iter)) {
46091 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46092 + rcu_read_lock();
46093 + itercred = __task_cred(iter.task);
46094 +#endif
46095 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
46096 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46097 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
46098 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46099 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
46100 +#endif
46101 + )
46102 +#endif
46103 + )
46104 + __filldir = &gr_fake_filldir;
46105 + else
46106 + __filldir = filldir;
46107 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46108 + rcu_read_unlock();
46109 +#endif
46110 filp->f_pos = iter.tgid + TGID_OFFSET;
46111 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
46112 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
46113 put_task_struct(iter.task);
46114 goto out;
46115 }
46116 @@ -3054,7 +3199,7 @@ static const struct pid_entry tid_base_stuff[] = {
46117 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
46118 #endif
46119 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46120 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46121 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46122 INF("syscall", S_IRUGO, proc_pid_syscall),
46123 #endif
46124 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46125 @@ -3078,10 +3223,10 @@ static const struct pid_entry tid_base_stuff[] = {
46126 #ifdef CONFIG_SECURITY
46127 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46128 #endif
46129 -#ifdef CONFIG_KALLSYMS
46130 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46131 INF("wchan", S_IRUGO, proc_pid_wchan),
46132 #endif
46133 -#ifdef CONFIG_STACKTRACE
46134 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46135 ONE("stack", S_IRUGO, proc_pid_stack),
46136 #endif
46137 #ifdef CONFIG_SCHEDSTATS
46138 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
46139 index 82676e3..5f8518a 100644
46140 --- a/fs/proc/cmdline.c
46141 +++ b/fs/proc/cmdline.c
46142 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
46143
46144 static int __init proc_cmdline_init(void)
46145 {
46146 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46147 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
46148 +#else
46149 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
46150 +#endif
46151 return 0;
46152 }
46153 module_init(proc_cmdline_init);
46154 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
46155 index b143471..bb105e5 100644
46156 --- a/fs/proc/devices.c
46157 +++ b/fs/proc/devices.c
46158 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
46159
46160 static int __init proc_devices_init(void)
46161 {
46162 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46163 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
46164 +#else
46165 proc_create("devices", 0, NULL, &proc_devinfo_operations);
46166 +#endif
46167 return 0;
46168 }
46169 module_init(proc_devices_init);
46170 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
46171 index 7737c54..7172574 100644
46172 --- a/fs/proc/inode.c
46173 +++ b/fs/proc/inode.c
46174 @@ -18,12 +18,18 @@
46175 #include <linux/module.h>
46176 #include <linux/sysctl.h>
46177 #include <linux/slab.h>
46178 +#include <linux/grsecurity.h>
46179
46180 #include <asm/system.h>
46181 #include <asm/uaccess.h>
46182
46183 #include "internal.h"
46184
46185 +#ifdef CONFIG_PROC_SYSCTL
46186 +extern const struct inode_operations proc_sys_inode_operations;
46187 +extern const struct inode_operations proc_sys_dir_operations;
46188 +#endif
46189 +
46190 static void proc_evict_inode(struct inode *inode)
46191 {
46192 struct proc_dir_entry *de;
46193 @@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
46194 ns_ops = PROC_I(inode)->ns_ops;
46195 if (ns_ops && ns_ops->put)
46196 ns_ops->put(PROC_I(inode)->ns);
46197 +
46198 +#ifdef CONFIG_PROC_SYSCTL
46199 + if (inode->i_op == &proc_sys_inode_operations ||
46200 + inode->i_op == &proc_sys_dir_operations)
46201 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
46202 +#endif
46203 +
46204 }
46205
46206 static struct kmem_cache * proc_inode_cachep;
46207 @@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
46208 if (de->mode) {
46209 inode->i_mode = de->mode;
46210 inode->i_uid = de->uid;
46211 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46212 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46213 +#else
46214 inode->i_gid = de->gid;
46215 +#endif
46216 }
46217 if (de->size)
46218 inode->i_size = de->size;
46219 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
46220 index 7838e5c..ff92cbc 100644
46221 --- a/fs/proc/internal.h
46222 +++ b/fs/proc/internal.h
46223 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46224 struct pid *pid, struct task_struct *task);
46225 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46226 struct pid *pid, struct task_struct *task);
46227 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46228 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
46229 +#endif
46230 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
46231
46232 extern const struct file_operations proc_maps_operations;
46233 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
46234 index d245cb2..f4e8498 100644
46235 --- a/fs/proc/kcore.c
46236 +++ b/fs/proc/kcore.c
46237 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46238 * the addresses in the elf_phdr on our list.
46239 */
46240 start = kc_offset_to_vaddr(*fpos - elf_buflen);
46241 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
46242 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
46243 + if (tsz > buflen)
46244 tsz = buflen;
46245 -
46246 +
46247 while (buflen) {
46248 struct kcore_list *m;
46249
46250 @@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46251 kfree(elf_buf);
46252 } else {
46253 if (kern_addr_valid(start)) {
46254 - unsigned long n;
46255 + char *elf_buf;
46256 + mm_segment_t oldfs;
46257
46258 - n = copy_to_user(buffer, (char *)start, tsz);
46259 - /*
46260 - * We cannot distingush between fault on source
46261 - * and fault on destination. When this happens
46262 - * we clear too and hope it will trigger the
46263 - * EFAULT again.
46264 - */
46265 - if (n) {
46266 - if (clear_user(buffer + tsz - n,
46267 - n))
46268 + elf_buf = kmalloc(tsz, GFP_KERNEL);
46269 + if (!elf_buf)
46270 + return -ENOMEM;
46271 + oldfs = get_fs();
46272 + set_fs(KERNEL_DS);
46273 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46274 + set_fs(oldfs);
46275 + if (copy_to_user(buffer, elf_buf, tsz)) {
46276 + kfree(elf_buf);
46277 return -EFAULT;
46278 + }
46279 }
46280 + set_fs(oldfs);
46281 + kfree(elf_buf);
46282 } else {
46283 if (clear_user(buffer, tsz))
46284 return -EFAULT;
46285 @@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46286
46287 static int open_kcore(struct inode *inode, struct file *filp)
46288 {
46289 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46290 + return -EPERM;
46291 +#endif
46292 if (!capable(CAP_SYS_RAWIO))
46293 return -EPERM;
46294 if (kcore_need_update)
46295 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
46296 index 80e4645..53e5fcf 100644
46297 --- a/fs/proc/meminfo.c
46298 +++ b/fs/proc/meminfo.c
46299 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
46300 vmi.used >> 10,
46301 vmi.largest_chunk >> 10
46302 #ifdef CONFIG_MEMORY_FAILURE
46303 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46304 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46305 #endif
46306 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46307 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46308 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
46309 index b1822dd..df622cb 100644
46310 --- a/fs/proc/nommu.c
46311 +++ b/fs/proc/nommu.c
46312 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
46313 if (len < 1)
46314 len = 1;
46315 seq_printf(m, "%*c", len, ' ');
46316 - seq_path(m, &file->f_path, "");
46317 + seq_path(m, &file->f_path, "\n\\");
46318 }
46319
46320 seq_putc(m, '\n');
46321 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
46322 index f738024..876984a 100644
46323 --- a/fs/proc/proc_net.c
46324 +++ b/fs/proc/proc_net.c
46325 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
46326 struct task_struct *task;
46327 struct nsproxy *ns;
46328 struct net *net = NULL;
46329 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46330 + const struct cred *cred = current_cred();
46331 +#endif
46332 +
46333 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46334 + if (cred->fsuid)
46335 + return net;
46336 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46337 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46338 + return net;
46339 +#endif
46340
46341 rcu_read_lock();
46342 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46343 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
46344 index a6b6217..1e0579d 100644
46345 --- a/fs/proc/proc_sysctl.c
46346 +++ b/fs/proc/proc_sysctl.c
46347 @@ -9,11 +9,13 @@
46348 #include <linux/namei.h>
46349 #include "internal.h"
46350
46351 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46352 +
46353 static const struct dentry_operations proc_sys_dentry_operations;
46354 static const struct file_operations proc_sys_file_operations;
46355 -static const struct inode_operations proc_sys_inode_operations;
46356 +const struct inode_operations proc_sys_inode_operations;
46357 static const struct file_operations proc_sys_dir_file_operations;
46358 -static const struct inode_operations proc_sys_dir_operations;
46359 +const struct inode_operations proc_sys_dir_operations;
46360
46361 void proc_sys_poll_notify(struct ctl_table_poll *poll)
46362 {
46363 @@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
46364
46365 err = NULL;
46366 d_set_d_op(dentry, &proc_sys_dentry_operations);
46367 +
46368 + gr_handle_proc_create(dentry, inode);
46369 +
46370 d_add(dentry, inode);
46371
46372 + if (gr_handle_sysctl(p, MAY_EXEC))
46373 + err = ERR_PTR(-ENOENT);
46374 +
46375 out:
46376 sysctl_head_finish(head);
46377 return err;
46378 @@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
46379 if (!table->proc_handler)
46380 goto out;
46381
46382 +#ifdef CONFIG_GRKERNSEC
46383 + error = -EPERM;
46384 + if (write && !capable(CAP_SYS_ADMIN))
46385 + goto out;
46386 +#endif
46387 +
46388 /* careful: calling conventions are nasty here */
46389 res = count;
46390 error = table->proc_handler(table, write, buf, &res, ppos);
46391 @@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
46392 return -ENOMEM;
46393 } else {
46394 d_set_d_op(child, &proc_sys_dentry_operations);
46395 +
46396 + gr_handle_proc_create(child, inode);
46397 +
46398 d_add(child, inode);
46399 }
46400 } else {
46401 @@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
46402 if (*pos < file->f_pos)
46403 continue;
46404
46405 + if (gr_handle_sysctl(table, 0))
46406 + continue;
46407 +
46408 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46409 if (res)
46410 return res;
46411 @@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
46412 if (IS_ERR(head))
46413 return PTR_ERR(head);
46414
46415 + if (table && gr_handle_sysctl(table, MAY_EXEC))
46416 + return -ENOENT;
46417 +
46418 generic_fillattr(inode, stat);
46419 if (table)
46420 stat->mode = (stat->mode & S_IFMT) | table->mode;
46421 @@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
46422 .llseek = generic_file_llseek,
46423 };
46424
46425 -static const struct inode_operations proc_sys_inode_operations = {
46426 +const struct inode_operations proc_sys_inode_operations = {
46427 .permission = proc_sys_permission,
46428 .setattr = proc_sys_setattr,
46429 .getattr = proc_sys_getattr,
46430 };
46431
46432 -static const struct inode_operations proc_sys_dir_operations = {
46433 +const struct inode_operations proc_sys_dir_operations = {
46434 .lookup = proc_sys_lookup,
46435 .permission = proc_sys_permission,
46436 .setattr = proc_sys_setattr,
46437 diff --git a/fs/proc/root.c b/fs/proc/root.c
46438 index 03102d9..4ae347e 100644
46439 --- a/fs/proc/root.c
46440 +++ b/fs/proc/root.c
46441 @@ -121,7 +121,15 @@ void __init proc_root_init(void)
46442 #ifdef CONFIG_PROC_DEVICETREE
46443 proc_device_tree_init();
46444 #endif
46445 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46446 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46447 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46448 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46449 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46450 +#endif
46451 +#else
46452 proc_mkdir("bus", NULL);
46453 +#endif
46454 proc_sys_init();
46455 }
46456
46457 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
46458 index 7dcd2a2..b2f410e 100644
46459 --- a/fs/proc/task_mmu.c
46460 +++ b/fs/proc/task_mmu.c
46461 @@ -11,6 +11,7 @@
46462 #include <linux/rmap.h>
46463 #include <linux/swap.h>
46464 #include <linux/swapops.h>
46465 +#include <linux/grsecurity.h>
46466
46467 #include <asm/elf.h>
46468 #include <asm/uaccess.h>
46469 @@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46470 "VmExe:\t%8lu kB\n"
46471 "VmLib:\t%8lu kB\n"
46472 "VmPTE:\t%8lu kB\n"
46473 - "VmSwap:\t%8lu kB\n",
46474 - hiwater_vm << (PAGE_SHIFT-10),
46475 + "VmSwap:\t%8lu kB\n"
46476 +
46477 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46478 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46479 +#endif
46480 +
46481 + ,hiwater_vm << (PAGE_SHIFT-10),
46482 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46483 mm->locked_vm << (PAGE_SHIFT-10),
46484 mm->pinned_vm << (PAGE_SHIFT-10),
46485 @@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46486 data << (PAGE_SHIFT-10),
46487 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46488 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46489 - swap << (PAGE_SHIFT-10));
46490 + swap << (PAGE_SHIFT-10)
46491 +
46492 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46493 + , mm->context.user_cs_base, mm->context.user_cs_limit
46494 +#endif
46495 +
46496 + );
46497 }
46498
46499 unsigned long task_vsize(struct mm_struct *mm)
46500 @@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
46501 return ret;
46502 }
46503
46504 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46505 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46506 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
46507 + _mm->pax_flags & MF_PAX_SEGMEXEC))
46508 +#endif
46509 +
46510 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46511 {
46512 struct mm_struct *mm = vma->vm_mm;
46513 @@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46514 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46515 }
46516
46517 - /* We don't show the stack guard page in /proc/maps */
46518 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46519 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46520 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46521 +#else
46522 start = vma->vm_start;
46523 - if (stack_guard_page_start(vma, start))
46524 - start += PAGE_SIZE;
46525 end = vma->vm_end;
46526 - if (stack_guard_page_end(vma, end))
46527 - end -= PAGE_SIZE;
46528 +#endif
46529
46530 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46531 start,
46532 @@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46533 flags & VM_WRITE ? 'w' : '-',
46534 flags & VM_EXEC ? 'x' : '-',
46535 flags & VM_MAYSHARE ? 's' : 'p',
46536 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46537 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46538 +#else
46539 pgoff,
46540 +#endif
46541 MAJOR(dev), MINOR(dev), ino, &len);
46542
46543 /*
46544 @@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46545 */
46546 if (file) {
46547 pad_len_spaces(m, len);
46548 - seq_path(m, &file->f_path, "\n");
46549 + seq_path(m, &file->f_path, "\n\\");
46550 } else {
46551 const char *name = arch_vma_name(vma);
46552 if (!name) {
46553 @@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46554 if (vma->vm_start <= mm->brk &&
46555 vma->vm_end >= mm->start_brk) {
46556 name = "[heap]";
46557 - } else if (vma->vm_start <= mm->start_stack &&
46558 - vma->vm_end >= mm->start_stack) {
46559 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46560 + (vma->vm_start <= mm->start_stack &&
46561 + vma->vm_end >= mm->start_stack)) {
46562 name = "[stack]";
46563 }
46564 } else {
46565 @@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
46566 struct proc_maps_private *priv = m->private;
46567 struct task_struct *task = priv->task;
46568
46569 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46570 + if (current->exec_id != m->exec_id) {
46571 + gr_log_badprocpid("maps");
46572 + return 0;
46573 + }
46574 +#endif
46575 +
46576 show_map_vma(m, vma);
46577
46578 if (m->count < m->size) /* vma is copied successfully */
46579 @@ -434,12 +464,23 @@ static int show_smap(struct seq_file *m, void *v)
46580 .private = &mss,
46581 };
46582
46583 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46584 + if (current->exec_id != m->exec_id) {
46585 + gr_log_badprocpid("smaps");
46586 + return 0;
46587 + }
46588 +#endif
46589 memset(&mss, 0, sizeof mss);
46590 - mss.vma = vma;
46591 - /* mmap_sem is held in m_start */
46592 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46593 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46594 -
46595 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46596 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46597 +#endif
46598 + mss.vma = vma;
46599 + /* mmap_sem is held in m_start */
46600 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46601 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46602 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46603 + }
46604 +#endif
46605 show_map_vma(m, vma);
46606
46607 seq_printf(m,
46608 @@ -457,7 +498,11 @@ static int show_smap(struct seq_file *m, void *v)
46609 "KernelPageSize: %8lu kB\n"
46610 "MMUPageSize: %8lu kB\n"
46611 "Locked: %8lu kB\n",
46612 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46613 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46614 +#else
46615 (vma->vm_end - vma->vm_start) >> 10,
46616 +#endif
46617 mss.resident >> 10,
46618 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46619 mss.shared_clean >> 10,
46620 @@ -1015,6 +1060,13 @@ static int show_numa_map(struct seq_file *m, void *v)
46621 int n;
46622 char buffer[50];
46623
46624 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46625 + if (current->exec_id != m->exec_id) {
46626 + gr_log_badprocpid("numa_maps");
46627 + return 0;
46628 + }
46629 +#endif
46630 +
46631 if (!mm)
46632 return 0;
46633
46634 @@ -1032,11 +1084,15 @@ static int show_numa_map(struct seq_file *m, void *v)
46635 mpol_to_str(buffer, sizeof(buffer), pol, 0);
46636 mpol_cond_put(pol);
46637
46638 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46639 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
46640 +#else
46641 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
46642 +#endif
46643
46644 if (file) {
46645 seq_printf(m, " file=");
46646 - seq_path(m, &file->f_path, "\n\t= ");
46647 + seq_path(m, &file->f_path, "\n\t\\= ");
46648 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46649 seq_printf(m, " heap");
46650 } else if (vma->vm_start <= mm->start_stack &&
46651 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
46652 index 980de54..2a4db5f 100644
46653 --- a/fs/proc/task_nommu.c
46654 +++ b/fs/proc/task_nommu.c
46655 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46656 else
46657 bytes += kobjsize(mm);
46658
46659 - if (current->fs && current->fs->users > 1)
46660 + if (current->fs && atomic_read(&current->fs->users) > 1)
46661 sbytes += kobjsize(current->fs);
46662 else
46663 bytes += kobjsize(current->fs);
46664 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
46665
46666 if (file) {
46667 pad_len_spaces(m, len);
46668 - seq_path(m, &file->f_path, "");
46669 + seq_path(m, &file->f_path, "\n\\");
46670 } else if (mm) {
46671 if (vma->vm_start <= mm->start_stack &&
46672 vma->vm_end >= mm->start_stack) {
46673 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
46674 index d67908b..d13f6a6 100644
46675 --- a/fs/quota/netlink.c
46676 +++ b/fs/quota/netlink.c
46677 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
46678 void quota_send_warning(short type, unsigned int id, dev_t dev,
46679 const char warntype)
46680 {
46681 - static atomic_t seq;
46682 + static atomic_unchecked_t seq;
46683 struct sk_buff *skb;
46684 void *msg_head;
46685 int ret;
46686 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
46687 "VFS: Not enough memory to send quota warning.\n");
46688 return;
46689 }
46690 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46691 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46692 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46693 if (!msg_head) {
46694 printk(KERN_ERR
46695 diff --git a/fs/readdir.c b/fs/readdir.c
46696 index 356f715..c918d38 100644
46697 --- a/fs/readdir.c
46698 +++ b/fs/readdir.c
46699 @@ -17,6 +17,7 @@
46700 #include <linux/security.h>
46701 #include <linux/syscalls.h>
46702 #include <linux/unistd.h>
46703 +#include <linux/namei.h>
46704
46705 #include <asm/uaccess.h>
46706
46707 @@ -67,6 +68,7 @@ struct old_linux_dirent {
46708
46709 struct readdir_callback {
46710 struct old_linux_dirent __user * dirent;
46711 + struct file * file;
46712 int result;
46713 };
46714
46715 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
46716 buf->result = -EOVERFLOW;
46717 return -EOVERFLOW;
46718 }
46719 +
46720 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46721 + return 0;
46722 +
46723 buf->result++;
46724 dirent = buf->dirent;
46725 if (!access_ok(VERIFY_WRITE, dirent,
46726 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
46727
46728 buf.result = 0;
46729 buf.dirent = dirent;
46730 + buf.file = file;
46731
46732 error = vfs_readdir(file, fillonedir, &buf);
46733 if (buf.result)
46734 @@ -142,6 +149,7 @@ struct linux_dirent {
46735 struct getdents_callback {
46736 struct linux_dirent __user * current_dir;
46737 struct linux_dirent __user * previous;
46738 + struct file * file;
46739 int count;
46740 int error;
46741 };
46742 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
46743 buf->error = -EOVERFLOW;
46744 return -EOVERFLOW;
46745 }
46746 +
46747 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46748 + return 0;
46749 +
46750 dirent = buf->previous;
46751 if (dirent) {
46752 if (__put_user(offset, &dirent->d_off))
46753 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
46754 buf.previous = NULL;
46755 buf.count = count;
46756 buf.error = 0;
46757 + buf.file = file;
46758
46759 error = vfs_readdir(file, filldir, &buf);
46760 if (error >= 0)
46761 @@ -229,6 +242,7 @@ out:
46762 struct getdents_callback64 {
46763 struct linux_dirent64 __user * current_dir;
46764 struct linux_dirent64 __user * previous;
46765 + struct file *file;
46766 int count;
46767 int error;
46768 };
46769 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
46770 buf->error = -EINVAL; /* only used if we fail.. */
46771 if (reclen > buf->count)
46772 return -EINVAL;
46773 +
46774 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46775 + return 0;
46776 +
46777 dirent = buf->previous;
46778 if (dirent) {
46779 if (__put_user(offset, &dirent->d_off))
46780 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46781
46782 buf.current_dir = dirent;
46783 buf.previous = NULL;
46784 + buf.file = file;
46785 buf.count = count;
46786 buf.error = 0;
46787
46788 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46789 error = buf.error;
46790 lastdirent = buf.previous;
46791 if (lastdirent) {
46792 - typeof(lastdirent->d_off) d_off = file->f_pos;
46793 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46794 if (__put_user(d_off, &lastdirent->d_off))
46795 error = -EFAULT;
46796 else
46797 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
46798 index 60c0804..d814f98 100644
46799 --- a/fs/reiserfs/do_balan.c
46800 +++ b/fs/reiserfs/do_balan.c
46801 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
46802 return;
46803 }
46804
46805 - atomic_inc(&(fs_generation(tb->tb_sb)));
46806 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46807 do_balance_starts(tb);
46808
46809 /* balance leaf returns 0 except if combining L R and S into
46810 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
46811 index 7a99811..a7c96c4 100644
46812 --- a/fs/reiserfs/procfs.c
46813 +++ b/fs/reiserfs/procfs.c
46814 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
46815 "SMALL_TAILS " : "NO_TAILS ",
46816 replay_only(sb) ? "REPLAY_ONLY " : "",
46817 convert_reiserfs(sb) ? "CONV " : "",
46818 - atomic_read(&r->s_generation_counter),
46819 + atomic_read_unchecked(&r->s_generation_counter),
46820 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46821 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46822 SF(s_good_search_by_key_reada), SF(s_bmaps),
46823 diff --git a/fs/select.c b/fs/select.c
46824 index d33418f..2a5345e 100644
46825 --- a/fs/select.c
46826 +++ b/fs/select.c
46827 @@ -20,6 +20,7 @@
46828 #include <linux/module.h>
46829 #include <linux/slab.h>
46830 #include <linux/poll.h>
46831 +#include <linux/security.h>
46832 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46833 #include <linux/file.h>
46834 #include <linux/fdtable.h>
46835 @@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
46836 struct poll_list *walk = head;
46837 unsigned long todo = nfds;
46838
46839 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46840 if (nfds > rlimit(RLIMIT_NOFILE))
46841 return -EINVAL;
46842
46843 diff --git a/fs/seq_file.c b/fs/seq_file.c
46844 index dba43c3..9fb8511 100644
46845 --- a/fs/seq_file.c
46846 +++ b/fs/seq_file.c
46847 @@ -9,6 +9,7 @@
46848 #include <linux/module.h>
46849 #include <linux/seq_file.h>
46850 #include <linux/slab.h>
46851 +#include <linux/sched.h>
46852
46853 #include <asm/uaccess.h>
46854 #include <asm/page.h>
46855 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
46856 memset(p, 0, sizeof(*p));
46857 mutex_init(&p->lock);
46858 p->op = op;
46859 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46860 + p->exec_id = current->exec_id;
46861 +#endif
46862
46863 /*
46864 * Wrappers around seq_open(e.g. swaps_open) need to be
46865 @@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46866 return 0;
46867 }
46868 if (!m->buf) {
46869 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46870 + m->size = PAGE_SIZE;
46871 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46872 if (!m->buf)
46873 return -ENOMEM;
46874 }
46875 @@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46876 Eoverflow:
46877 m->op->stop(m, p);
46878 kfree(m->buf);
46879 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46880 + m->size <<= 1;
46881 + m->buf = kmalloc(m->size, GFP_KERNEL);
46882 return !m->buf ? -ENOMEM : -EAGAIN;
46883 }
46884
46885 @@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46886 m->version = file->f_version;
46887 /* grab buffer if we didn't have one */
46888 if (!m->buf) {
46889 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46890 + m->size = PAGE_SIZE;
46891 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46892 if (!m->buf)
46893 goto Enomem;
46894 }
46895 @@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46896 goto Fill;
46897 m->op->stop(m, p);
46898 kfree(m->buf);
46899 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46900 + m->size <<= 1;
46901 + m->buf = kmalloc(m->size, GFP_KERNEL);
46902 if (!m->buf)
46903 goto Enomem;
46904 m->count = 0;
46905 @@ -549,7 +557,7 @@ static void single_stop(struct seq_file *p, void *v)
46906 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46907 void *data)
46908 {
46909 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46910 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46911 int res = -ENOMEM;
46912
46913 if (op) {
46914 diff --git a/fs/splice.c b/fs/splice.c
46915 index fa2defa..8601650 100644
46916 --- a/fs/splice.c
46917 +++ b/fs/splice.c
46918 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46919 pipe_lock(pipe);
46920
46921 for (;;) {
46922 - if (!pipe->readers) {
46923 + if (!atomic_read(&pipe->readers)) {
46924 send_sig(SIGPIPE, current, 0);
46925 if (!ret)
46926 ret = -EPIPE;
46927 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46928 do_wakeup = 0;
46929 }
46930
46931 - pipe->waiting_writers++;
46932 + atomic_inc(&pipe->waiting_writers);
46933 pipe_wait(pipe);
46934 - pipe->waiting_writers--;
46935 + atomic_dec(&pipe->waiting_writers);
46936 }
46937
46938 pipe_unlock(pipe);
46939 @@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
46940 old_fs = get_fs();
46941 set_fs(get_ds());
46942 /* The cast to a user pointer is valid due to the set_fs() */
46943 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46944 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46945 set_fs(old_fs);
46946
46947 return res;
46948 @@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
46949 old_fs = get_fs();
46950 set_fs(get_ds());
46951 /* The cast to a user pointer is valid due to the set_fs() */
46952 - res = vfs_write(file, (const char __user *)buf, count, &pos);
46953 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46954 set_fs(old_fs);
46955
46956 return res;
46957 @@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
46958 goto err;
46959
46960 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46961 - vec[i].iov_base = (void __user *) page_address(page);
46962 + vec[i].iov_base = (void __force_user *) page_address(page);
46963 vec[i].iov_len = this_len;
46964 spd.pages[i] = page;
46965 spd.nr_pages++;
46966 @@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46967 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46968 {
46969 while (!pipe->nrbufs) {
46970 - if (!pipe->writers)
46971 + if (!atomic_read(&pipe->writers))
46972 return 0;
46973
46974 - if (!pipe->waiting_writers && sd->num_spliced)
46975 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46976 return 0;
46977
46978 if (sd->flags & SPLICE_F_NONBLOCK)
46979 @@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
46980 * out of the pipe right after the splice_to_pipe(). So set
46981 * PIPE_READERS appropriately.
46982 */
46983 - pipe->readers = 1;
46984 + atomic_set(&pipe->readers, 1);
46985
46986 current->splice_pipe = pipe;
46987 }
46988 @@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46989 ret = -ERESTARTSYS;
46990 break;
46991 }
46992 - if (!pipe->writers)
46993 + if (!atomic_read(&pipe->writers))
46994 break;
46995 - if (!pipe->waiting_writers) {
46996 + if (!atomic_read(&pipe->waiting_writers)) {
46997 if (flags & SPLICE_F_NONBLOCK) {
46998 ret = -EAGAIN;
46999 break;
47000 @@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
47001 pipe_lock(pipe);
47002
47003 while (pipe->nrbufs >= pipe->buffers) {
47004 - if (!pipe->readers) {
47005 + if (!atomic_read(&pipe->readers)) {
47006 send_sig(SIGPIPE, current, 0);
47007 ret = -EPIPE;
47008 break;
47009 @@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
47010 ret = -ERESTARTSYS;
47011 break;
47012 }
47013 - pipe->waiting_writers++;
47014 + atomic_inc(&pipe->waiting_writers);
47015 pipe_wait(pipe);
47016 - pipe->waiting_writers--;
47017 + atomic_dec(&pipe->waiting_writers);
47018 }
47019
47020 pipe_unlock(pipe);
47021 @@ -1819,14 +1819,14 @@ retry:
47022 pipe_double_lock(ipipe, opipe);
47023
47024 do {
47025 - if (!opipe->readers) {
47026 + if (!atomic_read(&opipe->readers)) {
47027 send_sig(SIGPIPE, current, 0);
47028 if (!ret)
47029 ret = -EPIPE;
47030 break;
47031 }
47032
47033 - if (!ipipe->nrbufs && !ipipe->writers)
47034 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
47035 break;
47036
47037 /*
47038 @@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
47039 pipe_double_lock(ipipe, opipe);
47040
47041 do {
47042 - if (!opipe->readers) {
47043 + if (!atomic_read(&opipe->readers)) {
47044 send_sig(SIGPIPE, current, 0);
47045 if (!ret)
47046 ret = -EPIPE;
47047 @@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
47048 * return EAGAIN if we have the potential of some data in the
47049 * future, otherwise just return 0
47050 */
47051 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
47052 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
47053 ret = -EAGAIN;
47054
47055 pipe_unlock(ipipe);
47056 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
47057 index 7fdf6a7..e6cd8ad 100644
47058 --- a/fs/sysfs/dir.c
47059 +++ b/fs/sysfs/dir.c
47060 @@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
47061 struct sysfs_dirent *sd;
47062 int rc;
47063
47064 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
47065 + const char *parent_name = parent_sd->s_name;
47066 +
47067 + mode = S_IFDIR | S_IRWXU;
47068 +
47069 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
47070 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
47071 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
47072 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
47073 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
47074 +#endif
47075 +
47076 /* allocate */
47077 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
47078 if (!sd)
47079 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
47080 index 779789a..f58193c 100644
47081 --- a/fs/sysfs/file.c
47082 +++ b/fs/sysfs/file.c
47083 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
47084
47085 struct sysfs_open_dirent {
47086 atomic_t refcnt;
47087 - atomic_t event;
47088 + atomic_unchecked_t event;
47089 wait_queue_head_t poll;
47090 struct list_head buffers; /* goes through sysfs_buffer.list */
47091 };
47092 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
47093 if (!sysfs_get_active(attr_sd))
47094 return -ENODEV;
47095
47096 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
47097 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
47098 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
47099
47100 sysfs_put_active(attr_sd);
47101 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
47102 return -ENOMEM;
47103
47104 atomic_set(&new_od->refcnt, 0);
47105 - atomic_set(&new_od->event, 1);
47106 + atomic_set_unchecked(&new_od->event, 1);
47107 init_waitqueue_head(&new_od->poll);
47108 INIT_LIST_HEAD(&new_od->buffers);
47109 goto retry;
47110 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
47111
47112 sysfs_put_active(attr_sd);
47113
47114 - if (buffer->event != atomic_read(&od->event))
47115 + if (buffer->event != atomic_read_unchecked(&od->event))
47116 goto trigger;
47117
47118 return DEFAULT_POLLMASK;
47119 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
47120
47121 od = sd->s_attr.open;
47122 if (od) {
47123 - atomic_inc(&od->event);
47124 + atomic_inc_unchecked(&od->event);
47125 wake_up_interruptible(&od->poll);
47126 }
47127
47128 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
47129 index a7ac78f..02158e1 100644
47130 --- a/fs/sysfs/symlink.c
47131 +++ b/fs/sysfs/symlink.c
47132 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
47133
47134 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47135 {
47136 - char *page = nd_get_link(nd);
47137 + const char *page = nd_get_link(nd);
47138 if (!IS_ERR(page))
47139 free_page((unsigned long)page);
47140 }
47141 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
47142 index c175b4d..8f36a16 100644
47143 --- a/fs/udf/misc.c
47144 +++ b/fs/udf/misc.c
47145 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
47146
47147 u8 udf_tag_checksum(const struct tag *t)
47148 {
47149 - u8 *data = (u8 *)t;
47150 + const u8 *data = (const u8 *)t;
47151 u8 checksum = 0;
47152 int i;
47153 for (i = 0; i < sizeof(struct tag); ++i)
47154 diff --git a/fs/utimes.c b/fs/utimes.c
47155 index ba653f3..06ea4b1 100644
47156 --- a/fs/utimes.c
47157 +++ b/fs/utimes.c
47158 @@ -1,6 +1,7 @@
47159 #include <linux/compiler.h>
47160 #include <linux/file.h>
47161 #include <linux/fs.h>
47162 +#include <linux/security.h>
47163 #include <linux/linkage.h>
47164 #include <linux/mount.h>
47165 #include <linux/namei.h>
47166 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
47167 goto mnt_drop_write_and_out;
47168 }
47169 }
47170 +
47171 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47172 + error = -EACCES;
47173 + goto mnt_drop_write_and_out;
47174 + }
47175 +
47176 mutex_lock(&inode->i_mutex);
47177 error = notify_change(path->dentry, &newattrs);
47178 mutex_unlock(&inode->i_mutex);
47179 diff --git a/fs/xattr.c b/fs/xattr.c
47180 index 67583de..c5aad14 100644
47181 --- a/fs/xattr.c
47182 +++ b/fs/xattr.c
47183 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47184 * Extended attribute SET operations
47185 */
47186 static long
47187 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
47188 +setxattr(struct path *path, const char __user *name, const void __user *value,
47189 size_t size, int flags)
47190 {
47191 int error;
47192 @@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
47193 return PTR_ERR(kvalue);
47194 }
47195
47196 - error = vfs_setxattr(d, kname, kvalue, size, flags);
47197 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47198 + error = -EACCES;
47199 + goto out;
47200 + }
47201 +
47202 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47203 +out:
47204 kfree(kvalue);
47205 return error;
47206 }
47207 @@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
47208 return error;
47209 error = mnt_want_write(path.mnt);
47210 if (!error) {
47211 - error = setxattr(path.dentry, name, value, size, flags);
47212 + error = setxattr(&path, name, value, size, flags);
47213 mnt_drop_write(path.mnt);
47214 }
47215 path_put(&path);
47216 @@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
47217 return error;
47218 error = mnt_want_write(path.mnt);
47219 if (!error) {
47220 - error = setxattr(path.dentry, name, value, size, flags);
47221 + error = setxattr(&path, name, value, size, flags);
47222 mnt_drop_write(path.mnt);
47223 }
47224 path_put(&path);
47225 @@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
47226 const void __user *,value, size_t, size, int, flags)
47227 {
47228 struct file *f;
47229 - struct dentry *dentry;
47230 int error = -EBADF;
47231
47232 f = fget(fd);
47233 if (!f)
47234 return error;
47235 - dentry = f->f_path.dentry;
47236 - audit_inode(NULL, dentry);
47237 + audit_inode(NULL, f->f_path.dentry);
47238 error = mnt_want_write_file(f);
47239 if (!error) {
47240 - error = setxattr(dentry, name, value, size, flags);
47241 + error = setxattr(&f->f_path, name, value, size, flags);
47242 mnt_drop_write(f->f_path.mnt);
47243 }
47244 fput(f);
47245 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
47246 index 8d5a506..7f62712 100644
47247 --- a/fs/xattr_acl.c
47248 +++ b/fs/xattr_acl.c
47249 @@ -17,8 +17,8 @@
47250 struct posix_acl *
47251 posix_acl_from_xattr(const void *value, size_t size)
47252 {
47253 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47254 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47255 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47256 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47257 int count;
47258 struct posix_acl *acl;
47259 struct posix_acl_entry *acl_e;
47260 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
47261 index d0ab788..827999b 100644
47262 --- a/fs/xfs/xfs_bmap.c
47263 +++ b/fs/xfs/xfs_bmap.c
47264 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
47265 int nmap,
47266 int ret_nmap);
47267 #else
47268 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47269 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47270 #endif /* DEBUG */
47271
47272 STATIC int
47273 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
47274 index 79d05e8..e3e5861 100644
47275 --- a/fs/xfs/xfs_dir2_sf.c
47276 +++ b/fs/xfs/xfs_dir2_sf.c
47277 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
47278 }
47279
47280 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
47281 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47282 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47283 + char name[sfep->namelen];
47284 + memcpy(name, sfep->name, sfep->namelen);
47285 + if (filldir(dirent, name, sfep->namelen,
47286 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
47287 + *offset = off & 0x7fffffff;
47288 + return 0;
47289 + }
47290 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47291 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47292 *offset = off & 0x7fffffff;
47293 return 0;
47294 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
47295 index d99a905..9f88202 100644
47296 --- a/fs/xfs/xfs_ioctl.c
47297 +++ b/fs/xfs/xfs_ioctl.c
47298 @@ -128,7 +128,7 @@ xfs_find_handle(
47299 }
47300
47301 error = -EFAULT;
47302 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47303 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47304 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47305 goto out_put;
47306
47307 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
47308 index 23ce927..e274cc1 100644
47309 --- a/fs/xfs/xfs_iops.c
47310 +++ b/fs/xfs/xfs_iops.c
47311 @@ -447,7 +447,7 @@ xfs_vn_put_link(
47312 struct nameidata *nd,
47313 void *p)
47314 {
47315 - char *s = nd_get_link(nd);
47316 + const char *s = nd_get_link(nd);
47317
47318 if (!IS_ERR(s))
47319 kfree(s);
47320 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
47321 new file mode 100644
47322 index 0000000..41df561
47323 --- /dev/null
47324 +++ b/grsecurity/Kconfig
47325 @@ -0,0 +1,1075 @@
47326 +#
47327 +# grecurity configuration
47328 +#
47329 +
47330 +menu "Grsecurity"
47331 +
47332 +config GRKERNSEC
47333 + bool "Grsecurity"
47334 + select CRYPTO
47335 + select CRYPTO_SHA256
47336 + help
47337 + If you say Y here, you will be able to configure many features
47338 + that will enhance the security of your system. It is highly
47339 + recommended that you say Y here and read through the help
47340 + for each option so that you fully understand the features and
47341 + can evaluate their usefulness for your machine.
47342 +
47343 +choice
47344 + prompt "Security Level"
47345 + depends on GRKERNSEC
47346 + default GRKERNSEC_CUSTOM
47347 +
47348 +config GRKERNSEC_LOW
47349 + bool "Low"
47350 + select GRKERNSEC_LINK
47351 + select GRKERNSEC_FIFO
47352 + select GRKERNSEC_RANDNET
47353 + select GRKERNSEC_DMESG
47354 + select GRKERNSEC_CHROOT
47355 + select GRKERNSEC_CHROOT_CHDIR
47356 +
47357 + help
47358 + If you choose this option, several of the grsecurity options will
47359 + be enabled that will give you greater protection against a number
47360 + of attacks, while assuring that none of your software will have any
47361 + conflicts with the additional security measures. If you run a lot
47362 + of unusual software, or you are having problems with the higher
47363 + security levels, you should say Y here. With this option, the
47364 + following features are enabled:
47365 +
47366 + - Linking restrictions
47367 + - FIFO restrictions
47368 + - Restricted dmesg
47369 + - Enforced chdir("/") on chroot
47370 + - Runtime module disabling
47371 +
47372 +config GRKERNSEC_MEDIUM
47373 + bool "Medium"
47374 + select PAX
47375 + select PAX_EI_PAX
47376 + select PAX_PT_PAX_FLAGS
47377 + select PAX_HAVE_ACL_FLAGS
47378 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47379 + select GRKERNSEC_CHROOT
47380 + select GRKERNSEC_CHROOT_SYSCTL
47381 + select GRKERNSEC_LINK
47382 + select GRKERNSEC_FIFO
47383 + select GRKERNSEC_DMESG
47384 + select GRKERNSEC_RANDNET
47385 + select GRKERNSEC_FORKFAIL
47386 + select GRKERNSEC_TIME
47387 + select GRKERNSEC_SIGNAL
47388 + select GRKERNSEC_CHROOT
47389 + select GRKERNSEC_CHROOT_UNIX
47390 + select GRKERNSEC_CHROOT_MOUNT
47391 + select GRKERNSEC_CHROOT_PIVOT
47392 + select GRKERNSEC_CHROOT_DOUBLE
47393 + select GRKERNSEC_CHROOT_CHDIR
47394 + select GRKERNSEC_CHROOT_MKNOD
47395 + select GRKERNSEC_PROC
47396 + select GRKERNSEC_PROC_USERGROUP
47397 + select PAX_RANDUSTACK
47398 + select PAX_ASLR
47399 + select PAX_RANDMMAP
47400 + select PAX_REFCOUNT if (X86 || SPARC64)
47401 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
47402 +
47403 + help
47404 + If you say Y here, several features in addition to those included
47405 + in the low additional security level will be enabled. These
47406 + features provide even more security to your system, though in rare
47407 + cases they may be incompatible with very old or poorly written
47408 + software. If you enable this option, make sure that your auth
47409 + service (identd) is running as gid 1001. With this option,
47410 + the following features (in addition to those provided in the
47411 + low additional security level) will be enabled:
47412 +
47413 + - Failed fork logging
47414 + - Time change logging
47415 + - Signal logging
47416 + - Deny mounts in chroot
47417 + - Deny double chrooting
47418 + - Deny sysctl writes in chroot
47419 + - Deny mknod in chroot
47420 + - Deny access to abstract AF_UNIX sockets out of chroot
47421 + - Deny pivot_root in chroot
47422 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
47423 + - /proc restrictions with special GID set to 10 (usually wheel)
47424 + - Address Space Layout Randomization (ASLR)
47425 + - Prevent exploitation of most refcount overflows
47426 + - Bounds checking of copying between the kernel and userland
47427 +
47428 +config GRKERNSEC_HIGH
47429 + bool "High"
47430 + select GRKERNSEC_LINK
47431 + select GRKERNSEC_FIFO
47432 + select GRKERNSEC_DMESG
47433 + select GRKERNSEC_FORKFAIL
47434 + select GRKERNSEC_TIME
47435 + select GRKERNSEC_SIGNAL
47436 + select GRKERNSEC_CHROOT
47437 + select GRKERNSEC_CHROOT_SHMAT
47438 + select GRKERNSEC_CHROOT_UNIX
47439 + select GRKERNSEC_CHROOT_MOUNT
47440 + select GRKERNSEC_CHROOT_FCHDIR
47441 + select GRKERNSEC_CHROOT_PIVOT
47442 + select GRKERNSEC_CHROOT_DOUBLE
47443 + select GRKERNSEC_CHROOT_CHDIR
47444 + select GRKERNSEC_CHROOT_MKNOD
47445 + select GRKERNSEC_CHROOT_CAPS
47446 + select GRKERNSEC_CHROOT_SYSCTL
47447 + select GRKERNSEC_CHROOT_FINDTASK
47448 + select GRKERNSEC_SYSFS_RESTRICT
47449 + select GRKERNSEC_PROC
47450 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47451 + select GRKERNSEC_HIDESYM
47452 + select GRKERNSEC_BRUTE
47453 + select GRKERNSEC_PROC_USERGROUP
47454 + select GRKERNSEC_KMEM
47455 + select GRKERNSEC_RESLOG
47456 + select GRKERNSEC_RANDNET
47457 + select GRKERNSEC_PROC_ADD
47458 + select GRKERNSEC_CHROOT_CHMOD
47459 + select GRKERNSEC_CHROOT_NICE
47460 + select GRKERNSEC_SETXID
47461 + select GRKERNSEC_AUDIT_MOUNT
47462 + select GRKERNSEC_MODHARDEN if (MODULES)
47463 + select GRKERNSEC_HARDEN_PTRACE
47464 + select GRKERNSEC_PTRACE_READEXEC
47465 + select GRKERNSEC_VM86 if (X86_32)
47466 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
47467 + select PAX
47468 + select PAX_RANDUSTACK
47469 + select PAX_ASLR
47470 + select PAX_RANDMMAP
47471 + select PAX_NOEXEC
47472 + select PAX_MPROTECT
47473 + select PAX_EI_PAX
47474 + select PAX_PT_PAX_FLAGS
47475 + select PAX_HAVE_ACL_FLAGS
47476 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
47477 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
47478 + select PAX_RANDKSTACK if (X86_TSC && X86)
47479 + select PAX_SEGMEXEC if (X86_32)
47480 + select PAX_PAGEEXEC
47481 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
47482 + select PAX_EMUTRAMP if (PARISC)
47483 + select PAX_EMUSIGRT if (PARISC)
47484 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
47485 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
47486 + select PAX_REFCOUNT if (X86 || SPARC64)
47487 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
47488 + help
47489 + If you say Y here, many of the features of grsecurity will be
47490 + enabled, which will protect you against many kinds of attacks
47491 + against your system. The heightened security comes at a cost
47492 + of an increased chance of incompatibilities with rare software
47493 + on your machine. Since this security level enables PaX, you should
47494 + view <http://pax.grsecurity.net> and read about the PaX
47495 + project. While you are there, download chpax and run it on
47496 + binaries that cause problems with PaX. Also remember that
47497 + since the /proc restrictions are enabled, you must run your
47498 + identd as gid 1001. This security level enables the following
47499 + features in addition to those listed in the low and medium
47500 + security levels:
47501 +
47502 + - Additional /proc restrictions
47503 + - Chmod restrictions in chroot
47504 + - No signals, ptrace, or viewing of processes outside of chroot
47505 + - Capability restrictions in chroot
47506 + - Deny fchdir out of chroot
47507 + - Priority restrictions in chroot
47508 + - Segmentation-based implementation of PaX
47509 + - Mprotect restrictions
47510 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
47511 + - Kernel stack randomization
47512 + - Mount/unmount/remount logging
47513 + - Kernel symbol hiding
47514 + - Hardening of module auto-loading
47515 + - Ptrace restrictions
47516 + - Restricted vm86 mode
47517 + - Restricted sysfs/debugfs
47518 + - Active kernel exploit response
47519 +
47520 +config GRKERNSEC_CUSTOM
47521 + bool "Custom"
47522 + help
47523 + If you say Y here, you will be able to configure every grsecurity
47524 + option, which allows you to enable many more features that aren't
47525 + covered in the basic security levels. These additional features
47526 + include TPE, socket restrictions, and the sysctl system for
47527 + grsecurity. It is advised that you read through the help for
47528 + each option to determine its usefulness in your situation.
47529 +
47530 +endchoice
47531 +
47532 +menu "Memory Protections"
47533 +depends on GRKERNSEC
47534 +
47535 +config GRKERNSEC_KMEM
47536 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
47537 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
47538 + help
47539 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
47540 + be written to or read from to modify or leak the contents of the running
47541 + kernel. /dev/port will also not be allowed to be opened. If you have module
47542 + support disabled, enabling this will close up four ways that are
47543 + currently used to insert malicious code into the running kernel.
47544 + Even with all these features enabled, we still highly recommend that
47545 + you use the RBAC system, as it is still possible for an attacker to
47546 + modify the running kernel through privileged I/O granted by ioperm/iopl.
47547 + If you are not using XFree86, you may be able to stop this additional
47548 + case by enabling the 'Disable privileged I/O' option. Though nothing
47549 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
47550 + but only to video memory, which is the only writing we allow in this
47551 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, the
47552 + mappings cannot later be mprotected with PROT_WRITE.
47553 + It is highly recommended that you say Y here if you meet all the
47554 + conditions above.
47555 +
47556 +config GRKERNSEC_VM86
47557 + bool "Restrict VM86 mode"
47558 + depends on X86_32
47559 +
47560 + help
47561 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
47562 + make use of a special execution mode on 32bit x86 processors called
47563 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
47564 + video cards and will still work with this option enabled. The purpose
47565 + of the option is to prevent exploitation of emulation errors in
47566 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
47567 + Nearly all users should be able to enable this option.
47568 +
47569 +config GRKERNSEC_IO
47570 + bool "Disable privileged I/O"
47571 + depends on X86
47572 + select RTC_CLASS
47573 + select RTC_INTF_DEV
47574 + select RTC_DRV_CMOS
47575 +
47576 + help
47577 + If you say Y here, all ioperm and iopl calls will return an error.
47578 + Ioperm and iopl can be used to modify the running kernel.
47579 + Unfortunately, some programs need this access to operate properly,
47580 + the most notable of which are XFree86 and hwclock. hwclock can be
47581 + remedied by having RTC support in the kernel, so real-time
47582 + clock support is enabled if this option is enabled, to ensure
47583 + that hwclock operates correctly. XFree86 still will not
47584 + operate correctly with this option enabled, so DO NOT CHOOSE Y
47585 + IF YOU USE XFree86. If you use XFree86 and you still want to
47586 + protect your kernel against modification, use the RBAC system.
47587 +
47588 +config GRKERNSEC_PROC_MEMMAP
47589 + bool "Harden ASLR against information leaks and entropy reduction"
47590 + default y if (PAX_NOEXEC || PAX_ASLR)
47591 + depends on PAX_NOEXEC || PAX_ASLR
47592 + help
47593 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
47594 + give no information about the addresses of its mappings if
47595 + PaX features that rely on random addresses are enabled on the task.
47596 + In addition to sanitizing this information and disabling other
47597 + dangerous sources of information, this option restricts reads of sensitive
47598 + /proc/<pid> entries where the file descriptor was opened in a different
47599 + task than the one performing the read. Such attempts are logged.
47600 + Finally, this option limits argv/env strings for suid/sgid binaries
47601 + to 1MB to prevent a complete exhaustion of the stack entropy provided
47602 + by ASLR.
47603 + If you use PaX it is essential that you say Y here as it closes up
47604 + several holes that make full ASLR useless for suid/sgid binaries.
47605 +
47606 +config GRKERNSEC_BRUTE
47607 + bool "Deter exploit bruteforcing"
47608 + help
47609 + If you say Y here, attempts to bruteforce exploits against forking
47610 + daemons such as apache or sshd, as well as against suid/sgid binaries
47611 + will be deterred. When a child of a forking daemon is killed by PaX
47612 + or crashes due to an illegal instruction or other suspicious signal,
47613 + the parent process will be delayed 30 seconds upon every subsequent
47614 + fork until the administrator is able to assess the situation and
47615 + restart the daemon.
47616 + In the suid/sgid case, the attempt is logged, the user has all their
47617 + processes terminated, and they are prevented from executing any further
47618 + processes for 15 minutes.
47619 + It is recommended that you also enable signal logging in the auditing
47620 + section so that logs are generated when a process triggers a suspicious
47621 + signal.
47622 + If the sysctl option is enabled, a sysctl option with name
47623 + "deter_bruteforce" is created.
47624 +
47625 +
47626 +config GRKERNSEC_MODHARDEN
47627 + bool "Harden module auto-loading"
47628 + depends on MODULES
47629 + help
47630 + If you say Y here, module auto-loading in response to use of some
47631 + feature implemented by an unloaded module will be restricted to
47632 + root users. Enabling this option helps defend against attacks
47633 + by unprivileged users who abuse the auto-loading behavior to
47634 + cause a vulnerable module to load that is then exploited.
47635 +
47636 + If this option prevents a legitimate use of auto-loading for a
47637 + non-root user, the administrator can execute modprobe manually
47638 + with the exact name of the module mentioned in the alert log.
47639 + Alternatively, the administrator can add the module to the list
47640 + of modules loaded at boot by modifying init scripts.
47641 +
47642 + Modification of init scripts will most likely be needed on
47643 + Ubuntu servers with encrypted home directory support enabled,
47644 + as the first non-root user logging in will cause the ecb(aes),
47645 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
47646 +
47647 +config GRKERNSEC_HIDESYM
47648 + bool "Hide kernel symbols"
47649 + help
47650 + If you say Y here, getting information on loaded modules, and
47651 + displaying all kernel symbols through a syscall will be restricted
47652 + to users with CAP_SYS_MODULE. For software compatibility reasons,
47653 + /proc/kallsyms will be restricted to the root user. The RBAC
47654 + system can hide that entry even from root.
47655 +
47656 + This option also prevents leaking of kernel addresses through
47657 + several /proc entries.
47658 +
47659 + Note that this option is only effective provided the following
47660 + conditions are met:
47661 + 1) The kernel using grsecurity is not precompiled by some distribution
47662 + 2) You have also enabled GRKERNSEC_DMESG
47663 + 3) You are using the RBAC system and hiding other files such as your
47664 + kernel image and System.map. Alternatively, enabling this option
47665 + causes the permissions on /boot, /lib/modules, and the kernel
47666 + source directory to change at compile time to prevent
47667 + reading by non-root users.
47668 + If the above conditions are met, this option will aid in providing a
47669 + useful protection against local kernel exploitation of overflows
47670 + and arbitrary read/write vulnerabilities.
47671 +
47672 +config GRKERNSEC_KERN_LOCKOUT
47673 + bool "Active kernel exploit response"
47674 + depends on X86 || ARM || PPC || SPARC
47675 + help
47676 + If you say Y here, when a PaX alert is triggered due to suspicious
47677 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
47678 + or an OOPs occurs due to bad memory accesses, instead of just
47679 + terminating the offending process (and potentially allowing
47680 + a subsequent exploit from the same user), we will take one of two
47681 + actions:
47682 + If the user was root, we will panic the system
47683 + If the user was non-root, we will log the attempt, terminate
47684 + all processes owned by the user, then prevent them from creating
47685 + any new processes until the system is restarted
47686 + This deters repeated kernel exploitation/bruteforcing attempts
47687 + and is useful for later forensics.
47688 +
47689 +endmenu
47690 +menu "Role Based Access Control Options"
47691 +depends on GRKERNSEC
47692 +
47693 +config GRKERNSEC_RBAC_DEBUG
47694 + bool
47695 +
47696 +config GRKERNSEC_NO_RBAC
47697 + bool "Disable RBAC system"
47698 + help
47699 + If you say Y here, the /dev/grsec device will be removed from the kernel,
47700 + preventing the RBAC system from being enabled. You should only say Y
47701 + here if you have no intention of using the RBAC system, so as to prevent
47702 + an attacker with root access from misusing the RBAC system to hide files
47703 + and processes when loadable module support and /dev/[k]mem have been
47704 + locked down.
47705 +
47706 +config GRKERNSEC_ACL_HIDEKERN
47707 + bool "Hide kernel processes"
47708 + help
47709 + If you say Y here, all kernel threads will be hidden to all
47710 + processes but those whose subject has the "view hidden processes"
47711 + flag.
47712 +
47713 +config GRKERNSEC_ACL_MAXTRIES
47714 + int "Maximum tries before password lockout"
47715 + default 3
47716 + help
47717 + This option enforces the maximum number of times a user can attempt
47718 + to authorize themselves with the grsecurity RBAC system before being
47719 + denied the ability to attempt authorization again for a specified time.
47720 + The lower the number, the harder it will be to brute-force a password.
47721 +
47722 +config GRKERNSEC_ACL_TIMEOUT
47723 + int "Time to wait after max password tries, in seconds"
47724 + default 30
47725 + help
47726 + This option specifies the time the user must wait after attempting to
47727 + authorize to the RBAC system with the maximum number of invalid
47728 + passwords. The higher the number, the harder it will be to brute-force
47729 + a password.
47730 +
47731 +endmenu
47732 +menu "Filesystem Protections"
47733 +depends on GRKERNSEC
47734 +
47735 +config GRKERNSEC_PROC
47736 + bool "Proc restrictions"
47737 + help
47738 + If you say Y here, the permissions of the /proc filesystem
47739 + will be altered to enhance system security and privacy. You MUST
47740 + choose either a user only restriction or a user and group restriction.
47741 + Depending upon the option you choose, you can either restrict users to
47742 + see only the processes they themselves run, or choose a group that can
47743 + view all processes and files normally restricted to root if you choose
47744 + the "restrict to user only" option. NOTE: If you're running identd as
47745 + a non-root user, you will have to run it as the group you specify here.
47746 +
47747 +config GRKERNSEC_PROC_USER
47748 + bool "Restrict /proc to user only"
47749 + depends on GRKERNSEC_PROC
47750 + help
47751 + If you say Y here, non-root users will only be able to view their own
47752 + processes, and restricts them from viewing network-related information,
47753 + and viewing kernel symbol and module information.
47754 +
47755 +config GRKERNSEC_PROC_USERGROUP
47756 + bool "Allow special group"
47757 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
47758 + help
47759 + If you say Y here, you will be able to select a group that will be
47760 + able to view all processes and network-related information. If you've
47761 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
47762 + remain hidden. This option is useful if you want to run identd as
47763 + a non-root user.
47764 +
47765 +config GRKERNSEC_PROC_GID
47766 + int "GID for special group"
47767 + depends on GRKERNSEC_PROC_USERGROUP
47768 + default 1001
47769 +
47770 +config GRKERNSEC_PROC_ADD
47771 + bool "Additional restrictions"
47772 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
47773 + help
47774 + If you say Y here, additional restrictions will be placed on
47775 + /proc that keep normal users from viewing device information and
47776 + slabinfo information that could be useful for exploits.
47777 +
47778 +config GRKERNSEC_LINK
47779 + bool "Linking restrictions"
47780 + help
47781 + If you say Y here, /tmp race exploits will be prevented, since users
47782 + will no longer be able to follow symlinks owned by other users in
47783 + world-writable +t directories (e.g. /tmp), unless the owner of the
47784 + symlink is the owner of the directory. Users will also not be
47785 + able to hardlink to files they do not own. If the sysctl option is
47786 + enabled, a sysctl option with name "linking_restrictions" is created.
47787 +
47788 +config GRKERNSEC_FIFO
47789 + bool "FIFO restrictions"
47790 + help
47791 + If you say Y here, users will not be able to write to FIFOs they don't
47792 + own in world-writable +t directories (e.g. /tmp), unless the owner of
47793 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
47794 + option is enabled, a sysctl option with name "fifo_restrictions" is
47795 + created.
47796 +
47797 +config GRKERNSEC_SYSFS_RESTRICT
47798 + bool "Sysfs/debugfs restriction"
47799 + depends on SYSFS
47800 + help
47801 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
47802 + any filesystem normally mounted under it (e.g. debugfs) will be
47803 + mostly accessible only by root. These filesystems generally provide access
47804 + to hardware and debug information that isn't appropriate for unprivileged
47805 + users of the system. Sysfs and debugfs have also become a large source
47806 + of new vulnerabilities, ranging from infoleaks to local compromise.
47807 + There has been very little oversight with an eye toward security involved
47808 + in adding new exporters of information to these filesystems, so their
47809 + use is discouraged.
47810 + For reasons of compatibility, a few directories have been whitelisted
47811 + for access by non-root users:
47812 + /sys/fs/selinux
47813 + /sys/fs/fuse
47814 + /sys/devices/system/cpu
47815 +
47816 +config GRKERNSEC_ROFS
47817 + bool "Runtime read-only mount protection"
47818 + help
47819 + If you say Y here, a sysctl option with name "romount_protect" will
47820 + be created. By setting this option to 1 at runtime, filesystems
47821 + will be protected in the following ways:
47822 + * No new writable mounts will be allowed
47823 + * Existing read-only mounts won't be able to be remounted read/write
47824 + * Write operations will be denied on all block devices
47825 + This option acts independently of grsec_lock: once it is set to 1,
47826 + it cannot be turned off. Therefore, please be mindful of the resulting
47827 + behavior if this option is enabled in an init script on a read-only
47828 + filesystem. This feature is mainly intended for secure embedded systems.
47829 +
47830 +config GRKERNSEC_CHROOT
47831 + bool "Chroot jail restrictions"
47832 + help
47833 + If you say Y here, you will be able to choose several options that will
47834 + make breaking out of a chrooted jail much more difficult. If you
47835 + encounter no software incompatibilities with the following options, it
47836 + is recommended that you enable each one.
47837 +
47838 +config GRKERNSEC_CHROOT_MOUNT
47839 + bool "Deny mounts"
47840 + depends on GRKERNSEC_CHROOT
47841 + help
47842 + If you say Y here, processes inside a chroot will not be able to
47843 + mount or remount filesystems. If the sysctl option is enabled, a
47844 + sysctl option with name "chroot_deny_mount" is created.
47845 +
47846 +config GRKERNSEC_CHROOT_DOUBLE
47847 + bool "Deny double-chroots"
47848 + depends on GRKERNSEC_CHROOT
47849 + help
47850 + If you say Y here, processes inside a chroot will not be able to chroot
47851 + again outside the chroot. This is a widely used method of breaking
47852 + out of a chroot jail and should not be allowed. If the sysctl
47853 + option is enabled, a sysctl option with name
47854 + "chroot_deny_chroot" is created.
47855 +
47856 +config GRKERNSEC_CHROOT_PIVOT
47857 + bool "Deny pivot_root in chroot"
47858 + depends on GRKERNSEC_CHROOT
47859 + help
47860 + If you say Y here, processes inside a chroot will not be able to use
47861 + a function called pivot_root() that was introduced in Linux 2.3.41. It
47862 + works similarly to chroot in that it changes the root filesystem. This
47863 + function could be misused in a chrooted process to attempt to break out
47864 + of the chroot, and therefore should not be allowed. If the sysctl
47865 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
47866 + created.
47867 +
47868 +config GRKERNSEC_CHROOT_CHDIR
47869 + bool "Enforce chdir(\"/\") on all chroots"
47870 + depends on GRKERNSEC_CHROOT
47871 + help
47872 + If you say Y here, the current working directory of all newly-chrooted
47873 + applications will be set to the root directory of the chroot.
47874 + The man page on chroot(2) states:
47875 + Note that this call does not change the current working
47876 + directory, so that `.' can be outside the tree rooted at
47877 + `/'. In particular, the super-user can escape from a
47878 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
47879 +
47880 + It is recommended that you say Y here, since it's not known to break
47881 + any software. If the sysctl option is enabled, a sysctl option with
47882 + name "chroot_enforce_chdir" is created.
47883 +
47884 +config GRKERNSEC_CHROOT_CHMOD
47885 + bool "Deny (f)chmod +s"
47886 + depends on GRKERNSEC_CHROOT
47887 + help
47888 + If you say Y here, processes inside a chroot will not be able to chmod
47889 + or fchmod files to make them have suid or sgid bits. This protects
47890 + against another published method of breaking a chroot. If the sysctl
47891 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
47892 + created.
47893 +
47894 +config GRKERNSEC_CHROOT_FCHDIR
47895 + bool "Deny fchdir out of chroot"
47896 + depends on GRKERNSEC_CHROOT
47897 + help
47898 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
47899 + to a file descriptor of the chrooting process that points to a directory
47900 + outside the filesystem will be stopped. If the sysctl option
47901 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
47902 +
47903 +config GRKERNSEC_CHROOT_MKNOD
47904 + bool "Deny mknod"
47905 + depends on GRKERNSEC_CHROOT
47906 + help
47907 + If you say Y here, processes inside a chroot will not be allowed to
47908 + mknod. The problem with using mknod inside a chroot is that it
47909 + would allow an attacker to create a device entry that is the same
47910 + as one on the physical root of your system, which could range from
47911 + anything from the console device to a device for your harddrive (which
47912 + they could then use to wipe the drive or steal data). It is recommended
47913 + that you say Y here, unless you run into software incompatibilities.
47914 + If the sysctl option is enabled, a sysctl option with name
47915 + "chroot_deny_mknod" is created.
47916 +
47917 +config GRKERNSEC_CHROOT_SHMAT
47918 + bool "Deny shmat() out of chroot"
47919 + depends on GRKERNSEC_CHROOT
47920 + help
47921 + If you say Y here, processes inside a chroot will not be able to attach
47922 + to shared memory segments that were created outside of the chroot jail.
47923 + It is recommended that you say Y here. If the sysctl option is enabled,
47924 + a sysctl option with name "chroot_deny_shmat" is created.
47925 +
47926 +config GRKERNSEC_CHROOT_UNIX
47927 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
47928 + depends on GRKERNSEC_CHROOT
47929 + help
47930 + If you say Y here, processes inside a chroot will not be able to
47931 + connect to abstract (meaning not belonging to a filesystem) Unix
47932 + domain sockets that were bound outside of a chroot. It is recommended
47933 + that you say Y here. If the sysctl option is enabled, a sysctl option
47934 + with name "chroot_deny_unix" is created.
47935 +
47936 +config GRKERNSEC_CHROOT_FINDTASK
47937 + bool "Protect outside processes"
47938 + depends on GRKERNSEC_CHROOT
47939 + help
47940 + If you say Y here, processes inside a chroot will not be able to
47941 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
47942 + getsid, or view any process outside of the chroot. If the sysctl
47943 + option is enabled, a sysctl option with name "chroot_findtask" is
47944 + created.
47945 +
47946 +config GRKERNSEC_CHROOT_NICE
47947 + bool "Restrict priority changes"
47948 + depends on GRKERNSEC_CHROOT
47949 + help
47950 + If you say Y here, processes inside a chroot will not be able to raise
47951 + the priority of processes in the chroot, or alter the priority of
47952 + processes outside the chroot. This provides more security than simply
47953 + removing CAP_SYS_NICE from the process' capability set. If the
47954 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
47955 + is created.
47956 +
47957 +config GRKERNSEC_CHROOT_SYSCTL
47958 + bool "Deny sysctl writes"
47959 + depends on GRKERNSEC_CHROOT
47960 + help
47961 + If you say Y here, an attacker in a chroot will not be able to
47962 + write to sysctl entries, either by sysctl(2) or through a /proc
47963 + interface. It is strongly recommended that you say Y here. If the
47964 + sysctl option is enabled, a sysctl option with name
47965 + "chroot_deny_sysctl" is created.
47966 +
47967 +config GRKERNSEC_CHROOT_CAPS
47968 + bool "Capability restrictions"
47969 + depends on GRKERNSEC_CHROOT
47970 + help
47971 + If you say Y here, the capabilities on all processes within a
47972 + chroot jail will be lowered to stop module insertion, raw i/o,
47973 + system and net admin tasks, rebooting the system, modifying immutable
47974 + files, modifying IPC owned by another, and changing the system time.
47975 + This is left an option because it can break some apps. Disable this
47976 + if your chrooted apps are having problems performing those kinds of
47977 + tasks. If the sysctl option is enabled, a sysctl option with
47978 + name "chroot_caps" is created.
47979 +
47980 +endmenu
47981 +menu "Kernel Auditing"
47982 +depends on GRKERNSEC
47983 +
47984 +config GRKERNSEC_AUDIT_GROUP
47985 + bool "Single group for auditing"
47986 + help
47987 + If you say Y here, the exec, chdir, and (un)mount logging features
47988 + will only operate on a group you specify. This option is recommended
47989 + if you only want to watch certain users instead of having a large
47990 + amount of logs from the entire system. If the sysctl option is enabled,
47991 + a sysctl option with name "audit_group" is created.
47992 +
47993 +config GRKERNSEC_AUDIT_GID
47994 + int "GID for auditing"
47995 + depends on GRKERNSEC_AUDIT_GROUP
47996 + default 1007
47997 +
47998 +config GRKERNSEC_EXECLOG
47999 + bool "Exec logging"
48000 + help
48001 + If you say Y here, all execve() calls will be logged (since the
48002 + other exec*() calls are frontends to execve(), all execution
48003 + will be logged). Useful for shell-servers that like to keep track
48004 + of their users. If the sysctl option is enabled, a sysctl option with
48005 + name "exec_logging" is created.
48006 + WARNING: This option when enabled will produce a LOT of logs, especially
48007 + on an active system.
48008 +
48009 +config GRKERNSEC_RESLOG
48010 + bool "Resource logging"
48011 + help
48012 + If you say Y here, all attempts to overstep resource limits will
48013 + be logged with the resource name, the requested size, and the current
48014 + limit. It is highly recommended that you say Y here. If the sysctl
48015 + option is enabled, a sysctl option with name "resource_logging" is
48016 + created. If the RBAC system is enabled, the sysctl value is ignored.
48017 +
48018 +config GRKERNSEC_CHROOT_EXECLOG
48019 + bool "Log execs within chroot"
48020 + help
48021 + If you say Y here, all executions inside a chroot jail will be logged
48022 + to syslog. This can cause a large amount of logs if certain
48023 + applications (eg. djb's daemontools) are installed on the system, and
48024 + is therefore left as an option. If the sysctl option is enabled, a
48025 + sysctl option with name "chroot_execlog" is created.
48026 +
48027 +config GRKERNSEC_AUDIT_PTRACE
48028 + bool "Ptrace logging"
48029 + help
48030 + If you say Y here, all attempts to attach to a process via ptrace
48031 + will be logged. If the sysctl option is enabled, a sysctl option
48032 + with name "audit_ptrace" is created.
48033 +
48034 +config GRKERNSEC_AUDIT_CHDIR
48035 + bool "Chdir logging"
48036 + help
48037 + If you say Y here, all chdir() calls will be logged. If the sysctl
48038 + option is enabled, a sysctl option with name "audit_chdir" is created.
48039 +
48040 +config GRKERNSEC_AUDIT_MOUNT
48041 + bool "(Un)Mount logging"
48042 + help
48043 + If you say Y here, all mounts and unmounts will be logged. If the
48044 + sysctl option is enabled, a sysctl option with name "audit_mount" is
48045 + created.
48046 +
48047 +config GRKERNSEC_SIGNAL
48048 + bool "Signal logging"
48049 + help
48050 + If you say Y here, certain important signals will be logged, such as
48051 + SIGSEGV, which will as a result inform you of when an error in a program
48052 + occurred, which in some cases could mean a possible exploit attempt.
48053 + If the sysctl option is enabled, a sysctl option with name
48054 + "signal_logging" is created.
48055 +
48056 +config GRKERNSEC_FORKFAIL
48057 + bool "Fork failure logging"
48058 + help
48059 + If you say Y here, all failed fork() attempts will be logged.
48060 + This could suggest a fork bomb, or someone attempting to overstep
48061 + their process limit. If the sysctl option is enabled, a sysctl option
48062 + with name "forkfail_logging" is created.
48063 +
48064 +config GRKERNSEC_TIME
48065 + bool "Time change logging"
48066 + help
48067 + If you say Y here, any changes of the system clock will be logged.
48068 + If the sysctl option is enabled, a sysctl option with name
48069 + "timechange_logging" is created.
48070 +
48071 +config GRKERNSEC_PROC_IPADDR
48072 + bool "/proc/<pid>/ipaddr support"
48073 + help
48074 + If you say Y here, a new entry will be added to each /proc/<pid>
48075 + directory that contains the IP address of the person using the task.
48076 + The IP is carried across local TCP and AF_UNIX stream sockets.
48077 + This information can be useful for IDS/IPSes to perform remote response
48078 + to a local attack. The entry is readable by only the owner of the
48079 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
48080 + the RBAC system), and thus does not create privacy concerns.
48081 +
48082 +config GRKERNSEC_RWXMAP_LOG
48083 + bool 'Denied RWX mmap/mprotect logging'
48084 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
48085 + help
48086 + If you say Y here, calls to mmap() and mprotect() with explicit
48087 + usage of PROT_WRITE and PROT_EXEC together will be logged when
48088 + denied by the PAX_MPROTECT feature. If the sysctl option is
48089 + enabled, a sysctl option with name "rwxmap_logging" is created.
48090 +
48091 +config GRKERNSEC_AUDIT_TEXTREL
48092 + bool 'ELF text relocations logging (READ HELP)'
48093 + depends on PAX_MPROTECT
48094 + help
48095 + If you say Y here, text relocations will be logged with the filename
48096 + of the offending library or binary. The purpose of the feature is
48097 + to help Linux distribution developers get rid of libraries and
48098 + binaries that need text relocations which hinder the future progress
48099 + of PaX. Only Linux distribution developers should say Y here, and
48100 + never on a production machine, as this option creates an information
48101 + leak that could aid an attacker in defeating the randomization of
48102 + a single memory region. If the sysctl option is enabled, a sysctl
48103 + option with name "audit_textrel" is created.
48104 +
48105 +endmenu
48106 +
48107 +menu "Executable Protections"
48108 +depends on GRKERNSEC
48109 +
48110 +config GRKERNSEC_DMESG
48111 + bool "Dmesg(8) restriction"
48112 + help
48113 + If you say Y here, non-root users will not be able to use dmesg(8)
48114 + to view up to the last 4kb of messages in the kernel's log buffer.
48115 + The kernel's log buffer often contains kernel addresses and other
48116 + identifying information useful to an attacker in fingerprinting a
48117 + system for a targeted exploit.
48118 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
48119 + created.
48120 +
48121 +config GRKERNSEC_HARDEN_PTRACE
48122 + bool "Deter ptrace-based process snooping"
48123 + help
48124 + If you say Y here, TTY sniffers and other malicious monitoring
48125 + programs implemented through ptrace will be defeated. If you
48126 + have been using the RBAC system, this option has already been
48127 + enabled for several years for all users, with the ability to make
48128 + fine-grained exceptions.
48129 +
48130 + This option only affects the ability of non-root users to ptrace
48131 + processes that are not a descendant of the ptracing process.
48132 + This means that strace ./binary and gdb ./binary will still work,
48133 + but attaching to arbitrary processes will not. If the sysctl
48134 + option is enabled, a sysctl option with name "harden_ptrace" is
48135 + created.
48136 +
48137 +config GRKERNSEC_PTRACE_READEXEC
48138 + bool "Require read access to ptrace sensitive binaries"
48139 + help
48140 + If you say Y here, unprivileged users will not be able to ptrace unreadable
48141 + binaries. This option is useful in environments that
48142 + remove the read bits (e.g. file mode 4711) from suid binaries to
48143 + prevent infoleaking of their contents. This option adds
48144 + consistency to the use of that file mode, as the binary could normally
48145 + be read out when run without privileges while ptracing.
48146 +
48147 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
48148 + is created.
48149 +
48150 +config GRKERNSEC_SETXID
48151 + bool "Enforce consistent multithreaded privileges"
48152 + help
48153 + If you say Y here, a change from a root uid to a non-root uid
48154 + in a multithreaded application will cause the resulting uids,
48155 + gids, supplementary groups, and capabilities in that thread
48156 + to be propagated to the other threads of the process. In most
48157 + cases this is unnecessary, as glibc will emulate this behavior
48158 + on behalf of the application. Other libcs do not act in the
48159 + same way, allowing the other threads of the process to continue
48160 + running with root privileges. If the sysctl option is enabled,
48161 + a sysctl option with name "consistent_setxid" is created.
48162 +
48163 +config GRKERNSEC_TPE
48164 + bool "Trusted Path Execution (TPE)"
48165 + help
48166 + If you say Y here, you will be able to choose a gid to add to the
48167 + supplementary groups of users you want to mark as "untrusted."
48168 + These users will not be able to execute any files that are not in
48169 + root-owned directories writable only by root. If the sysctl option
48170 + is enabled, a sysctl option with name "tpe" is created.
48171 +
48172 +config GRKERNSEC_TPE_ALL
48173 + bool "Partially restrict all non-root users"
48174 + depends on GRKERNSEC_TPE
48175 + help
48176 + If you say Y here, all non-root users will be covered under
48177 + a weaker TPE restriction. This is separate from, and in addition to,
48178 + the main TPE options that you have selected elsewhere. Thus, if a
48179 + "trusted" GID is chosen, this restriction applies to even that GID.
48180 + Under this restriction, all non-root users will only be allowed to
48181 + execute files in directories they own that are not group or
48182 + world-writable, or in directories owned by root and writable only by
48183 + root. If the sysctl option is enabled, a sysctl option with name
48184 + "tpe_restrict_all" is created.
48185 +
48186 +config GRKERNSEC_TPE_INVERT
48187 + bool "Invert GID option"
48188 + depends on GRKERNSEC_TPE
48189 + help
48190 + If you say Y here, the group you specify in the TPE configuration will
48191 + decide what group TPE restrictions will be *disabled* for. This
48192 + option is useful if you want TPE restrictions to be applied to most
48193 + users on the system. If the sysctl option is enabled, a sysctl option
48194 + with name "tpe_invert" is created. Unlike other sysctl options, this
48195 + entry will default to on for backward-compatibility.
48196 +
48197 +config GRKERNSEC_TPE_GID
48198 + int "GID for untrusted users"
48199 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
48200 + default 1005
48201 + help
48202 + Setting this GID determines what group TPE restrictions will be
48203 + *enabled* for. If the sysctl option is enabled, a sysctl option
48204 + with name "tpe_gid" is created.
48205 +
48206 +config GRKERNSEC_TPE_GID
48207 + int "GID for trusted users"
48208 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
48209 + default 1005
48210 + help
48211 + Setting this GID determines what group TPE restrictions will be
48212 + *disabled* for. If the sysctl option is enabled, a sysctl option
48213 + with name "tpe_gid" is created.
48214 +
48215 +endmenu
48216 +menu "Network Protections"
48217 +depends on GRKERNSEC
48218 +
48219 +config GRKERNSEC_RANDNET
48220 + bool "Larger entropy pools"
48221 + help
48222 + If you say Y here, the entropy pools used for many features of Linux
48223 + and grsecurity will be doubled in size. Since several grsecurity
48224 + features use additional randomness, it is recommended that you say Y
48225 + here. Saying Y here has a similar effect as modifying
48226 + /proc/sys/kernel/random/poolsize.
48227 +
48228 +config GRKERNSEC_BLACKHOLE
48229 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
48230 + depends on NET
48231 + help
48232 + If you say Y here, neither TCP resets nor ICMP
48233 + destination-unreachable packets will be sent in response to packets
48234 + sent to ports for which no associated listening process exists.
48235 + This feature supports both IPv4 and IPv6 and exempts the
48236 + loopback interface from blackholing. Enabling this feature
48237 + makes a host more resilient to DoS attacks and reduces network
48238 + visibility against scanners.
48239 +
48240 + The blackhole feature as-implemented is equivalent to the FreeBSD
48241 + blackhole feature, as it prevents RST responses to all packets, not
48242 + just SYNs. Under most application behavior this causes no
48243 + problems, but applications (like haproxy) may not close certain
48244 + connections in a way that cleanly terminates them on the remote
48245 + end, leaving the remote host in LAST_ACK state. Because of this
48246 + side-effect and to prevent intentional LAST_ACK DoSes, this
48247 + feature also adds automatic mitigation against such attacks.
48248 + The mitigation drastically reduces the amount of time a socket
48249 + can spend in LAST_ACK state. If you're using haproxy and not
48250 + all servers it connects to have this option enabled, consider
48251 + disabling this feature on the haproxy host.
48252 +
48253 + If the sysctl option is enabled, two sysctl options with names
48254 + "ip_blackhole" and "lastack_retries" will be created.
48255 + While "ip_blackhole" takes the standard zero/non-zero on/off
48256 + toggle, "lastack_retries" uses the same kinds of values as
48257 + "tcp_retries1" and "tcp_retries2". The default value of 4
48258 + prevents a socket from lasting more than 45 seconds in LAST_ACK
48259 + state.
48260 +
48261 +config GRKERNSEC_SOCKET
48262 + bool "Socket restrictions"
48263 + depends on NET
48264 + help
48265 + If you say Y here, you will be able to choose from several options.
48266 + If you assign a GID on your system and add it to the supplementary
48267 + groups of users you want to restrict socket access to, this patch
48268 + will perform up to three things, based on the option(s) you choose.
48269 +
48270 +config GRKERNSEC_SOCKET_ALL
48271 + bool "Deny any sockets to group"
48272 + depends on GRKERNSEC_SOCKET
48273 + help
48274 + If you say Y here, you will be able to choose a GID whose users will
48275 + be unable to connect to other hosts from your machine or run server
48276 + applications from your machine. If the sysctl option is enabled, a
48277 + sysctl option with name "socket_all" is created.
48278 +
48279 +config GRKERNSEC_SOCKET_ALL_GID
48280 + int "GID to deny all sockets for"
48281 + depends on GRKERNSEC_SOCKET_ALL
48282 + default 1004
48283 + help
48284 + Here you can choose the GID to disable socket access for. Remember to
48285 + add the users you want socket access disabled for to the GID
48286 + specified here. If the sysctl option is enabled, a sysctl option
48287 + with name "socket_all_gid" is created.
48288 +
48289 +config GRKERNSEC_SOCKET_CLIENT
48290 + bool "Deny client sockets to group"
48291 + depends on GRKERNSEC_SOCKET
48292 + help
48293 + If you say Y here, you will be able to choose a GID whose users will
48294 + be unable to connect to other hosts from your machine, but will be
48295 + able to run servers. If this option is enabled, all users in the group
48296 + you specify will have to use passive mode when initiating ftp transfers
48297 + from the shell on your machine. If the sysctl option is enabled, a
48298 + sysctl option with name "socket_client" is created.
48299 +
48300 +config GRKERNSEC_SOCKET_CLIENT_GID
48301 + int "GID to deny client sockets for"
48302 + depends on GRKERNSEC_SOCKET_CLIENT
48303 + default 1003
48304 + help
48305 + Here you can choose the GID to disable client socket access for.
48306 + Remember to add the users you want client socket access disabled for to
48307 + the GID specified here. If the sysctl option is enabled, a sysctl
48308 + option with name "socket_client_gid" is created.
48309 +
48310 +config GRKERNSEC_SOCKET_SERVER
48311 + bool "Deny server sockets to group"
48312 + depends on GRKERNSEC_SOCKET
48313 + help
48314 + If you say Y here, you will be able to choose a GID whose users will
48315 + be unable to run server applications from your machine. If the sysctl
48316 + option is enabled, a sysctl option with name "socket_server" is created.
48317 +
48318 +config GRKERNSEC_SOCKET_SERVER_GID
48319 + int "GID to deny server sockets for"
48320 + depends on GRKERNSEC_SOCKET_SERVER
48321 + default 1002
48322 + help
48323 + Here you can choose the GID to disable server socket access for.
48324 + Remember to add the users you want server socket access disabled for to
48325 + the GID specified here. If the sysctl option is enabled, a sysctl
48326 + option with name "socket_server_gid" is created.
48327 +
48328 +endmenu
48329 +menu "Sysctl support"
48330 +depends on GRKERNSEC && SYSCTL
48331 +
48332 +config GRKERNSEC_SYSCTL
48333 + bool "Sysctl support"
48334 + help
48335 + If you say Y here, you will be able to change the options that
48336 + grsecurity runs with at bootup, without having to recompile your
48337 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
48338 + to enable (1) or disable (0) various features. All the sysctl entries
48339 + are mutable until the "grsec_lock" entry is set to a non-zero value.
48340 + All features enabled in the kernel configuration are disabled at boot
48341 + if you do not say Y to the "Turn on features by default" option.
48342 + All options should be set at startup, and the grsec_lock entry should
48343 + be set to a non-zero value after all the options are set.
48344 + *THIS IS EXTREMELY IMPORTANT*
48345 +
48346 +config GRKERNSEC_SYSCTL_DISTRO
48347 + bool "Extra sysctl support for distro makers (READ HELP)"
48348 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
48349 + help
48350 + If you say Y here, additional sysctl options will be created
48351 + for features that affect processes running as root. Therefore,
48352 + it is critical when using this option that the grsec_lock entry be
48353 + enabled after boot. Only distros with prebuilt kernel packages
48354 + with this option enabled that can ensure grsec_lock is enabled
48355 + after boot should use this option.
48356 + *Failure to set grsec_lock after boot makes all grsec features
48357 + this option covers useless*
48358 +
48359 + Currently this option creates the following sysctl entries:
48360 + "Disable Privileged I/O": "disable_priv_io"
48361 +
48362 +config GRKERNSEC_SYSCTL_ON
48363 + bool "Turn on features by default"
48364 + depends on GRKERNSEC_SYSCTL
48365 + help
48366 + If you say Y here, instead of having all features enabled in the
48367 + kernel configuration disabled at boot time, the features will be
48368 + enabled at boot time. It is recommended you say Y here unless
48369 + there is some reason you would want all sysctl-tunable features to
48370 + be disabled by default. As mentioned elsewhere, it is important
48371 + to enable the grsec_lock entry once you have finished modifying
48372 + the sysctl entries.
48373 +
48374 +endmenu
48375 +menu "Logging Options"
48376 +depends on GRKERNSEC
48377 +
48378 +config GRKERNSEC_FLOODTIME
48379 + int "Seconds in between log messages (minimum)"
48380 + default 10
48381 + help
48382 + This option allows you to enforce the number of seconds between
48383 + grsecurity log messages. The default should be suitable for most
48384 + people, however, if you choose to change it, choose a value small enough
48385 + to allow informative logs to be produced, but large enough to
48386 + prevent flooding.
48387 +
48388 +config GRKERNSEC_FLOODBURST
48389 + int "Number of messages in a burst (maximum)"
48390 + default 6
48391 + help
48392 + This option allows you to choose the maximum number of messages allowed
48393 + within the flood time interval you chose in a separate option. The
48394 + default should be suitable for most people, however if you find that
48395 + many of your logs are being interpreted as flooding, you may want to
48396 + raise this value.
48397 +
48398 +endmenu
48399 +
48400 +endmenu
48401 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
48402 new file mode 100644
48403 index 0000000..496e60d
48404 --- /dev/null
48405 +++ b/grsecurity/Makefile
48406 @@ -0,0 +1,40 @@
48407 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
48408 +# during 2001-2009 it has been completely redesigned by Brad Spengler
48409 +# into an RBAC system
48410 +#
48411 +# All code in this directory and various hooks inserted throughout the kernel
48412 +# are copyright Brad Spengler - Open Source Security, Inc., and released
48413 +# under the GPL v2 or higher
48414 +
48415 +ifndef CONFIG_IA64
48416 +KBUILD_CFLAGS += -Werror
48417 +endif
48418 +
48419 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
48420 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
48421 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
48422 +
48423 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
48424 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
48425 + gracl_learn.o grsec_log.o
48426 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
48427 +
48428 +ifdef CONFIG_NET
48429 +obj-y += grsec_sock.o
48430 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
48431 +endif
48432 +
48433 +ifndef CONFIG_GRKERNSEC
48434 +obj-y += grsec_disabled.o
48435 +endif
48436 +
48437 +ifdef CONFIG_GRKERNSEC_HIDESYM
48438 +extra-y := grsec_hidesym.o
48439 +$(obj)/grsec_hidesym.o:
48440 + @-chmod -f 500 /boot
48441 + @-chmod -f 500 /lib/modules
48442 + @-chmod -f 500 /lib64/modules
48443 + @-chmod -f 500 /lib32/modules
48444 + @-chmod -f 700 .
48445 + @echo ' grsec: protected kernel image paths'
48446 +endif
48447 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
48448 new file mode 100644
48449 index 0000000..7715893
48450 --- /dev/null
48451 +++ b/grsecurity/gracl.c
48452 @@ -0,0 +1,4164 @@
48453 +#include <linux/kernel.h>
48454 +#include <linux/module.h>
48455 +#include <linux/sched.h>
48456 +#include <linux/mm.h>
48457 +#include <linux/file.h>
48458 +#include <linux/fs.h>
48459 +#include <linux/namei.h>
48460 +#include <linux/mount.h>
48461 +#include <linux/tty.h>
48462 +#include <linux/proc_fs.h>
48463 +#include <linux/lglock.h>
48464 +#include <linux/slab.h>
48465 +#include <linux/vmalloc.h>
48466 +#include <linux/types.h>
48467 +#include <linux/sysctl.h>
48468 +#include <linux/netdevice.h>
48469 +#include <linux/ptrace.h>
48470 +#include <linux/gracl.h>
48471 +#include <linux/gralloc.h>
48472 +#include <linux/security.h>
48473 +#include <linux/grinternal.h>
48474 +#include <linux/pid_namespace.h>
48475 +#include <linux/fdtable.h>
48476 +#include <linux/percpu.h>
48477 +
48478 +#include <asm/uaccess.h>
48479 +#include <asm/errno.h>
48480 +#include <asm/mman.h>
48481 +
48482 +static struct acl_role_db acl_role_set;
48483 +static struct name_db name_set;
48484 +static struct inodev_db inodev_set;
48485 +
48486 +/* for keeping track of userspace pointers used for subjects, so we
48487 + can share references in the kernel as well
48488 +*/
48489 +
48490 +static struct path real_root;
48491 +
48492 +static struct acl_subj_map_db subj_map_set;
48493 +
48494 +static struct acl_role_label *default_role;
48495 +
48496 +static struct acl_role_label *role_list;
48497 +
48498 +static u16 acl_sp_role_value;
48499 +
48500 +extern char *gr_shared_page[4];
48501 +static DEFINE_MUTEX(gr_dev_mutex);
48502 +DEFINE_RWLOCK(gr_inode_lock);
48503 +
48504 +struct gr_arg *gr_usermode;
48505 +
48506 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
48507 +
48508 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
48509 +extern void gr_clear_learn_entries(void);
48510 +
48511 +#ifdef CONFIG_GRKERNSEC_RESLOG
48512 +extern void gr_log_resource(const struct task_struct *task,
48513 + const int res, const unsigned long wanted, const int gt);
48514 +#endif
48515 +
48516 +unsigned char *gr_system_salt;
48517 +unsigned char *gr_system_sum;
48518 +
48519 +static struct sprole_pw **acl_special_roles = NULL;
48520 +static __u16 num_sprole_pws = 0;
48521 +
48522 +static struct acl_role_label *kernel_role = NULL;
48523 +
48524 +static unsigned int gr_auth_attempts = 0;
48525 +static unsigned long gr_auth_expires = 0UL;
48526 +
48527 +#ifdef CONFIG_NET
48528 +extern struct vfsmount *sock_mnt;
48529 +#endif
48530 +
48531 +extern struct vfsmount *pipe_mnt;
48532 +extern struct vfsmount *shm_mnt;
48533 +#ifdef CONFIG_HUGETLBFS
48534 +extern struct vfsmount *hugetlbfs_vfsmount;
48535 +#endif
48536 +
48537 +static struct acl_object_label *fakefs_obj_rw;
48538 +static struct acl_object_label *fakefs_obj_rwx;
48539 +
48540 +extern int gr_init_uidset(void);
48541 +extern void gr_free_uidset(void);
48542 +extern void gr_remove_uid(uid_t uid);
48543 +extern int gr_find_uid(uid_t uid);
48544 +
48545 +DECLARE_BRLOCK(vfsmount_lock);
48546 +
48547 +__inline__ int
48548 +gr_acl_is_enabled(void)
48549 +{
48550 + return (gr_status & GR_READY);
48551 +}
48552 +
48553 +#ifdef CONFIG_BTRFS_FS
48554 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
48555 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
48556 +#endif
48557 +
48558 +static inline dev_t __get_dev(const struct dentry *dentry)
48559 +{
48560 +#ifdef CONFIG_BTRFS_FS
48561 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
48562 + return get_btrfs_dev_from_inode(dentry->d_inode);
48563 + else
48564 +#endif
48565 + return dentry->d_inode->i_sb->s_dev;
48566 +}
48567 +
48568 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48569 +{
48570 + return __get_dev(dentry);
48571 +}
48572 +
48573 +static char gr_task_roletype_to_char(struct task_struct *task)
48574 +{
48575 + switch (task->role->roletype &
48576 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48577 + GR_ROLE_SPECIAL)) {
48578 + case GR_ROLE_DEFAULT:
48579 + return 'D';
48580 + case GR_ROLE_USER:
48581 + return 'U';
48582 + case GR_ROLE_GROUP:
48583 + return 'G';
48584 + case GR_ROLE_SPECIAL:
48585 + return 'S';
48586 + }
48587 +
48588 + return 'X';
48589 +}
48590 +
48591 +char gr_roletype_to_char(void)
48592 +{
48593 + return gr_task_roletype_to_char(current);
48594 +}
48595 +
48596 +__inline__ int
48597 +gr_acl_tpe_check(void)
48598 +{
48599 + if (unlikely(!(gr_status & GR_READY)))
48600 + return 0;
48601 + if (current->role->roletype & GR_ROLE_TPE)
48602 + return 1;
48603 + else
48604 + return 0;
48605 +}
48606 +
48607 +int
48608 +gr_handle_rawio(const struct inode *inode)
48609 +{
48610 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48611 + if (inode && S_ISBLK(inode->i_mode) &&
48612 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48613 + !capable(CAP_SYS_RAWIO))
48614 + return 1;
48615 +#endif
48616 + return 0;
48617 +}
48618 +
48619 +static int
48620 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48621 +{
48622 + if (likely(lena != lenb))
48623 + return 0;
48624 +
48625 + return !memcmp(a, b, lena);
48626 +}
48627 +
48628 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48629 +{
48630 + *buflen -= namelen;
48631 + if (*buflen < 0)
48632 + return -ENAMETOOLONG;
48633 + *buffer -= namelen;
48634 + memcpy(*buffer, str, namelen);
48635 + return 0;
48636 +}
48637 +
48638 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48639 +{
48640 + return prepend(buffer, buflen, name->name, name->len);
48641 +}
48642 +
48643 +static int prepend_path(const struct path *path, struct path *root,
48644 + char **buffer, int *buflen)
48645 +{
48646 + struct dentry *dentry = path->dentry;
48647 + struct vfsmount *vfsmnt = path->mnt;
48648 + bool slash = false;
48649 + int error = 0;
48650 +
48651 + while (dentry != root->dentry || vfsmnt != root->mnt) {
48652 + struct dentry * parent;
48653 +
48654 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48655 + /* Global root? */
48656 + if (vfsmnt->mnt_parent == vfsmnt) {
48657 + goto out;
48658 + }
48659 + dentry = vfsmnt->mnt_mountpoint;
48660 + vfsmnt = vfsmnt->mnt_parent;
48661 + continue;
48662 + }
48663 + parent = dentry->d_parent;
48664 + prefetch(parent);
48665 + spin_lock(&dentry->d_lock);
48666 + error = prepend_name(buffer, buflen, &dentry->d_name);
48667 + spin_unlock(&dentry->d_lock);
48668 + if (!error)
48669 + error = prepend(buffer, buflen, "/", 1);
48670 + if (error)
48671 + break;
48672 +
48673 + slash = true;
48674 + dentry = parent;
48675 + }
48676 +
48677 +out:
48678 + if (!error && !slash)
48679 + error = prepend(buffer, buflen, "/", 1);
48680 +
48681 + return error;
48682 +}
48683 +
48684 +/* this must be called with vfsmount_lock and rename_lock held */
48685 +
48686 +static char *__our_d_path(const struct path *path, struct path *root,
48687 + char *buf, int buflen)
48688 +{
48689 + char *res = buf + buflen;
48690 + int error;
48691 +
48692 + prepend(&res, &buflen, "\0", 1);
48693 + error = prepend_path(path, root, &res, &buflen);
48694 + if (error)
48695 + return ERR_PTR(error);
48696 +
48697 + return res;
48698 +}
48699 +
48700 +static char *
48701 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48702 +{
48703 + char *retval;
48704 +
48705 + retval = __our_d_path(path, root, buf, buflen);
48706 + if (unlikely(IS_ERR(retval)))
48707 + retval = strcpy(buf, "<path too long>");
48708 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48709 + retval[1] = '\0';
48710 +
48711 + return retval;
48712 +}
48713 +
48714 +static char *
48715 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48716 + char *buf, int buflen)
48717 +{
48718 + struct path path;
48719 + char *res;
48720 +
48721 + path.dentry = (struct dentry *)dentry;
48722 + path.mnt = (struct vfsmount *)vfsmnt;
48723 +
48724 + /* we can use real_root.dentry, real_root.mnt, because this is only called
48725 + by the RBAC system */
48726 + res = gen_full_path(&path, &real_root, buf, buflen);
48727 +
48728 + return res;
48729 +}
48730 +
48731 +static char *
48732 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48733 + char *buf, int buflen)
48734 +{
48735 + char *res;
48736 + struct path path;
48737 + struct path root;
48738 + struct task_struct *reaper = &init_task;
48739 +
48740 + path.dentry = (struct dentry *)dentry;
48741 + path.mnt = (struct vfsmount *)vfsmnt;
48742 +
48743 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48744 + get_fs_root(reaper->fs, &root);
48745 +
48746 + write_seqlock(&rename_lock);
48747 + br_read_lock(vfsmount_lock);
48748 + res = gen_full_path(&path, &root, buf, buflen);
48749 + br_read_unlock(vfsmount_lock);
48750 + write_sequnlock(&rename_lock);
48751 +
48752 + path_put(&root);
48753 + return res;
48754 +}
48755 +
48756 +static char *
48757 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48758 +{
48759 + char *ret;
48760 + write_seqlock(&rename_lock);
48761 + br_read_lock(vfsmount_lock);
48762 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48763 + PAGE_SIZE);
48764 + br_read_unlock(vfsmount_lock);
48765 + write_sequnlock(&rename_lock);
48766 + return ret;
48767 +}
48768 +
48769 +static char *
48770 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48771 +{
48772 + char *ret;
48773 + char *buf;
48774 + int buflen;
48775 +
48776 + write_seqlock(&rename_lock);
48777 + br_read_lock(vfsmount_lock);
48778 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48779 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48780 + buflen = (int)(ret - buf);
48781 + if (buflen >= 5)
48782 + prepend(&ret, &buflen, "/proc", 5);
48783 + else
48784 + ret = strcpy(buf, "<path too long>");
48785 + br_read_unlock(vfsmount_lock);
48786 + write_sequnlock(&rename_lock);
48787 + return ret;
48788 +}
48789 +
48790 +char *
48791 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48792 +{
48793 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48794 + PAGE_SIZE);
48795 +}
48796 +
48797 +char *
48798 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48799 +{
48800 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48801 + PAGE_SIZE);
48802 +}
48803 +
48804 +char *
48805 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48806 +{
48807 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48808 + PAGE_SIZE);
48809 +}
48810 +
48811 +char *
48812 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48813 +{
48814 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48815 + PAGE_SIZE);
48816 +}
48817 +
48818 +char *
48819 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48820 +{
48821 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48822 + PAGE_SIZE);
48823 +}
48824 +
48825 +__inline__ __u32
48826 +to_gr_audit(const __u32 reqmode)
48827 +{
48828 + /* masks off auditable permission flags, then shifts them to create
48829 + auditing flags, and adds the special case of append auditing if
48830 + we're requesting write */
48831 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48832 +}
48833 +
48834 +struct acl_subject_label *
48835 +lookup_subject_map(const struct acl_subject_label *userp)
48836 +{
48837 + unsigned int index = shash(userp, subj_map_set.s_size);
48838 + struct subject_map *match;
48839 +
48840 + match = subj_map_set.s_hash[index];
48841 +
48842 + while (match && match->user != userp)
48843 + match = match->next;
48844 +
48845 + if (match != NULL)
48846 + return match->kernel;
48847 + else
48848 + return NULL;
48849 +}
48850 +
48851 +static void
48852 +insert_subj_map_entry(struct subject_map *subjmap)
48853 +{
48854 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48855 + struct subject_map **curr;
48856 +
48857 + subjmap->prev = NULL;
48858 +
48859 + curr = &subj_map_set.s_hash[index];
48860 + if (*curr != NULL)
48861 + (*curr)->prev = subjmap;
48862 +
48863 + subjmap->next = *curr;
48864 + *curr = subjmap;
48865 +
48866 + return;
48867 +}
48868 +
48869 +static struct acl_role_label *
48870 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
48871 + const gid_t gid)
48872 +{
48873 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
48874 + struct acl_role_label *match;
48875 + struct role_allowed_ip *ipp;
48876 + unsigned int x;
48877 + u32 curr_ip = task->signal->curr_ip;
48878 +
48879 + task->signal->saved_ip = curr_ip;
48880 +
48881 + match = acl_role_set.r_hash[index];
48882 +
48883 + while (match) {
48884 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
48885 + for (x = 0; x < match->domain_child_num; x++) {
48886 + if (match->domain_children[x] == uid)
48887 + goto found;
48888 + }
48889 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
48890 + break;
48891 + match = match->next;
48892 + }
48893 +found:
48894 + if (match == NULL) {
48895 + try_group:
48896 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
48897 + match = acl_role_set.r_hash[index];
48898 +
48899 + while (match) {
48900 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
48901 + for (x = 0; x < match->domain_child_num; x++) {
48902 + if (match->domain_children[x] == gid)
48903 + goto found2;
48904 + }
48905 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
48906 + break;
48907 + match = match->next;
48908 + }
48909 +found2:
48910 + if (match == NULL)
48911 + match = default_role;
48912 + if (match->allowed_ips == NULL)
48913 + return match;
48914 + else {
48915 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48916 + if (likely
48917 + ((ntohl(curr_ip) & ipp->netmask) ==
48918 + (ntohl(ipp->addr) & ipp->netmask)))
48919 + return match;
48920 + }
48921 + match = default_role;
48922 + }
48923 + } else if (match->allowed_ips == NULL) {
48924 + return match;
48925 + } else {
48926 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48927 + if (likely
48928 + ((ntohl(curr_ip) & ipp->netmask) ==
48929 + (ntohl(ipp->addr) & ipp->netmask)))
48930 + return match;
48931 + }
48932 + goto try_group;
48933 + }
48934 +
48935 + return match;
48936 +}
48937 +
48938 +struct acl_subject_label *
48939 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48940 + const struct acl_role_label *role)
48941 +{
48942 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
48943 + struct acl_subject_label *match;
48944 +
48945 + match = role->subj_hash[index];
48946 +
48947 + while (match && (match->inode != ino || match->device != dev ||
48948 + (match->mode & GR_DELETED))) {
48949 + match = match->next;
48950 + }
48951 +
48952 + if (match && !(match->mode & GR_DELETED))
48953 + return match;
48954 + else
48955 + return NULL;
48956 +}
48957 +
48958 +struct acl_subject_label *
48959 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48960 + const struct acl_role_label *role)
48961 +{
48962 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
48963 + struct acl_subject_label *match;
48964 +
48965 + match = role->subj_hash[index];
48966 +
48967 + while (match && (match->inode != ino || match->device != dev ||
48968 + !(match->mode & GR_DELETED))) {
48969 + match = match->next;
48970 + }
48971 +
48972 + if (match && (match->mode & GR_DELETED))
48973 + return match;
48974 + else
48975 + return NULL;
48976 +}
48977 +
48978 +static struct acl_object_label *
48979 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
48980 + const struct acl_subject_label *subj)
48981 +{
48982 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48983 + struct acl_object_label *match;
48984 +
48985 + match = subj->obj_hash[index];
48986 +
48987 + while (match && (match->inode != ino || match->device != dev ||
48988 + (match->mode & GR_DELETED))) {
48989 + match = match->next;
48990 + }
48991 +
48992 + if (match && !(match->mode & GR_DELETED))
48993 + return match;
48994 + else
48995 + return NULL;
48996 +}
48997 +
48998 +static struct acl_object_label *
48999 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
49000 + const struct acl_subject_label *subj)
49001 +{
49002 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
49003 + struct acl_object_label *match;
49004 +
49005 + match = subj->obj_hash[index];
49006 +
49007 + while (match && (match->inode != ino || match->device != dev ||
49008 + !(match->mode & GR_DELETED))) {
49009 + match = match->next;
49010 + }
49011 +
49012 + if (match && (match->mode & GR_DELETED))
49013 + return match;
49014 +
49015 + match = subj->obj_hash[index];
49016 +
49017 + while (match && (match->inode != ino || match->device != dev ||
49018 + (match->mode & GR_DELETED))) {
49019 + match = match->next;
49020 + }
49021 +
49022 + if (match && !(match->mode & GR_DELETED))
49023 + return match;
49024 + else
49025 + return NULL;
49026 +}
49027 +
49028 +static struct name_entry *
49029 +lookup_name_entry(const char *name)
49030 +{
49031 + unsigned int len = strlen(name);
49032 + unsigned int key = full_name_hash(name, len);
49033 + unsigned int index = key % name_set.n_size;
49034 + struct name_entry *match;
49035 +
49036 + match = name_set.n_hash[index];
49037 +
49038 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
49039 + match = match->next;
49040 +
49041 + return match;
49042 +}
49043 +
49044 +static struct name_entry *
49045 +lookup_name_entry_create(const char *name)
49046 +{
49047 + unsigned int len = strlen(name);
49048 + unsigned int key = full_name_hash(name, len);
49049 + unsigned int index = key % name_set.n_size;
49050 + struct name_entry *match;
49051 +
49052 + match = name_set.n_hash[index];
49053 +
49054 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
49055 + !match->deleted))
49056 + match = match->next;
49057 +
49058 + if (match && match->deleted)
49059 + return match;
49060 +
49061 + match = name_set.n_hash[index];
49062 +
49063 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
49064 + match->deleted))
49065 + match = match->next;
49066 +
49067 + if (match && !match->deleted)
49068 + return match;
49069 + else
49070 + return NULL;
49071 +}
49072 +
49073 +static struct inodev_entry *
49074 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
49075 +{
49076 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
49077 + struct inodev_entry *match;
49078 +
49079 + match = inodev_set.i_hash[index];
49080 +
49081 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
49082 + match = match->next;
49083 +
49084 + return match;
49085 +}
49086 +
49087 +static void
49088 +insert_inodev_entry(struct inodev_entry *entry)
49089 +{
49090 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
49091 + inodev_set.i_size);
49092 + struct inodev_entry **curr;
49093 +
49094 + entry->prev = NULL;
49095 +
49096 + curr = &inodev_set.i_hash[index];
49097 + if (*curr != NULL)
49098 + (*curr)->prev = entry;
49099 +
49100 + entry->next = *curr;
49101 + *curr = entry;
49102 +
49103 + return;
49104 +}
49105 +
49106 +static void
49107 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
49108 +{
49109 + unsigned int index =
49110 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
49111 + struct acl_role_label **curr;
49112 + struct acl_role_label *tmp;
49113 +
49114 + curr = &acl_role_set.r_hash[index];
49115 +
49116 + /* if role was already inserted due to domains and already has
49117 + a role in the same bucket as it attached, then we need to
49118 + combine these two buckets
49119 + */
49120 + if (role->next) {
49121 + tmp = role->next;
49122 + while (tmp->next)
49123 + tmp = tmp->next;
49124 + tmp->next = *curr;
49125 + } else
49126 + role->next = *curr;
49127 + *curr = role;
49128 +
49129 + return;
49130 +}
49131 +
49132 +static void
49133 +insert_acl_role_label(struct acl_role_label *role)
49134 +{
49135 + int i;
49136 +
49137 + if (role_list == NULL) {
49138 + role_list = role;
49139 + role->prev = NULL;
49140 + } else {
49141 + role->prev = role_list;
49142 + role_list = role;
49143 + }
49144 +
49145 + /* used for hash chains */
49146 + role->next = NULL;
49147 +
49148 + if (role->roletype & GR_ROLE_DOMAIN) {
49149 + for (i = 0; i < role->domain_child_num; i++)
49150 + __insert_acl_role_label(role, role->domain_children[i]);
49151 + } else
49152 + __insert_acl_role_label(role, role->uidgid);
49153 +}
49154 +
49155 +static int
49156 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
49157 +{
49158 + struct name_entry **curr, *nentry;
49159 + struct inodev_entry *ientry;
49160 + unsigned int len = strlen(name);
49161 + unsigned int key = full_name_hash(name, len);
49162 + unsigned int index = key % name_set.n_size;
49163 +
49164 + curr = &name_set.n_hash[index];
49165 +
49166 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
49167 + curr = &((*curr)->next);
49168 +
49169 + if (*curr != NULL)
49170 + return 1;
49171 +
49172 + nentry = acl_alloc(sizeof (struct name_entry));
49173 + if (nentry == NULL)
49174 + return 0;
49175 + ientry = acl_alloc(sizeof (struct inodev_entry));
49176 + if (ientry == NULL)
49177 + return 0;
49178 + ientry->nentry = nentry;
49179 +
49180 + nentry->key = key;
49181 + nentry->name = name;
49182 + nentry->inode = inode;
49183 + nentry->device = device;
49184 + nentry->len = len;
49185 + nentry->deleted = deleted;
49186 +
49187 + nentry->prev = NULL;
49188 + curr = &name_set.n_hash[index];
49189 + if (*curr != NULL)
49190 + (*curr)->prev = nentry;
49191 + nentry->next = *curr;
49192 + *curr = nentry;
49193 +
49194 + /* insert us into the table searchable by inode/dev */
49195 + insert_inodev_entry(ientry);
49196 +
49197 + return 1;
49198 +}
49199 +
49200 +static void
49201 +insert_acl_obj_label(struct acl_object_label *obj,
49202 + struct acl_subject_label *subj)
49203 +{
49204 + unsigned int index =
49205 + fhash(obj->inode, obj->device, subj->obj_hash_size);
49206 + struct acl_object_label **curr;
49207 +
49208 +
49209 + obj->prev = NULL;
49210 +
49211 + curr = &subj->obj_hash[index];
49212 + if (*curr != NULL)
49213 + (*curr)->prev = obj;
49214 +
49215 + obj->next = *curr;
49216 + *curr = obj;
49217 +
49218 + return;
49219 +}
49220 +
49221 +static void
49222 +insert_acl_subj_label(struct acl_subject_label *obj,
49223 + struct acl_role_label *role)
49224 +{
49225 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
49226 + struct acl_subject_label **curr;
49227 +
49228 + obj->prev = NULL;
49229 +
49230 + curr = &role->subj_hash[index];
49231 + if (*curr != NULL)
49232 + (*curr)->prev = obj;
49233 +
49234 + obj->next = *curr;
49235 + *curr = obj;
49236 +
49237 + return;
49238 +}
49239 +
49240 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
49241 +
49242 +static void *
49243 +create_table(__u32 * len, int elementsize)
49244 +{
49245 + unsigned int table_sizes[] = {
49246 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
49247 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
49248 + 4194301, 8388593, 16777213, 33554393, 67108859
49249 + };
49250 + void *newtable = NULL;
49251 + unsigned int pwr = 0;
49252 +
49253 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
49254 + table_sizes[pwr] <= *len)
49255 + pwr++;
49256 +
49257 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
49258 + return newtable;
49259 +
49260 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
49261 + newtable =
49262 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
49263 + else
49264 + newtable = vmalloc(table_sizes[pwr] * elementsize);
49265 +
49266 + *len = table_sizes[pwr];
49267 +
49268 + return newtable;
49269 +}
49270 +
49271 +static int
49272 +init_variables(const struct gr_arg *arg)
49273 +{
49274 + struct task_struct *reaper = &init_task;
49275 + unsigned int stacksize;
49276 +
49277 + subj_map_set.s_size = arg->role_db.num_subjects;
49278 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
49279 + name_set.n_size = arg->role_db.num_objects;
49280 + inodev_set.i_size = arg->role_db.num_objects;
49281 +
49282 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
49283 + !name_set.n_size || !inodev_set.i_size)
49284 + return 1;
49285 +
49286 + if (!gr_init_uidset())
49287 + return 1;
49288 +
49289 + /* set up the stack that holds allocation info */
49290 +
49291 + stacksize = arg->role_db.num_pointers + 5;
49292 +
49293 + if (!acl_alloc_stack_init(stacksize))
49294 + return 1;
49295 +
49296 + /* grab reference for the real root dentry and vfsmount */
49297 + get_fs_root(reaper->fs, &real_root);
49298 +
49299 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49300 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
49301 +#endif
49302 +
49303 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
49304 + if (fakefs_obj_rw == NULL)
49305 + return 1;
49306 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
49307 +
49308 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
49309 + if (fakefs_obj_rwx == NULL)
49310 + return 1;
49311 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
49312 +
49313 + subj_map_set.s_hash =
49314 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
49315 + acl_role_set.r_hash =
49316 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
49317 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
49318 + inodev_set.i_hash =
49319 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
49320 +
49321 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
49322 + !name_set.n_hash || !inodev_set.i_hash)
49323 + return 1;
49324 +
49325 + memset(subj_map_set.s_hash, 0,
49326 + sizeof(struct subject_map *) * subj_map_set.s_size);
49327 + memset(acl_role_set.r_hash, 0,
49328 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
49329 + memset(name_set.n_hash, 0,
49330 + sizeof (struct name_entry *) * name_set.n_size);
49331 + memset(inodev_set.i_hash, 0,
49332 + sizeof (struct inodev_entry *) * inodev_set.i_size);
49333 +
49334 + return 0;
49335 +}
49336 +
49337 +/* free information not needed after startup
49338 + currently contains user->kernel pointer mappings for subjects
49339 +*/
49340 +
49341 +static void
49342 +free_init_variables(void)
49343 +{
49344 + __u32 i;
49345 +
49346 + if (subj_map_set.s_hash) {
49347 + for (i = 0; i < subj_map_set.s_size; i++) {
49348 + if (subj_map_set.s_hash[i]) {
49349 + kfree(subj_map_set.s_hash[i]);
49350 + subj_map_set.s_hash[i] = NULL;
49351 + }
49352 + }
49353 +
49354 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
49355 + PAGE_SIZE)
49356 + kfree(subj_map_set.s_hash);
49357 + else
49358 + vfree(subj_map_set.s_hash);
49359 + }
49360 +
49361 + return;
49362 +}
49363 +
49364 +static void
49365 +free_variables(void)
49366 +{
49367 + struct acl_subject_label *s;
49368 + struct acl_role_label *r;
49369 + struct task_struct *task, *task2;
49370 + unsigned int x;
49371 +
49372 + gr_clear_learn_entries();
49373 +
49374 + read_lock(&tasklist_lock);
49375 + do_each_thread(task2, task) {
49376 + task->acl_sp_role = 0;
49377 + task->acl_role_id = 0;
49378 + task->acl = NULL;
49379 + task->role = NULL;
49380 + } while_each_thread(task2, task);
49381 + read_unlock(&tasklist_lock);
49382 +
49383 + /* release the reference to the real root dentry and vfsmount */
49384 + path_put(&real_root);
49385 +
49386 + /* free all object hash tables */
49387 +
49388 + FOR_EACH_ROLE_START(r)
49389 + if (r->subj_hash == NULL)
49390 + goto next_role;
49391 + FOR_EACH_SUBJECT_START(r, s, x)
49392 + if (s->obj_hash == NULL)
49393 + break;
49394 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49395 + kfree(s->obj_hash);
49396 + else
49397 + vfree(s->obj_hash);
49398 + FOR_EACH_SUBJECT_END(s, x)
49399 + FOR_EACH_NESTED_SUBJECT_START(r, s)
49400 + if (s->obj_hash == NULL)
49401 + break;
49402 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49403 + kfree(s->obj_hash);
49404 + else
49405 + vfree(s->obj_hash);
49406 + FOR_EACH_NESTED_SUBJECT_END(s)
49407 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
49408 + kfree(r->subj_hash);
49409 + else
49410 + vfree(r->subj_hash);
49411 + r->subj_hash = NULL;
49412 +next_role:
49413 + FOR_EACH_ROLE_END(r)
49414 +
49415 + acl_free_all();
49416 +
49417 + if (acl_role_set.r_hash) {
49418 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
49419 + PAGE_SIZE)
49420 + kfree(acl_role_set.r_hash);
49421 + else
49422 + vfree(acl_role_set.r_hash);
49423 + }
49424 + if (name_set.n_hash) {
49425 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
49426 + PAGE_SIZE)
49427 + kfree(name_set.n_hash);
49428 + else
49429 + vfree(name_set.n_hash);
49430 + }
49431 +
49432 + if (inodev_set.i_hash) {
49433 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
49434 + PAGE_SIZE)
49435 + kfree(inodev_set.i_hash);
49436 + else
49437 + vfree(inodev_set.i_hash);
49438 + }
49439 +
49440 + gr_free_uidset();
49441 +
49442 + memset(&name_set, 0, sizeof (struct name_db));
49443 + memset(&inodev_set, 0, sizeof (struct inodev_db));
49444 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
49445 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
49446 +
49447 + default_role = NULL;
49448 + role_list = NULL;
49449 +
49450 + return;
49451 +}
49452 +
49453 +static __u32
49454 +count_user_objs(struct acl_object_label *userp)
49455 +{
49456 + struct acl_object_label o_tmp;
49457 + __u32 num = 0;
49458 +
49459 + while (userp) {
49460 + if (copy_from_user(&o_tmp, userp,
49461 + sizeof (struct acl_object_label)))
49462 + break;
49463 +
49464 + userp = o_tmp.prev;
49465 + num++;
49466 + }
49467 +
49468 + return num;
49469 +}
49470 +
49471 +static struct acl_subject_label *
49472 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
49473 +
49474 +static int
49475 +copy_user_glob(struct acl_object_label *obj)
49476 +{
49477 + struct acl_object_label *g_tmp, **guser;
49478 + unsigned int len;
49479 + char *tmp;
49480 +
49481 + if (obj->globbed == NULL)
49482 + return 0;
49483 +
49484 + guser = &obj->globbed;
49485 + while (*guser) {
49486 + g_tmp = (struct acl_object_label *)
49487 + acl_alloc(sizeof (struct acl_object_label));
49488 + if (g_tmp == NULL)
49489 + return -ENOMEM;
49490 +
49491 + if (copy_from_user(g_tmp, *guser,
49492 + sizeof (struct acl_object_label)))
49493 + return -EFAULT;
49494 +
49495 + len = strnlen_user(g_tmp->filename, PATH_MAX);
49496 +
49497 + if (!len || len >= PATH_MAX)
49498 + return -EINVAL;
49499 +
49500 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49501 + return -ENOMEM;
49502 +
49503 + if (copy_from_user(tmp, g_tmp->filename, len))
49504 + return -EFAULT;
49505 + tmp[len-1] = '\0';
49506 + g_tmp->filename = tmp;
49507 +
49508 + *guser = g_tmp;
49509 + guser = &(g_tmp->next);
49510 + }
49511 +
49512 + return 0;
49513 +}
49514 +
49515 +static int
49516 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
49517 + struct acl_role_label *role)
49518 +{
49519 + struct acl_object_label *o_tmp;
49520 + unsigned int len;
49521 + int ret;
49522 + char *tmp;
49523 +
49524 + while (userp) {
49525 + if ((o_tmp = (struct acl_object_label *)
49526 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
49527 + return -ENOMEM;
49528 +
49529 + if (copy_from_user(o_tmp, userp,
49530 + sizeof (struct acl_object_label)))
49531 + return -EFAULT;
49532 +
49533 + userp = o_tmp->prev;
49534 +
49535 + len = strnlen_user(o_tmp->filename, PATH_MAX);
49536 +
49537 + if (!len || len >= PATH_MAX)
49538 + return -EINVAL;
49539 +
49540 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49541 + return -ENOMEM;
49542 +
49543 + if (copy_from_user(tmp, o_tmp->filename, len))
49544 + return -EFAULT;
49545 + tmp[len-1] = '\0';
49546 + o_tmp->filename = tmp;
49547 +
49548 + insert_acl_obj_label(o_tmp, subj);
49549 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
49550 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
49551 + return -ENOMEM;
49552 +
49553 + ret = copy_user_glob(o_tmp);
49554 + if (ret)
49555 + return ret;
49556 +
49557 + if (o_tmp->nested) {
49558 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
49559 + if (IS_ERR(o_tmp->nested))
49560 + return PTR_ERR(o_tmp->nested);
49561 +
49562 + /* insert into nested subject list */
49563 + o_tmp->nested->next = role->hash->first;
49564 + role->hash->first = o_tmp->nested;
49565 + }
49566 + }
49567 +
49568 + return 0;
49569 +}
49570 +
49571 +static __u32
49572 +count_user_subjs(struct acl_subject_label *userp)
49573 +{
49574 + struct acl_subject_label s_tmp;
49575 + __u32 num = 0;
49576 +
49577 + while (userp) {
49578 + if (copy_from_user(&s_tmp, userp,
49579 + sizeof (struct acl_subject_label)))
49580 + break;
49581 +
49582 + userp = s_tmp.prev;
49583 + /* do not count nested subjects against this count, since
49584 + they are not included in the hash table, but are
49585 + attached to objects. We have already counted
49586 + the subjects in userspace for the allocation
49587 + stack
49588 + */
49589 + if (!(s_tmp.mode & GR_NESTED))
49590 + num++;
49591 + }
49592 +
49593 + return num;
49594 +}
49595 +
49596 +static int
49597 +copy_user_allowedips(struct acl_role_label *rolep)
49598 +{
49599 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
49600 +
49601 + ruserip = rolep->allowed_ips;
49602 +
49603 + while (ruserip) {
49604 + rlast = rtmp;
49605 +
49606 + if ((rtmp = (struct role_allowed_ip *)
49607 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
49608 + return -ENOMEM;
49609 +
49610 + if (copy_from_user(rtmp, ruserip,
49611 + sizeof (struct role_allowed_ip)))
49612 + return -EFAULT;
49613 +
49614 + ruserip = rtmp->prev;
49615 +
49616 + if (!rlast) {
49617 + rtmp->prev = NULL;
49618 + rolep->allowed_ips = rtmp;
49619 + } else {
49620 + rlast->next = rtmp;
49621 + rtmp->prev = rlast;
49622 + }
49623 +
49624 + if (!ruserip)
49625 + rtmp->next = NULL;
49626 + }
49627 +
49628 + return 0;
49629 +}
49630 +
49631 +static int
49632 +copy_user_transitions(struct acl_role_label *rolep)
49633 +{
49634 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
49635 +
49636 + unsigned int len;
49637 + char *tmp;
49638 +
49639 + rusertp = rolep->transitions;
49640 +
49641 + while (rusertp) {
49642 + rlast = rtmp;
49643 +
49644 + if ((rtmp = (struct role_transition *)
49645 + acl_alloc(sizeof (struct role_transition))) == NULL)
49646 + return -ENOMEM;
49647 +
49648 + if (copy_from_user(rtmp, rusertp,
49649 + sizeof (struct role_transition)))
49650 + return -EFAULT;
49651 +
49652 + rusertp = rtmp->prev;
49653 +
49654 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
49655 +
49656 + if (!len || len >= GR_SPROLE_LEN)
49657 + return -EINVAL;
49658 +
49659 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49660 + return -ENOMEM;
49661 +
49662 + if (copy_from_user(tmp, rtmp->rolename, len))
49663 + return -EFAULT;
49664 + tmp[len-1] = '\0';
49665 + rtmp->rolename = tmp;
49666 +
49667 + if (!rlast) {
49668 + rtmp->prev = NULL;
49669 + rolep->transitions = rtmp;
49670 + } else {
49671 + rlast->next = rtmp;
49672 + rtmp->prev = rlast;
49673 + }
49674 +
49675 + if (!rusertp)
49676 + rtmp->next = NULL;
49677 + }
49678 +
49679 + return 0;
49680 +}
49681 +
49682 +static struct acl_subject_label *
49683 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
49684 +{
49685 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
49686 + unsigned int len;
49687 + char *tmp;
49688 + __u32 num_objs;
49689 + struct acl_ip_label **i_tmp, *i_utmp2;
49690 + struct gr_hash_struct ghash;
49691 + struct subject_map *subjmap;
49692 + unsigned int i_num;
49693 + int err;
49694 +
49695 + s_tmp = lookup_subject_map(userp);
49696 +
49697 + /* we've already copied this subject into the kernel, just return
49698 + the reference to it, and don't copy it over again
49699 + */
49700 + if (s_tmp)
49701 + return(s_tmp);
49702 +
49703 + if ((s_tmp = (struct acl_subject_label *)
49704 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
49705 + return ERR_PTR(-ENOMEM);
49706 +
49707 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
49708 + if (subjmap == NULL)
49709 + return ERR_PTR(-ENOMEM);
49710 +
49711 + subjmap->user = userp;
49712 + subjmap->kernel = s_tmp;
49713 + insert_subj_map_entry(subjmap);
49714 +
49715 + if (copy_from_user(s_tmp, userp,
49716 + sizeof (struct acl_subject_label)))
49717 + return ERR_PTR(-EFAULT);
49718 +
49719 + len = strnlen_user(s_tmp->filename, PATH_MAX);
49720 +
49721 + if (!len || len >= PATH_MAX)
49722 + return ERR_PTR(-EINVAL);
49723 +
49724 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49725 + return ERR_PTR(-ENOMEM);
49726 +
49727 + if (copy_from_user(tmp, s_tmp->filename, len))
49728 + return ERR_PTR(-EFAULT);
49729 + tmp[len-1] = '\0';
49730 + s_tmp->filename = tmp;
49731 +
49732 + if (!strcmp(s_tmp->filename, "/"))
49733 + role->root_label = s_tmp;
49734 +
49735 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
49736 + return ERR_PTR(-EFAULT);
49737 +
49738 + /* copy user and group transition tables */
49739 +
49740 + if (s_tmp->user_trans_num) {
49741 + uid_t *uidlist;
49742 +
49743 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
49744 + if (uidlist == NULL)
49745 + return ERR_PTR(-ENOMEM);
49746 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
49747 + return ERR_PTR(-EFAULT);
49748 +
49749 + s_tmp->user_transitions = uidlist;
49750 + }
49751 +
49752 + if (s_tmp->group_trans_num) {
49753 + gid_t *gidlist;
49754 +
49755 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
49756 + if (gidlist == NULL)
49757 + return ERR_PTR(-ENOMEM);
49758 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
49759 + return ERR_PTR(-EFAULT);
49760 +
49761 + s_tmp->group_transitions = gidlist;
49762 + }
49763 +
49764 + /* set up object hash table */
49765 + num_objs = count_user_objs(ghash.first);
49766 +
49767 + s_tmp->obj_hash_size = num_objs;
49768 + s_tmp->obj_hash =
49769 + (struct acl_object_label **)
49770 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
49771 +
49772 + if (!s_tmp->obj_hash)
49773 + return ERR_PTR(-ENOMEM);
49774 +
49775 + memset(s_tmp->obj_hash, 0,
49776 + s_tmp->obj_hash_size *
49777 + sizeof (struct acl_object_label *));
49778 +
49779 + /* add in objects */
49780 + err = copy_user_objs(ghash.first, s_tmp, role);
49781 +
49782 + if (err)
49783 + return ERR_PTR(err);
49784 +
49785 + /* set pointer for parent subject */
49786 + if (s_tmp->parent_subject) {
49787 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
49788 +
49789 + if (IS_ERR(s_tmp2))
49790 + return s_tmp2;
49791 +
49792 + s_tmp->parent_subject = s_tmp2;
49793 + }
49794 +
49795 + /* add in ip acls */
49796 +
49797 + if (!s_tmp->ip_num) {
49798 + s_tmp->ips = NULL;
49799 + goto insert;
49800 + }
49801 +
49802 + i_tmp =
49803 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
49804 + sizeof (struct acl_ip_label *));
49805 +
49806 + if (!i_tmp)
49807 + return ERR_PTR(-ENOMEM);
49808 +
49809 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
49810 + *(i_tmp + i_num) =
49811 + (struct acl_ip_label *)
49812 + acl_alloc(sizeof (struct acl_ip_label));
49813 + if (!*(i_tmp + i_num))
49814 + return ERR_PTR(-ENOMEM);
49815 +
49816 + if (copy_from_user
49817 + (&i_utmp2, s_tmp->ips + i_num,
49818 + sizeof (struct acl_ip_label *)))
49819 + return ERR_PTR(-EFAULT);
49820 +
49821 + if (copy_from_user
49822 + (*(i_tmp + i_num), i_utmp2,
49823 + sizeof (struct acl_ip_label)))
49824 + return ERR_PTR(-EFAULT);
49825 +
49826 + if ((*(i_tmp + i_num))->iface == NULL)
49827 + continue;
49828 +
49829 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
49830 + if (!len || len >= IFNAMSIZ)
49831 + return ERR_PTR(-EINVAL);
49832 + tmp = acl_alloc(len);
49833 + if (tmp == NULL)
49834 + return ERR_PTR(-ENOMEM);
49835 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
49836 + return ERR_PTR(-EFAULT);
49837 + (*(i_tmp + i_num))->iface = tmp;
49838 + }
49839 +
49840 + s_tmp->ips = i_tmp;
49841 +
49842 +insert:
49843 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
49844 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
49845 + return ERR_PTR(-ENOMEM);
49846 +
49847 + return s_tmp;
49848 +}
49849 +
49850 +static int
49851 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
49852 +{
49853 + struct acl_subject_label s_pre;
49854 + struct acl_subject_label * ret;
49855 + int err;
49856 +
49857 + while (userp) {
49858 + if (copy_from_user(&s_pre, userp,
49859 + sizeof (struct acl_subject_label)))
49860 + return -EFAULT;
49861 +
49862 + /* do not add nested subjects here, add
49863 + while parsing objects
49864 + */
49865 +
49866 + if (s_pre.mode & GR_NESTED) {
49867 + userp = s_pre.prev;
49868 + continue;
49869 + }
49870 +
49871 + ret = do_copy_user_subj(userp, role);
49872 +
49873 + err = PTR_ERR(ret);
49874 + if (IS_ERR(ret))
49875 + return err;
49876 +
49877 + insert_acl_subj_label(ret, role);
49878 +
49879 + userp = s_pre.prev;
49880 + }
49881 +
49882 + return 0;
49883 +}
49884 +
49885 +static int
49886 +copy_user_acl(struct gr_arg *arg)
49887 +{
49888 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49889 + struct sprole_pw *sptmp;
49890 + struct gr_hash_struct *ghash;
49891 + uid_t *domainlist;
49892 + unsigned int r_num;
49893 + unsigned int len;
49894 + char *tmp;
49895 + int err = 0;
49896 + __u16 i;
49897 + __u32 num_subjs;
49898 +
49899 + /* we need a default and kernel role */
49900 + if (arg->role_db.num_roles < 2)
49901 + return -EINVAL;
49902 +
49903 + /* copy special role authentication info from userspace */
49904 +
49905 + num_sprole_pws = arg->num_sprole_pws;
49906 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49907 +
49908 + if (!acl_special_roles) {
49909 + err = -ENOMEM;
49910 + goto cleanup;
49911 + }
49912 +
49913 + for (i = 0; i < num_sprole_pws; i++) {
49914 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49915 + if (!sptmp) {
49916 + err = -ENOMEM;
49917 + goto cleanup;
49918 + }
49919 + if (copy_from_user(sptmp, arg->sprole_pws + i,
49920 + sizeof (struct sprole_pw))) {
49921 + err = -EFAULT;
49922 + goto cleanup;
49923 + }
49924 +
49925 + len =
49926 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49927 +
49928 + if (!len || len >= GR_SPROLE_LEN) {
49929 + err = -EINVAL;
49930 + goto cleanup;
49931 + }
49932 +
49933 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
49934 + err = -ENOMEM;
49935 + goto cleanup;
49936 + }
49937 +
49938 + if (copy_from_user(tmp, sptmp->rolename, len)) {
49939 + err = -EFAULT;
49940 + goto cleanup;
49941 + }
49942 + tmp[len-1] = '\0';
49943 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49944 + printk(KERN_ALERT "Copying special role %s\n", tmp);
49945 +#endif
49946 + sptmp->rolename = tmp;
49947 + acl_special_roles[i] = sptmp;
49948 + }
49949 +
49950 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49951 +
49952 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49953 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
49954 +
49955 + if (!r_tmp) {
49956 + err = -ENOMEM;
49957 + goto cleanup;
49958 + }
49959 +
49960 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
49961 + sizeof (struct acl_role_label *))) {
49962 + err = -EFAULT;
49963 + goto cleanup;
49964 + }
49965 +
49966 + if (copy_from_user(r_tmp, r_utmp2,
49967 + sizeof (struct acl_role_label))) {
49968 + err = -EFAULT;
49969 + goto cleanup;
49970 + }
49971 +
49972 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49973 +
49974 + if (!len || len >= PATH_MAX) {
49975 + err = -EINVAL;
49976 + goto cleanup;
49977 + }
49978 +
49979 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
49980 + err = -ENOMEM;
49981 + goto cleanup;
49982 + }
49983 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
49984 + err = -EFAULT;
49985 + goto cleanup;
49986 + }
49987 + tmp[len-1] = '\0';
49988 + r_tmp->rolename = tmp;
49989 +
49990 + if (!strcmp(r_tmp->rolename, "default")
49991 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
49992 + default_role = r_tmp;
49993 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
49994 + kernel_role = r_tmp;
49995 + }
49996 +
49997 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
49998 + err = -ENOMEM;
49999 + goto cleanup;
50000 + }
50001 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
50002 + err = -EFAULT;
50003 + goto cleanup;
50004 + }
50005 +
50006 + r_tmp->hash = ghash;
50007 +
50008 + num_subjs = count_user_subjs(r_tmp->hash->first);
50009 +
50010 + r_tmp->subj_hash_size = num_subjs;
50011 + r_tmp->subj_hash =
50012 + (struct acl_subject_label **)
50013 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
50014 +
50015 + if (!r_tmp->subj_hash) {
50016 + err = -ENOMEM;
50017 + goto cleanup;
50018 + }
50019 +
50020 + err = copy_user_allowedips(r_tmp);
50021 + if (err)
50022 + goto cleanup;
50023 +
50024 + /* copy domain info */
50025 + if (r_tmp->domain_children != NULL) {
50026 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
50027 + if (domainlist == NULL) {
50028 + err = -ENOMEM;
50029 + goto cleanup;
50030 + }
50031 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
50032 + err = -EFAULT;
50033 + goto cleanup;
50034 + }
50035 + r_tmp->domain_children = domainlist;
50036 + }
50037 +
50038 + err = copy_user_transitions(r_tmp);
50039 + if (err)
50040 + goto cleanup;
50041 +
50042 + memset(r_tmp->subj_hash, 0,
50043 + r_tmp->subj_hash_size *
50044 + sizeof (struct acl_subject_label *));
50045 +
50046 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
50047 +
50048 + if (err)
50049 + goto cleanup;
50050 +
50051 + /* set nested subject list to null */
50052 + r_tmp->hash->first = NULL;
50053 +
50054 + insert_acl_role_label(r_tmp);
50055 + }
50056 +
50057 + goto return_err;
50058 + cleanup:
50059 + free_variables();
50060 + return_err:
50061 + return err;
50062 +
50063 +}
50064 +
50065 +static int
50066 +gracl_init(struct gr_arg *args)
50067 +{
50068 + int error = 0;
50069 +
50070 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
50071 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
50072 +
50073 + if (init_variables(args)) {
50074 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
50075 + error = -ENOMEM;
50076 + free_variables();
50077 + goto out;
50078 + }
50079 +
50080 + error = copy_user_acl(args);
50081 + free_init_variables();
50082 + if (error) {
50083 + free_variables();
50084 + goto out;
50085 + }
50086 +
50087 + if ((error = gr_set_acls(0))) {
50088 + free_variables();
50089 + goto out;
50090 + }
50091 +
50092 + pax_open_kernel();
50093 + gr_status |= GR_READY;
50094 + pax_close_kernel();
50095 +
50096 + out:
50097 + return error;
50098 +}
50099 +
50100 +/* derived from glibc fnmatch() 0: match, 1: no match*/
50101 +
50102 +static int
50103 +glob_match(const char *p, const char *n)
50104 +{
50105 + char c;
50106 +
50107 + while ((c = *p++) != '\0') {
50108 + switch (c) {
50109 + case '?':
50110 + if (*n == '\0')
50111 + return 1;
50112 + else if (*n == '/')
50113 + return 1;
50114 + break;
50115 + case '\\':
50116 + if (*n != c)
50117 + return 1;
50118 + break;
50119 + case '*':
50120 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
50121 + if (*n == '/')
50122 + return 1;
50123 + else if (c == '?') {
50124 + if (*n == '\0')
50125 + return 1;
50126 + else
50127 + ++n;
50128 + }
50129 + }
50130 + if (c == '\0') {
50131 + return 0;
50132 + } else {
50133 + const char *endp;
50134 +
50135 + if ((endp = strchr(n, '/')) == NULL)
50136 + endp = n + strlen(n);
50137 +
50138 + if (c == '[') {
50139 + for (--p; n < endp; ++n)
50140 + if (!glob_match(p, n))
50141 + return 0;
50142 + } else if (c == '/') {
50143 + while (*n != '\0' && *n != '/')
50144 + ++n;
50145 + if (*n == '/' && !glob_match(p, n + 1))
50146 + return 0;
50147 + } else {
50148 + for (--p; n < endp; ++n)
50149 + if (*n == c && !glob_match(p, n))
50150 + return 0;
50151 + }
50152 +
50153 + return 1;
50154 + }
50155 + case '[':
50156 + {
50157 + int not;
50158 + char cold;
50159 +
50160 + if (*n == '\0' || *n == '/')
50161 + return 1;
50162 +
50163 + not = (*p == '!' || *p == '^');
50164 + if (not)
50165 + ++p;
50166 +
50167 + c = *p++;
50168 + for (;;) {
50169 + unsigned char fn = (unsigned char)*n;
50170 +
50171 + if (c == '\0')
50172 + return 1;
50173 + else {
50174 + if (c == fn)
50175 + goto matched;
50176 + cold = c;
50177 + c = *p++;
50178 +
50179 + if (c == '-' && *p != ']') {
50180 + unsigned char cend = *p++;
50181 +
50182 + if (cend == '\0')
50183 + return 1;
50184 +
50185 + if (cold <= fn && fn <= cend)
50186 + goto matched;
50187 +
50188 + c = *p++;
50189 + }
50190 + }
50191 +
50192 + if (c == ']')
50193 + break;
50194 + }
50195 + if (!not)
50196 + return 1;
50197 + break;
50198 + matched:
50199 + while (c != ']') {
50200 + if (c == '\0')
50201 + return 1;
50202 +
50203 + c = *p++;
50204 + }
50205 + if (not)
50206 + return 1;
50207 + }
50208 + break;
50209 + default:
50210 + if (c != *n)
50211 + return 1;
50212 + }
50213 +
50214 + ++n;
50215 + }
50216 +
50217 + if (*n == '\0')
50218 + return 0;
50219 +
50220 + if (*n == '/')
50221 + return 0;
50222 +
50223 + return 1;
50224 +}
50225 +
50226 +static struct acl_object_label *
50227 +chk_glob_label(struct acl_object_label *globbed,
50228 + struct dentry *dentry, struct vfsmount *mnt, char **path)
50229 +{
50230 + struct acl_object_label *tmp;
50231 +
50232 + if (*path == NULL)
50233 + *path = gr_to_filename_nolock(dentry, mnt);
50234 +
50235 + tmp = globbed;
50236 +
50237 + while (tmp) {
50238 + if (!glob_match(tmp->filename, *path))
50239 + return tmp;
50240 + tmp = tmp->next;
50241 + }
50242 +
50243 + return NULL;
50244 +}
50245 +
50246 +static struct acl_object_label *
50247 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50248 + const ino_t curr_ino, const dev_t curr_dev,
50249 + const struct acl_subject_label *subj, char **path, const int checkglob)
50250 +{
50251 + struct acl_subject_label *tmpsubj;
50252 + struct acl_object_label *retval;
50253 + struct acl_object_label *retval2;
50254 +
50255 + tmpsubj = (struct acl_subject_label *) subj;
50256 + read_lock(&gr_inode_lock);
50257 + do {
50258 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
50259 + if (retval) {
50260 + if (checkglob && retval->globbed) {
50261 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
50262 + (struct vfsmount *)orig_mnt, path);
50263 + if (retval2)
50264 + retval = retval2;
50265 + }
50266 + break;
50267 + }
50268 + } while ((tmpsubj = tmpsubj->parent_subject));
50269 + read_unlock(&gr_inode_lock);
50270 +
50271 + return retval;
50272 +}
50273 +
50274 +static __inline__ struct acl_object_label *
50275 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50276 + struct dentry *curr_dentry,
50277 + const struct acl_subject_label *subj, char **path, const int checkglob)
50278 +{
50279 + int newglob = checkglob;
50280 + ino_t inode;
50281 + dev_t device;
50282 +
50283 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
50284 + as we don't want a / * rule to match instead of the / object
50285 + don't do this for create lookups that call this function though, since they're looking up
50286 + on the parent and thus need globbing checks on all paths
50287 + */
50288 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
50289 + newglob = GR_NO_GLOB;
50290 +
50291 + spin_lock(&curr_dentry->d_lock);
50292 + inode = curr_dentry->d_inode->i_ino;
50293 + device = __get_dev(curr_dentry);
50294 + spin_unlock(&curr_dentry->d_lock);
50295 +
50296 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
50297 +}
50298 +
50299 +static struct acl_object_label *
50300 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50301 + const struct acl_subject_label *subj, char *path, const int checkglob)
50302 +{
50303 + struct dentry *dentry = (struct dentry *) l_dentry;
50304 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50305 + struct acl_object_label *retval;
50306 + struct dentry *parent;
50307 +
50308 + write_seqlock(&rename_lock);
50309 + br_read_lock(vfsmount_lock);
50310 +
50311 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
50312 +#ifdef CONFIG_NET
50313 + mnt == sock_mnt ||
50314 +#endif
50315 +#ifdef CONFIG_HUGETLBFS
50316 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
50317 +#endif
50318 + /* ignore Eric Biederman */
50319 + IS_PRIVATE(l_dentry->d_inode))) {
50320 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
50321 + goto out;
50322 + }
50323 +
50324 + for (;;) {
50325 + if (dentry == real_root.dentry && mnt == real_root.mnt)
50326 + break;
50327 +
50328 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50329 + if (mnt->mnt_parent == mnt)
50330 + break;
50331 +
50332 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50333 + if (retval != NULL)
50334 + goto out;
50335 +
50336 + dentry = mnt->mnt_mountpoint;
50337 + mnt = mnt->mnt_parent;
50338 + continue;
50339 + }
50340 +
50341 + parent = dentry->d_parent;
50342 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50343 + if (retval != NULL)
50344 + goto out;
50345 +
50346 + dentry = parent;
50347 + }
50348 +
50349 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50350 +
50351 + /* real_root is pinned so we don't have to hold a reference */
50352 + if (retval == NULL)
50353 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
50354 +out:
50355 + br_read_unlock(vfsmount_lock);
50356 + write_sequnlock(&rename_lock);
50357 +
50358 + BUG_ON(retval == NULL);
50359 +
50360 + return retval;
50361 +}
50362 +
50363 +static __inline__ struct acl_object_label *
50364 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50365 + const struct acl_subject_label *subj)
50366 +{
50367 + char *path = NULL;
50368 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
50369 +}
50370 +
50371 +static __inline__ struct acl_object_label *
50372 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50373 + const struct acl_subject_label *subj)
50374 +{
50375 + char *path = NULL;
50376 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
50377 +}
50378 +
50379 +static __inline__ struct acl_object_label *
50380 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50381 + const struct acl_subject_label *subj, char *path)
50382 +{
50383 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
50384 +}
50385 +
50386 +static struct acl_subject_label *
50387 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50388 + const struct acl_role_label *role)
50389 +{
50390 + struct dentry *dentry = (struct dentry *) l_dentry;
50391 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50392 + struct acl_subject_label *retval;
50393 + struct dentry *parent;
50394 +
50395 + write_seqlock(&rename_lock);
50396 + br_read_lock(vfsmount_lock);
50397 +
50398 + for (;;) {
50399 + if (dentry == real_root.dentry && mnt == real_root.mnt)
50400 + break;
50401 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50402 + if (mnt->mnt_parent == mnt)
50403 + break;
50404 +
50405 + spin_lock(&dentry->d_lock);
50406 + read_lock(&gr_inode_lock);
50407 + retval =
50408 + lookup_acl_subj_label(dentry->d_inode->i_ino,
50409 + __get_dev(dentry), role);
50410 + read_unlock(&gr_inode_lock);
50411 + spin_unlock(&dentry->d_lock);
50412 + if (retval != NULL)
50413 + goto out;
50414 +
50415 + dentry = mnt->mnt_mountpoint;
50416 + mnt = mnt->mnt_parent;
50417 + continue;
50418 + }
50419 +
50420 + spin_lock(&dentry->d_lock);
50421 + read_lock(&gr_inode_lock);
50422 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50423 + __get_dev(dentry), role);
50424 + read_unlock(&gr_inode_lock);
50425 + parent = dentry->d_parent;
50426 + spin_unlock(&dentry->d_lock);
50427 +
50428 + if (retval != NULL)
50429 + goto out;
50430 +
50431 + dentry = parent;
50432 + }
50433 +
50434 + spin_lock(&dentry->d_lock);
50435 + read_lock(&gr_inode_lock);
50436 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50437 + __get_dev(dentry), role);
50438 + read_unlock(&gr_inode_lock);
50439 + spin_unlock(&dentry->d_lock);
50440 +
50441 + if (unlikely(retval == NULL)) {
50442 + /* real_root is pinned, we don't need to hold a reference */
50443 + read_lock(&gr_inode_lock);
50444 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
50445 + __get_dev(real_root.dentry), role);
50446 + read_unlock(&gr_inode_lock);
50447 + }
50448 +out:
50449 + br_read_unlock(vfsmount_lock);
50450 + write_sequnlock(&rename_lock);
50451 +
50452 + BUG_ON(retval == NULL);
50453 +
50454 + return retval;
50455 +}
50456 +
50457 +static void
50458 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
50459 +{
50460 + struct task_struct *task = current;
50461 + const struct cred *cred = current_cred();
50462 +
50463 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50464 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50465 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50466 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
50467 +
50468 + return;
50469 +}
50470 +
50471 +static void
50472 +gr_log_learn_sysctl(const char *path, const __u32 mode)
50473 +{
50474 + struct task_struct *task = current;
50475 + const struct cred *cred = current_cred();
50476 +
50477 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50478 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50479 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50480 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
50481 +
50482 + return;
50483 +}
50484 +
50485 +static void
50486 +gr_log_learn_id_change(const char type, const unsigned int real,
50487 + const unsigned int effective, const unsigned int fs)
50488 +{
50489 + struct task_struct *task = current;
50490 + const struct cred *cred = current_cred();
50491 +
50492 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
50493 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50494 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50495 + type, real, effective, fs, &task->signal->saved_ip);
50496 +
50497 + return;
50498 +}
50499 +
50500 +__u32
50501 +gr_search_file(const struct dentry * dentry, const __u32 mode,
50502 + const struct vfsmount * mnt)
50503 +{
50504 + __u32 retval = mode;
50505 + struct acl_subject_label *curracl;
50506 + struct acl_object_label *currobj;
50507 +
50508 + if (unlikely(!(gr_status & GR_READY)))
50509 + return (mode & ~GR_AUDITS);
50510 +
50511 + curracl = current->acl;
50512 +
50513 + currobj = chk_obj_label(dentry, mnt, curracl);
50514 + retval = currobj->mode & mode;
50515 +
50516 + /* if we're opening a specified transfer file for writing
50517 + (e.g. /dev/initctl), then transfer our role to init
50518 + */
50519 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
50520 + current->role->roletype & GR_ROLE_PERSIST)) {
50521 + struct task_struct *task = init_pid_ns.child_reaper;
50522 +
50523 + if (task->role != current->role) {
50524 + task->acl_sp_role = 0;
50525 + task->acl_role_id = current->acl_role_id;
50526 + task->role = current->role;
50527 + rcu_read_lock();
50528 + read_lock(&grsec_exec_file_lock);
50529 + gr_apply_subject_to_task(task);
50530 + read_unlock(&grsec_exec_file_lock);
50531 + rcu_read_unlock();
50532 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
50533 + }
50534 + }
50535 +
50536 + if (unlikely
50537 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
50538 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
50539 + __u32 new_mode = mode;
50540 +
50541 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50542 +
50543 + retval = new_mode;
50544 +
50545 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
50546 + new_mode |= GR_INHERIT;
50547 +
50548 + if (!(mode & GR_NOLEARN))
50549 + gr_log_learn(dentry, mnt, new_mode);
50550 + }
50551 +
50552 + return retval;
50553 +}
50554 +
50555 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
50556 + const struct dentry *parent,
50557 + const struct vfsmount *mnt)
50558 +{
50559 + struct name_entry *match;
50560 + struct acl_object_label *matchpo;
50561 + struct acl_subject_label *curracl;
50562 + char *path;
50563 +
50564 + if (unlikely(!(gr_status & GR_READY)))
50565 + return NULL;
50566 +
50567 + preempt_disable();
50568 + path = gr_to_filename_rbac(new_dentry, mnt);
50569 + match = lookup_name_entry_create(path);
50570 +
50571 + curracl = current->acl;
50572 +
50573 + if (match) {
50574 + read_lock(&gr_inode_lock);
50575 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
50576 + read_unlock(&gr_inode_lock);
50577 +
50578 + if (matchpo) {
50579 + preempt_enable();
50580 + return matchpo;
50581 + }
50582 + }
50583 +
50584 + // lookup parent
50585 +
50586 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
50587 +
50588 + preempt_enable();
50589 + return matchpo;
50590 +}
50591 +
50592 +__u32
50593 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
50594 + const struct vfsmount * mnt, const __u32 mode)
50595 +{
50596 + struct acl_object_label *matchpo;
50597 + __u32 retval;
50598 +
50599 + if (unlikely(!(gr_status & GR_READY)))
50600 + return (mode & ~GR_AUDITS);
50601 +
50602 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
50603 +
50604 + retval = matchpo->mode & mode;
50605 +
50606 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
50607 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50608 + __u32 new_mode = mode;
50609 +
50610 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50611 +
50612 + gr_log_learn(new_dentry, mnt, new_mode);
50613 + return new_mode;
50614 + }
50615 +
50616 + return retval;
50617 +}
50618 +
50619 +__u32
50620 +gr_check_link(const struct dentry * new_dentry,
50621 + const struct dentry * parent_dentry,
50622 + const struct vfsmount * parent_mnt,
50623 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
50624 +{
50625 + struct acl_object_label *obj;
50626 + __u32 oldmode, newmode;
50627 + __u32 needmode;
50628 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
50629 + GR_DELETE | GR_INHERIT;
50630 +
50631 + if (unlikely(!(gr_status & GR_READY)))
50632 + return (GR_CREATE | GR_LINK);
50633 +
50634 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
50635 + oldmode = obj->mode;
50636 +
50637 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
50638 + newmode = obj->mode;
50639 +
50640 + needmode = newmode & checkmodes;
50641 +
50642 + // old name for hardlink must have at least the permissions of the new name
50643 + if ((oldmode & needmode) != needmode)
50644 + goto bad;
50645 +
50646 + // if old name had restrictions/auditing, make sure the new name does as well
50647 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
50648 +
50649 + // don't allow hardlinking of suid/sgid files without permission
50650 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50651 + needmode |= GR_SETID;
50652 +
50653 + if ((newmode & needmode) != needmode)
50654 + goto bad;
50655 +
50656 + // enforce minimum permissions
50657 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
50658 + return newmode;
50659 +bad:
50660 + needmode = oldmode;
50661 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50662 + needmode |= GR_SETID;
50663 +
50664 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
50665 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
50666 + return (GR_CREATE | GR_LINK);
50667 + } else if (newmode & GR_SUPPRESS)
50668 + return GR_SUPPRESS;
50669 + else
50670 + return 0;
50671 +}
50672 +
50673 +int
50674 +gr_check_hidden_task(const struct task_struct *task)
50675 +{
50676 + if (unlikely(!(gr_status & GR_READY)))
50677 + return 0;
50678 +
50679 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50680 + return 1;
50681 +
50682 + return 0;
50683 +}
50684 +
50685 +int
50686 +gr_check_protected_task(const struct task_struct *task)
50687 +{
50688 + if (unlikely(!(gr_status & GR_READY) || !task))
50689 + return 0;
50690 +
50691 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50692 + task->acl != current->acl)
50693 + return 1;
50694 +
50695 + return 0;
50696 +}
50697 +
50698 +int
50699 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50700 +{
50701 + struct task_struct *p;
50702 + int ret = 0;
50703 +
50704 + if (unlikely(!(gr_status & GR_READY) || !pid))
50705 + return ret;
50706 +
50707 + read_lock(&tasklist_lock);
50708 + do_each_pid_task(pid, type, p) {
50709 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50710 + p->acl != current->acl) {
50711 + ret = 1;
50712 + goto out;
50713 + }
50714 + } while_each_pid_task(pid, type, p);
50715 +out:
50716 + read_unlock(&tasklist_lock);
50717 +
50718 + return ret;
50719 +}
50720 +
50721 +void
50722 +gr_copy_label(struct task_struct *tsk)
50723 +{
50724 + /* plain copying of fields is already done by dup_task_struct */
50725 + tsk->signal->used_accept = 0;
50726 + tsk->acl_sp_role = 0;
50727 + //tsk->acl_role_id = current->acl_role_id;
50728 + //tsk->acl = current->acl;
50729 + //tsk->role = current->role;
50730 + tsk->signal->curr_ip = current->signal->curr_ip;
50731 + tsk->signal->saved_ip = current->signal->saved_ip;
50732 + if (current->exec_file)
50733 + get_file(current->exec_file);
50734 + //tsk->exec_file = current->exec_file;
50735 + //tsk->is_writable = current->is_writable;
50736 + if (unlikely(current->signal->used_accept)) {
50737 + current->signal->curr_ip = 0;
50738 + current->signal->saved_ip = 0;
50739 + }
50740 +
50741 + return;
50742 +}
50743 +
50744 +static void
50745 +gr_set_proc_res(struct task_struct *task)
50746 +{
50747 + struct acl_subject_label *proc;
50748 + unsigned short i;
50749 +
50750 + proc = task->acl;
50751 +
50752 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50753 + return;
50754 +
50755 + for (i = 0; i < RLIM_NLIMITS; i++) {
50756 + if (!(proc->resmask & (1 << i)))
50757 + continue;
50758 +
50759 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50760 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50761 + }
50762 +
50763 + return;
50764 +}
50765 +
50766 +extern int __gr_process_user_ban(struct user_struct *user);
50767 +
50768 +int
50769 +gr_check_user_change(int real, int effective, int fs)
50770 +{
50771 + unsigned int i;
50772 + __u16 num;
50773 + uid_t *uidlist;
50774 + int curuid;
50775 + int realok = 0;
50776 + int effectiveok = 0;
50777 + int fsok = 0;
50778 +
50779 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
50780 + struct user_struct *user;
50781 +
50782 + if (real == -1)
50783 + goto skipit;
50784 +
50785 + user = find_user(real);
50786 + if (user == NULL)
50787 + goto skipit;
50788 +
50789 + if (__gr_process_user_ban(user)) {
50790 + /* for find_user */
50791 + free_uid(user);
50792 + return 1;
50793 + }
50794 +
50795 + /* for find_user */
50796 + free_uid(user);
50797 +
50798 +skipit:
50799 +#endif
50800 +
50801 + if (unlikely(!(gr_status & GR_READY)))
50802 + return 0;
50803 +
50804 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50805 + gr_log_learn_id_change('u', real, effective, fs);
50806 +
50807 + num = current->acl->user_trans_num;
50808 + uidlist = current->acl->user_transitions;
50809 +
50810 + if (uidlist == NULL)
50811 + return 0;
50812 +
50813 + if (real == -1)
50814 + realok = 1;
50815 + if (effective == -1)
50816 + effectiveok = 1;
50817 + if (fs == -1)
50818 + fsok = 1;
50819 +
50820 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
50821 + for (i = 0; i < num; i++) {
50822 + curuid = (int)uidlist[i];
50823 + if (real == curuid)
50824 + realok = 1;
50825 + if (effective == curuid)
50826 + effectiveok = 1;
50827 + if (fs == curuid)
50828 + fsok = 1;
50829 + }
50830 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
50831 + for (i = 0; i < num; i++) {
50832 + curuid = (int)uidlist[i];
50833 + if (real == curuid)
50834 + break;
50835 + if (effective == curuid)
50836 + break;
50837 + if (fs == curuid)
50838 + break;
50839 + }
50840 + /* not in deny list */
50841 + if (i == num) {
50842 + realok = 1;
50843 + effectiveok = 1;
50844 + fsok = 1;
50845 + }
50846 + }
50847 +
50848 + if (realok && effectiveok && fsok)
50849 + return 0;
50850 + else {
50851 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50852 + return 1;
50853 + }
50854 +}
50855 +
50856 +int
50857 +gr_check_group_change(int real, int effective, int fs)
50858 +{
50859 + unsigned int i;
50860 + __u16 num;
50861 + gid_t *gidlist;
50862 + int curgid;
50863 + int realok = 0;
50864 + int effectiveok = 0;
50865 + int fsok = 0;
50866 +
50867 + if (unlikely(!(gr_status & GR_READY)))
50868 + return 0;
50869 +
50870 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50871 + gr_log_learn_id_change('g', real, effective, fs);
50872 +
50873 + num = current->acl->group_trans_num;
50874 + gidlist = current->acl->group_transitions;
50875 +
50876 + if (gidlist == NULL)
50877 + return 0;
50878 +
50879 + if (real == -1)
50880 + realok = 1;
50881 + if (effective == -1)
50882 + effectiveok = 1;
50883 + if (fs == -1)
50884 + fsok = 1;
50885 +
50886 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
50887 + for (i = 0; i < num; i++) {
50888 + curgid = (int)gidlist[i];
50889 + if (real == curgid)
50890 + realok = 1;
50891 + if (effective == curgid)
50892 + effectiveok = 1;
50893 + if (fs == curgid)
50894 + fsok = 1;
50895 + }
50896 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
50897 + for (i = 0; i < num; i++) {
50898 + curgid = (int)gidlist[i];
50899 + if (real == curgid)
50900 + break;
50901 + if (effective == curgid)
50902 + break;
50903 + if (fs == curgid)
50904 + break;
50905 + }
50906 + /* not in deny list */
50907 + if (i == num) {
50908 + realok = 1;
50909 + effectiveok = 1;
50910 + fsok = 1;
50911 + }
50912 + }
50913 +
50914 + if (realok && effectiveok && fsok)
50915 + return 0;
50916 + else {
50917 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50918 + return 1;
50919 + }
50920 +}
50921 +
50922 +extern int gr_acl_is_capable(const int cap);
50923 +
50924 +void
50925 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
50926 +{
50927 + struct acl_role_label *role = task->role;
50928 + struct acl_subject_label *subj = NULL;
50929 + struct acl_object_label *obj;
50930 + struct file *filp;
50931 +
50932 + if (unlikely(!(gr_status & GR_READY)))
50933 + return;
50934 +
50935 + filp = task->exec_file;
50936 +
50937 + /* kernel process, we'll give them the kernel role */
50938 + if (unlikely(!filp)) {
50939 + task->role = kernel_role;
50940 + task->acl = kernel_role->root_label;
50941 + return;
50942 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
50943 + role = lookup_acl_role_label(task, uid, gid);
50944 +
50945 + /* don't change the role if we're not a privileged process */
50946 + if (role && task->role != role &&
50947 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
50948 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
50949 + return;
50950 +
50951 + /* perform subject lookup in possibly new role
50952 + we can use this result below in the case where role == task->role
50953 + */
50954 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
50955 +
50956 + /* if we changed uid/gid, but result in the same role
50957 + and are using inheritance, don't lose the inherited subject
50958 + if current subject is other than what normal lookup
50959 + would result in, we arrived via inheritance, don't
50960 + lose subject
50961 + */
50962 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
50963 + (subj == task->acl)))
50964 + task->acl = subj;
50965 +
50966 + task->role = role;
50967 +
50968 + task->is_writable = 0;
50969 +
50970 + /* ignore additional mmap checks for processes that are writable
50971 + by the default ACL */
50972 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50973 + if (unlikely(obj->mode & GR_WRITE))
50974 + task->is_writable = 1;
50975 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50976 + if (unlikely(obj->mode & GR_WRITE))
50977 + task->is_writable = 1;
50978 +
50979 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50980 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50981 +#endif
50982 +
50983 + gr_set_proc_res(task);
50984 +
50985 + return;
50986 +}
50987 +
50988 +int
50989 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50990 + const int unsafe_flags)
50991 +{
50992 + struct task_struct *task = current;
50993 + struct acl_subject_label *newacl;
50994 + struct acl_object_label *obj;
50995 + __u32 retmode;
50996 +
50997 + if (unlikely(!(gr_status & GR_READY)))
50998 + return 0;
50999 +
51000 + newacl = chk_subj_label(dentry, mnt, task->role);
51001 +
51002 + task_lock(task);
51003 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
51004 + !(task->role->roletype & GR_ROLE_GOD) &&
51005 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
51006 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
51007 + task_unlock(task);
51008 + if (unsafe_flags & LSM_UNSAFE_SHARE)
51009 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
51010 + else
51011 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
51012 + return -EACCES;
51013 + }
51014 + task_unlock(task);
51015 +
51016 + obj = chk_obj_label(dentry, mnt, task->acl);
51017 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
51018 +
51019 + if (!(task->acl->mode & GR_INHERITLEARN) &&
51020 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
51021 + if (obj->nested)
51022 + task->acl = obj->nested;
51023 + else
51024 + task->acl = newacl;
51025 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
51026 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
51027 +
51028 + task->is_writable = 0;
51029 +
51030 + /* ignore additional mmap checks for processes that are writable
51031 + by the default ACL */
51032 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
51033 + if (unlikely(obj->mode & GR_WRITE))
51034 + task->is_writable = 1;
51035 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
51036 + if (unlikely(obj->mode & GR_WRITE))
51037 + task->is_writable = 1;
51038 +
51039 + gr_set_proc_res(task);
51040 +
51041 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51042 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51043 +#endif
51044 + return 0;
51045 +}
51046 +
51047 +/* always called with valid inodev ptr */
51048 +static void
51049 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
51050 +{
51051 + struct acl_object_label *matchpo;
51052 + struct acl_subject_label *matchps;
51053 + struct acl_subject_label *subj;
51054 + struct acl_role_label *role;
51055 + unsigned int x;
51056 +
51057 + FOR_EACH_ROLE_START(role)
51058 + FOR_EACH_SUBJECT_START(role, subj, x)
51059 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
51060 + matchpo->mode |= GR_DELETED;
51061 + FOR_EACH_SUBJECT_END(subj,x)
51062 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
51063 + if (subj->inode == ino && subj->device == dev)
51064 + subj->mode |= GR_DELETED;
51065 + FOR_EACH_NESTED_SUBJECT_END(subj)
51066 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
51067 + matchps->mode |= GR_DELETED;
51068 + FOR_EACH_ROLE_END(role)
51069 +
51070 + inodev->nentry->deleted = 1;
51071 +
51072 + return;
51073 +}
51074 +
51075 +void
51076 +gr_handle_delete(const ino_t ino, const dev_t dev)
51077 +{
51078 + struct inodev_entry *inodev;
51079 +
51080 + if (unlikely(!(gr_status & GR_READY)))
51081 + return;
51082 +
51083 + write_lock(&gr_inode_lock);
51084 + inodev = lookup_inodev_entry(ino, dev);
51085 + if (inodev != NULL)
51086 + do_handle_delete(inodev, ino, dev);
51087 + write_unlock(&gr_inode_lock);
51088 +
51089 + return;
51090 +}
51091 +
51092 +static void
51093 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
51094 + const ino_t newinode, const dev_t newdevice,
51095 + struct acl_subject_label *subj)
51096 +{
51097 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
51098 + struct acl_object_label *match;
51099 +
51100 + match = subj->obj_hash[index];
51101 +
51102 + while (match && (match->inode != oldinode ||
51103 + match->device != olddevice ||
51104 + !(match->mode & GR_DELETED)))
51105 + match = match->next;
51106 +
51107 + if (match && (match->inode == oldinode)
51108 + && (match->device == olddevice)
51109 + && (match->mode & GR_DELETED)) {
51110 + if (match->prev == NULL) {
51111 + subj->obj_hash[index] = match->next;
51112 + if (match->next != NULL)
51113 + match->next->prev = NULL;
51114 + } else {
51115 + match->prev->next = match->next;
51116 + if (match->next != NULL)
51117 + match->next->prev = match->prev;
51118 + }
51119 + match->prev = NULL;
51120 + match->next = NULL;
51121 + match->inode = newinode;
51122 + match->device = newdevice;
51123 + match->mode &= ~GR_DELETED;
51124 +
51125 + insert_acl_obj_label(match, subj);
51126 + }
51127 +
51128 + return;
51129 +}
51130 +
51131 +static void
51132 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
51133 + const ino_t newinode, const dev_t newdevice,
51134 + struct acl_role_label *role)
51135 +{
51136 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
51137 + struct acl_subject_label *match;
51138 +
51139 + match = role->subj_hash[index];
51140 +
51141 + while (match && (match->inode != oldinode ||
51142 + match->device != olddevice ||
51143 + !(match->mode & GR_DELETED)))
51144 + match = match->next;
51145 +
51146 + if (match && (match->inode == oldinode)
51147 + && (match->device == olddevice)
51148 + && (match->mode & GR_DELETED)) {
51149 + if (match->prev == NULL) {
51150 + role->subj_hash[index] = match->next;
51151 + if (match->next != NULL)
51152 + match->next->prev = NULL;
51153 + } else {
51154 + match->prev->next = match->next;
51155 + if (match->next != NULL)
51156 + match->next->prev = match->prev;
51157 + }
51158 + match->prev = NULL;
51159 + match->next = NULL;
51160 + match->inode = newinode;
51161 + match->device = newdevice;
51162 + match->mode &= ~GR_DELETED;
51163 +
51164 + insert_acl_subj_label(match, role);
51165 + }
51166 +
51167 + return;
51168 +}
51169 +
51170 +static void
51171 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
51172 + const ino_t newinode, const dev_t newdevice)
51173 +{
51174 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
51175 + struct inodev_entry *match;
51176 +
51177 + match = inodev_set.i_hash[index];
51178 +
51179 + while (match && (match->nentry->inode != oldinode ||
51180 + match->nentry->device != olddevice || !match->nentry->deleted))
51181 + match = match->next;
51182 +
51183 + if (match && (match->nentry->inode == oldinode)
51184 + && (match->nentry->device == olddevice) &&
51185 + match->nentry->deleted) {
51186 + if (match->prev == NULL) {
51187 + inodev_set.i_hash[index] = match->next;
51188 + if (match->next != NULL)
51189 + match->next->prev = NULL;
51190 + } else {
51191 + match->prev->next = match->next;
51192 + if (match->next != NULL)
51193 + match->next->prev = match->prev;
51194 + }
51195 + match->prev = NULL;
51196 + match->next = NULL;
51197 + match->nentry->inode = newinode;
51198 + match->nentry->device = newdevice;
51199 + match->nentry->deleted = 0;
51200 +
51201 + insert_inodev_entry(match);
51202 + }
51203 +
51204 + return;
51205 +}
51206 +
51207 +static void
51208 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
51209 +{
51210 + struct acl_subject_label *subj;
51211 + struct acl_role_label *role;
51212 + unsigned int x;
51213 +
51214 + FOR_EACH_ROLE_START(role)
51215 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
51216 +
51217 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
51218 + if ((subj->inode == ino) && (subj->device == dev)) {
51219 + subj->inode = ino;
51220 + subj->device = dev;
51221 + }
51222 + FOR_EACH_NESTED_SUBJECT_END(subj)
51223 + FOR_EACH_SUBJECT_START(role, subj, x)
51224 + update_acl_obj_label(matchn->inode, matchn->device,
51225 + ino, dev, subj);
51226 + FOR_EACH_SUBJECT_END(subj,x)
51227 + FOR_EACH_ROLE_END(role)
51228 +
51229 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
51230 +
51231 + return;
51232 +}
51233 +
51234 +static void
51235 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
51236 + const struct vfsmount *mnt)
51237 +{
51238 + ino_t ino = dentry->d_inode->i_ino;
51239 + dev_t dev = __get_dev(dentry);
51240 +
51241 + __do_handle_create(matchn, ino, dev);
51242 +
51243 + return;
51244 +}
51245 +
51246 +void
51247 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
51248 +{
51249 + struct name_entry *matchn;
51250 +
51251 + if (unlikely(!(gr_status & GR_READY)))
51252 + return;
51253 +
51254 + preempt_disable();
51255 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
51256 +
51257 + if (unlikely((unsigned long)matchn)) {
51258 + write_lock(&gr_inode_lock);
51259 + do_handle_create(matchn, dentry, mnt);
51260 + write_unlock(&gr_inode_lock);
51261 + }
51262 + preempt_enable();
51263 +
51264 + return;
51265 +}
51266 +
51267 +void
51268 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
51269 +{
51270 + struct name_entry *matchn;
51271 +
51272 + if (unlikely(!(gr_status & GR_READY)))
51273 + return;
51274 +
51275 + preempt_disable();
51276 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
51277 +
51278 + if (unlikely((unsigned long)matchn)) {
51279 + write_lock(&gr_inode_lock);
51280 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
51281 + write_unlock(&gr_inode_lock);
51282 + }
51283 + preempt_enable();
51284 +
51285 + return;
51286 +}
51287 +
51288 +void
51289 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
51290 + struct dentry *old_dentry,
51291 + struct dentry *new_dentry,
51292 + struct vfsmount *mnt, const __u8 replace)
51293 +{
51294 + struct name_entry *matchn;
51295 + struct inodev_entry *inodev;
51296 + struct inode *inode = new_dentry->d_inode;
51297 + ino_t old_ino = old_dentry->d_inode->i_ino;
51298 + dev_t old_dev = __get_dev(old_dentry);
51299 +
51300 + /* vfs_rename swaps the name and parent link for old_dentry and
51301 + new_dentry
51302 + at this point, old_dentry has the new name, parent link, and inode
51303 + for the renamed file
51304 + if a file is being replaced by a rename, new_dentry has the inode
51305 + and name for the replaced file
51306 + */
51307 +
51308 + if (unlikely(!(gr_status & GR_READY)))
51309 + return;
51310 +
51311 + preempt_disable();
51312 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
51313 +
51314 + /* we wouldn't have to check d_inode if it weren't for
51315 + NFS silly-renaming
51316 + */
51317 +
51318 + write_lock(&gr_inode_lock);
51319 + if (unlikely(replace && inode)) {
51320 + ino_t new_ino = inode->i_ino;
51321 + dev_t new_dev = __get_dev(new_dentry);
51322 +
51323 + inodev = lookup_inodev_entry(new_ino, new_dev);
51324 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
51325 + do_handle_delete(inodev, new_ino, new_dev);
51326 + }
51327 +
51328 + inodev = lookup_inodev_entry(old_ino, old_dev);
51329 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
51330 + do_handle_delete(inodev, old_ino, old_dev);
51331 +
51332 + if (unlikely((unsigned long)matchn))
51333 + do_handle_create(matchn, old_dentry, mnt);
51334 +
51335 + write_unlock(&gr_inode_lock);
51336 + preempt_enable();
51337 +
51338 + return;
51339 +}
51340 +
51341 +static int
51342 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
51343 + unsigned char **sum)
51344 +{
51345 + struct acl_role_label *r;
51346 + struct role_allowed_ip *ipp;
51347 + struct role_transition *trans;
51348 + unsigned int i;
51349 + int found = 0;
51350 + u32 curr_ip = current->signal->curr_ip;
51351 +
51352 + current->signal->saved_ip = curr_ip;
51353 +
51354 + /* check transition table */
51355 +
51356 + for (trans = current->role->transitions; trans; trans = trans->next) {
51357 + if (!strcmp(rolename, trans->rolename)) {
51358 + found = 1;
51359 + break;
51360 + }
51361 + }
51362 +
51363 + if (!found)
51364 + return 0;
51365 +
51366 + /* handle special roles that do not require authentication
51367 + and check ip */
51368 +
51369 + FOR_EACH_ROLE_START(r)
51370 + if (!strcmp(rolename, r->rolename) &&
51371 + (r->roletype & GR_ROLE_SPECIAL)) {
51372 + found = 0;
51373 + if (r->allowed_ips != NULL) {
51374 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
51375 + if ((ntohl(curr_ip) & ipp->netmask) ==
51376 + (ntohl(ipp->addr) & ipp->netmask))
51377 + found = 1;
51378 + }
51379 + } else
51380 + found = 2;
51381 + if (!found)
51382 + return 0;
51383 +
51384 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
51385 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
51386 + *salt = NULL;
51387 + *sum = NULL;
51388 + return 1;
51389 + }
51390 + }
51391 + FOR_EACH_ROLE_END(r)
51392 +
51393 + for (i = 0; i < num_sprole_pws; i++) {
51394 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
51395 + *salt = acl_special_roles[i]->salt;
51396 + *sum = acl_special_roles[i]->sum;
51397 + return 1;
51398 + }
51399 + }
51400 +
51401 + return 0;
51402 +}
51403 +
51404 +static void
51405 +assign_special_role(char *rolename)
51406 +{
51407 + struct acl_object_label *obj;
51408 + struct acl_role_label *r;
51409 + struct acl_role_label *assigned = NULL;
51410 + struct task_struct *tsk;
51411 + struct file *filp;
51412 +
51413 + FOR_EACH_ROLE_START(r)
51414 + if (!strcmp(rolename, r->rolename) &&
51415 + (r->roletype & GR_ROLE_SPECIAL)) {
51416 + assigned = r;
51417 + break;
51418 + }
51419 + FOR_EACH_ROLE_END(r)
51420 +
51421 + if (!assigned)
51422 + return;
51423 +
51424 + read_lock(&tasklist_lock);
51425 + read_lock(&grsec_exec_file_lock);
51426 +
51427 + tsk = current->real_parent;
51428 + if (tsk == NULL)
51429 + goto out_unlock;
51430 +
51431 + filp = tsk->exec_file;
51432 + if (filp == NULL)
51433 + goto out_unlock;
51434 +
51435 + tsk->is_writable = 0;
51436 +
51437 + tsk->acl_sp_role = 1;
51438 + tsk->acl_role_id = ++acl_sp_role_value;
51439 + tsk->role = assigned;
51440 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
51441 +
51442 + /* ignore additional mmap checks for processes that are writable
51443 + by the default ACL */
51444 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51445 + if (unlikely(obj->mode & GR_WRITE))
51446 + tsk->is_writable = 1;
51447 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
51448 + if (unlikely(obj->mode & GR_WRITE))
51449 + tsk->is_writable = 1;
51450 +
51451 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51452 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
51453 +#endif
51454 +
51455 +out_unlock:
51456 + read_unlock(&grsec_exec_file_lock);
51457 + read_unlock(&tasklist_lock);
51458 + return;
51459 +}
51460 +
51461 +int gr_check_secure_terminal(struct task_struct *task)
51462 +{
51463 + struct task_struct *p, *p2, *p3;
51464 + struct files_struct *files;
51465 + struct fdtable *fdt;
51466 + struct file *our_file = NULL, *file;
51467 + int i;
51468 +
51469 + if (task->signal->tty == NULL)
51470 + return 1;
51471 +
51472 + files = get_files_struct(task);
51473 + if (files != NULL) {
51474 + rcu_read_lock();
51475 + fdt = files_fdtable(files);
51476 + for (i=0; i < fdt->max_fds; i++) {
51477 + file = fcheck_files(files, i);
51478 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
51479 + get_file(file);
51480 + our_file = file;
51481 + }
51482 + }
51483 + rcu_read_unlock();
51484 + put_files_struct(files);
51485 + }
51486 +
51487 + if (our_file == NULL)
51488 + return 1;
51489 +
51490 + read_lock(&tasklist_lock);
51491 + do_each_thread(p2, p) {
51492 + files = get_files_struct(p);
51493 + if (files == NULL ||
51494 + (p->signal && p->signal->tty == task->signal->tty)) {
51495 + if (files != NULL)
51496 + put_files_struct(files);
51497 + continue;
51498 + }
51499 + rcu_read_lock();
51500 + fdt = files_fdtable(files);
51501 + for (i=0; i < fdt->max_fds; i++) {
51502 + file = fcheck_files(files, i);
51503 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
51504 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
51505 + p3 = task;
51506 + while (p3->pid > 0) {
51507 + if (p3 == p)
51508 + break;
51509 + p3 = p3->real_parent;
51510 + }
51511 + if (p3 == p)
51512 + break;
51513 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
51514 + gr_handle_alertkill(p);
51515 + rcu_read_unlock();
51516 + put_files_struct(files);
51517 + read_unlock(&tasklist_lock);
51518 + fput(our_file);
51519 + return 0;
51520 + }
51521 + }
51522 + rcu_read_unlock();
51523 + put_files_struct(files);
51524 + } while_each_thread(p2, p);
51525 + read_unlock(&tasklist_lock);
51526 +
51527 + fput(our_file);
51528 + return 1;
51529 +}
51530 +
51531 +ssize_t
51532 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
51533 +{
51534 + struct gr_arg_wrapper uwrap;
51535 + unsigned char *sprole_salt = NULL;
51536 + unsigned char *sprole_sum = NULL;
51537 + int error = sizeof (struct gr_arg_wrapper);
51538 + int error2 = 0;
51539 +
51540 + mutex_lock(&gr_dev_mutex);
51541 +
51542 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
51543 + error = -EPERM;
51544 + goto out;
51545 + }
51546 +
51547 + if (count != sizeof (struct gr_arg_wrapper)) {
51548 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
51549 + error = -EINVAL;
51550 + goto out;
51551 + }
51552 +
51553 +
51554 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
51555 + gr_auth_expires = 0;
51556 + gr_auth_attempts = 0;
51557 + }
51558 +
51559 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
51560 + error = -EFAULT;
51561 + goto out;
51562 + }
51563 +
51564 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
51565 + error = -EINVAL;
51566 + goto out;
51567 + }
51568 +
51569 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
51570 + error = -EFAULT;
51571 + goto out;
51572 + }
51573 +
51574 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51575 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51576 + time_after(gr_auth_expires, get_seconds())) {
51577 + error = -EBUSY;
51578 + goto out;
51579 + }
51580 +
51581 + /* if non-root trying to do anything other than use a special role,
51582 + do not attempt authentication, do not count towards authentication
51583 + locking
51584 + */
51585 +
51586 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51587 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51588 + current_uid()) {
51589 + error = -EPERM;
51590 + goto out;
51591 + }
51592 +
51593 + /* ensure pw and special role name are null terminated */
51594 +
51595 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51596 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51597 +
51598 + /* Okay.
51599 + * We have our enough of the argument structure..(we have yet
51600 + * to copy_from_user the tables themselves) . Copy the tables
51601 + * only if we need them, i.e. for loading operations. */
51602 +
51603 + switch (gr_usermode->mode) {
51604 + case GR_STATUS:
51605 + if (gr_status & GR_READY) {
51606 + error = 1;
51607 + if (!gr_check_secure_terminal(current))
51608 + error = 3;
51609 + } else
51610 + error = 2;
51611 + goto out;
51612 + case GR_SHUTDOWN:
51613 + if ((gr_status & GR_READY)
51614 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51615 + pax_open_kernel();
51616 + gr_status &= ~GR_READY;
51617 + pax_close_kernel();
51618 +
51619 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51620 + free_variables();
51621 + memset(gr_usermode, 0, sizeof (struct gr_arg));
51622 + memset(gr_system_salt, 0, GR_SALT_LEN);
51623 + memset(gr_system_sum, 0, GR_SHA_LEN);
51624 + } else if (gr_status & GR_READY) {
51625 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51626 + error = -EPERM;
51627 + } else {
51628 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51629 + error = -EAGAIN;
51630 + }
51631 + break;
51632 + case GR_ENABLE:
51633 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51634 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51635 + else {
51636 + if (gr_status & GR_READY)
51637 + error = -EAGAIN;
51638 + else
51639 + error = error2;
51640 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51641 + }
51642 + break;
51643 + case GR_RELOAD:
51644 + if (!(gr_status & GR_READY)) {
51645 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51646 + error = -EAGAIN;
51647 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51648 + preempt_disable();
51649 +
51650 + pax_open_kernel();
51651 + gr_status &= ~GR_READY;
51652 + pax_close_kernel();
51653 +
51654 + free_variables();
51655 + if (!(error2 = gracl_init(gr_usermode))) {
51656 + preempt_enable();
51657 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51658 + } else {
51659 + preempt_enable();
51660 + error = error2;
51661 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51662 + }
51663 + } else {
51664 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51665 + error = -EPERM;
51666 + }
51667 + break;
51668 + case GR_SEGVMOD:
51669 + if (unlikely(!(gr_status & GR_READY))) {
51670 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51671 + error = -EAGAIN;
51672 + break;
51673 + }
51674 +
51675 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51676 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51677 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51678 + struct acl_subject_label *segvacl;
51679 + segvacl =
51680 + lookup_acl_subj_label(gr_usermode->segv_inode,
51681 + gr_usermode->segv_device,
51682 + current->role);
51683 + if (segvacl) {
51684 + segvacl->crashes = 0;
51685 + segvacl->expires = 0;
51686 + }
51687 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51688 + gr_remove_uid(gr_usermode->segv_uid);
51689 + }
51690 + } else {
51691 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51692 + error = -EPERM;
51693 + }
51694 + break;
51695 + case GR_SPROLE:
51696 + case GR_SPROLEPAM:
51697 + if (unlikely(!(gr_status & GR_READY))) {
51698 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51699 + error = -EAGAIN;
51700 + break;
51701 + }
51702 +
51703 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51704 + current->role->expires = 0;
51705 + current->role->auth_attempts = 0;
51706 + }
51707 +
51708 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51709 + time_after(current->role->expires, get_seconds())) {
51710 + error = -EBUSY;
51711 + goto out;
51712 + }
51713 +
51714 + if (lookup_special_role_auth
51715 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51716 + && ((!sprole_salt && !sprole_sum)
51717 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51718 + char *p = "";
51719 + assign_special_role(gr_usermode->sp_role);
51720 + read_lock(&tasklist_lock);
51721 + if (current->real_parent)
51722 + p = current->real_parent->role->rolename;
51723 + read_unlock(&tasklist_lock);
51724 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51725 + p, acl_sp_role_value);
51726 + } else {
51727 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51728 + error = -EPERM;
51729 + if(!(current->role->auth_attempts++))
51730 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51731 +
51732 + goto out;
51733 + }
51734 + break;
51735 + case GR_UNSPROLE:
51736 + if (unlikely(!(gr_status & GR_READY))) {
51737 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51738 + error = -EAGAIN;
51739 + break;
51740 + }
51741 +
51742 + if (current->role->roletype & GR_ROLE_SPECIAL) {
51743 + char *p = "";
51744 + int i = 0;
51745 +
51746 + read_lock(&tasklist_lock);
51747 + if (current->real_parent) {
51748 + p = current->real_parent->role->rolename;
51749 + i = current->real_parent->acl_role_id;
51750 + }
51751 + read_unlock(&tasklist_lock);
51752 +
51753 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51754 + gr_set_acls(1);
51755 + } else {
51756 + error = -EPERM;
51757 + goto out;
51758 + }
51759 + break;
51760 + default:
51761 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51762 + error = -EINVAL;
51763 + break;
51764 + }
51765 +
51766 + if (error != -EPERM)
51767 + goto out;
51768 +
51769 + if(!(gr_auth_attempts++))
51770 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51771 +
51772 + out:
51773 + mutex_unlock(&gr_dev_mutex);
51774 + return error;
51775 +}
51776 +
51777 +/* must be called with
51778 + rcu_read_lock();
51779 + read_lock(&tasklist_lock);
51780 + read_lock(&grsec_exec_file_lock);
51781 +*/
51782 +int gr_apply_subject_to_task(struct task_struct *task)
51783 +{
51784 + struct acl_object_label *obj;
51785 + char *tmpname;
51786 + struct acl_subject_label *tmpsubj;
51787 + struct file *filp;
51788 + struct name_entry *nmatch;
51789 +
51790 + filp = task->exec_file;
51791 + if (filp == NULL)
51792 + return 0;
51793 +
51794 + /* the following is to apply the correct subject
51795 + on binaries running when the RBAC system
51796 + is enabled, when the binaries have been
51797 + replaced or deleted since their execution
51798 + -----
51799 + when the RBAC system starts, the inode/dev
51800 + from exec_file will be one the RBAC system
51801 + is unaware of. It only knows the inode/dev
51802 + of the present file on disk, or the absence
51803 + of it.
51804 + */
51805 + preempt_disable();
51806 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51807 +
51808 + nmatch = lookup_name_entry(tmpname);
51809 + preempt_enable();
51810 + tmpsubj = NULL;
51811 + if (nmatch) {
51812 + if (nmatch->deleted)
51813 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51814 + else
51815 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51816 + if (tmpsubj != NULL)
51817 + task->acl = tmpsubj;
51818 + }
51819 + if (tmpsubj == NULL)
51820 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51821 + task->role);
51822 + if (task->acl) {
51823 + task->is_writable = 0;
51824 + /* ignore additional mmap checks for processes that are writable
51825 + by the default ACL */
51826 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51827 + if (unlikely(obj->mode & GR_WRITE))
51828 + task->is_writable = 1;
51829 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51830 + if (unlikely(obj->mode & GR_WRITE))
51831 + task->is_writable = 1;
51832 +
51833 + gr_set_proc_res(task);
51834 +
51835 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51836 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51837 +#endif
51838 + } else {
51839 + return 1;
51840 + }
51841 +
51842 + return 0;
51843 +}
51844 +
51845 +int
51846 +gr_set_acls(const int type)
51847 +{
51848 + struct task_struct *task, *task2;
51849 + struct acl_role_label *role = current->role;
51850 + __u16 acl_role_id = current->acl_role_id;
51851 + const struct cred *cred;
51852 + int ret;
51853 +
51854 + rcu_read_lock();
51855 + read_lock(&tasklist_lock);
51856 + read_lock(&grsec_exec_file_lock);
51857 + do_each_thread(task2, task) {
51858 + /* check to see if we're called from the exit handler,
51859 + if so, only replace ACLs that have inherited the admin
51860 + ACL */
51861 +
51862 + if (type && (task->role != role ||
51863 + task->acl_role_id != acl_role_id))
51864 + continue;
51865 +
51866 + task->acl_role_id = 0;
51867 + task->acl_sp_role = 0;
51868 +
51869 + if (task->exec_file) {
51870 + cred = __task_cred(task);
51871 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51872 + ret = gr_apply_subject_to_task(task);
51873 + if (ret) {
51874 + read_unlock(&grsec_exec_file_lock);
51875 + read_unlock(&tasklist_lock);
51876 + rcu_read_unlock();
51877 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51878 + return ret;
51879 + }
51880 + } else {
51881 + // it's a kernel process
51882 + task->role = kernel_role;
51883 + task->acl = kernel_role->root_label;
51884 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51885 + task->acl->mode &= ~GR_PROCFIND;
51886 +#endif
51887 + }
51888 + } while_each_thread(task2, task);
51889 + read_unlock(&grsec_exec_file_lock);
51890 + read_unlock(&tasklist_lock);
51891 + rcu_read_unlock();
51892 +
51893 + return 0;
51894 +}
51895 +
51896 +void
51897 +gr_learn_resource(const struct task_struct *task,
51898 + const int res, const unsigned long wanted, const int gt)
51899 +{
51900 + struct acl_subject_label *acl;
51901 + const struct cred *cred;
51902 +
51903 + if (unlikely((gr_status & GR_READY) &&
51904 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51905 + goto skip_reslog;
51906 +
51907 +#ifdef CONFIG_GRKERNSEC_RESLOG
51908 + gr_log_resource(task, res, wanted, gt);
51909 +#endif
51910 + skip_reslog:
51911 +
51912 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51913 + return;
51914 +
51915 + acl = task->acl;
51916 +
51917 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51918 + !(acl->resmask & (1 << (unsigned short) res))))
51919 + return;
51920 +
51921 + if (wanted >= acl->res[res].rlim_cur) {
51922 + unsigned long res_add;
51923 +
51924 + res_add = wanted;
51925 + switch (res) {
51926 + case RLIMIT_CPU:
51927 + res_add += GR_RLIM_CPU_BUMP;
51928 + break;
51929 + case RLIMIT_FSIZE:
51930 + res_add += GR_RLIM_FSIZE_BUMP;
51931 + break;
51932 + case RLIMIT_DATA:
51933 + res_add += GR_RLIM_DATA_BUMP;
51934 + break;
51935 + case RLIMIT_STACK:
51936 + res_add += GR_RLIM_STACK_BUMP;
51937 + break;
51938 + case RLIMIT_CORE:
51939 + res_add += GR_RLIM_CORE_BUMP;
51940 + break;
51941 + case RLIMIT_RSS:
51942 + res_add += GR_RLIM_RSS_BUMP;
51943 + break;
51944 + case RLIMIT_NPROC:
51945 + res_add += GR_RLIM_NPROC_BUMP;
51946 + break;
51947 + case RLIMIT_NOFILE:
51948 + res_add += GR_RLIM_NOFILE_BUMP;
51949 + break;
51950 + case RLIMIT_MEMLOCK:
51951 + res_add += GR_RLIM_MEMLOCK_BUMP;
51952 + break;
51953 + case RLIMIT_AS:
51954 + res_add += GR_RLIM_AS_BUMP;
51955 + break;
51956 + case RLIMIT_LOCKS:
51957 + res_add += GR_RLIM_LOCKS_BUMP;
51958 + break;
51959 + case RLIMIT_SIGPENDING:
51960 + res_add += GR_RLIM_SIGPENDING_BUMP;
51961 + break;
51962 + case RLIMIT_MSGQUEUE:
51963 + res_add += GR_RLIM_MSGQUEUE_BUMP;
51964 + break;
51965 + case RLIMIT_NICE:
51966 + res_add += GR_RLIM_NICE_BUMP;
51967 + break;
51968 + case RLIMIT_RTPRIO:
51969 + res_add += GR_RLIM_RTPRIO_BUMP;
51970 + break;
51971 + case RLIMIT_RTTIME:
51972 + res_add += GR_RLIM_RTTIME_BUMP;
51973 + break;
51974 + }
51975 +
51976 + acl->res[res].rlim_cur = res_add;
51977 +
51978 + if (wanted > acl->res[res].rlim_max)
51979 + acl->res[res].rlim_max = res_add;
51980 +
51981 + /* only log the subject filename, since resource logging is supported for
51982 + single-subject learning only */
51983 + rcu_read_lock();
51984 + cred = __task_cred(task);
51985 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51986 + task->role->roletype, cred->uid, cred->gid, acl->filename,
51987 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
51988 + "", (unsigned long) res, &task->signal->saved_ip);
51989 + rcu_read_unlock();
51990 + }
51991 +
51992 + return;
51993 +}
51994 +
51995 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
51996 +void
51997 +pax_set_initial_flags(struct linux_binprm *bprm)
51998 +{
51999 + struct task_struct *task = current;
52000 + struct acl_subject_label *proc;
52001 + unsigned long flags;
52002 +
52003 + if (unlikely(!(gr_status & GR_READY)))
52004 + return;
52005 +
52006 + flags = pax_get_flags(task);
52007 +
52008 + proc = task->acl;
52009 +
52010 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
52011 + flags &= ~MF_PAX_PAGEEXEC;
52012 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
52013 + flags &= ~MF_PAX_SEGMEXEC;
52014 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
52015 + flags &= ~MF_PAX_RANDMMAP;
52016 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
52017 + flags &= ~MF_PAX_EMUTRAMP;
52018 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
52019 + flags &= ~MF_PAX_MPROTECT;
52020 +
52021 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
52022 + flags |= MF_PAX_PAGEEXEC;
52023 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
52024 + flags |= MF_PAX_SEGMEXEC;
52025 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
52026 + flags |= MF_PAX_RANDMMAP;
52027 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
52028 + flags |= MF_PAX_EMUTRAMP;
52029 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
52030 + flags |= MF_PAX_MPROTECT;
52031 +
52032 + pax_set_flags(task, flags);
52033 +
52034 + return;
52035 +}
52036 +#endif
52037 +
52038 +#ifdef CONFIG_SYSCTL
52039 +/* Eric Biederman likes breaking userland ABI and every inode-based security
52040 + system to save 35kb of memory */
52041 +
52042 +/* we modify the passed in filename, but adjust it back before returning */
52043 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
52044 +{
52045 + struct name_entry *nmatch;
52046 + char *p, *lastp = NULL;
52047 + struct acl_object_label *obj = NULL, *tmp;
52048 + struct acl_subject_label *tmpsubj;
52049 + char c = '\0';
52050 +
52051 + read_lock(&gr_inode_lock);
52052 +
52053 + p = name + len - 1;
52054 + do {
52055 + nmatch = lookup_name_entry(name);
52056 + if (lastp != NULL)
52057 + *lastp = c;
52058 +
52059 + if (nmatch == NULL)
52060 + goto next_component;
52061 + tmpsubj = current->acl;
52062 + do {
52063 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
52064 + if (obj != NULL) {
52065 + tmp = obj->globbed;
52066 + while (tmp) {
52067 + if (!glob_match(tmp->filename, name)) {
52068 + obj = tmp;
52069 + goto found_obj;
52070 + }
52071 + tmp = tmp->next;
52072 + }
52073 + goto found_obj;
52074 + }
52075 + } while ((tmpsubj = tmpsubj->parent_subject));
52076 +next_component:
52077 + /* end case */
52078 + if (p == name)
52079 + break;
52080 +
52081 + while (*p != '/')
52082 + p--;
52083 + if (p == name)
52084 + lastp = p + 1;
52085 + else {
52086 + lastp = p;
52087 + p--;
52088 + }
52089 + c = *lastp;
52090 + *lastp = '\0';
52091 + } while (1);
52092 +found_obj:
52093 + read_unlock(&gr_inode_lock);
52094 + /* obj returned will always be non-null */
52095 + return obj;
52096 +}
52097 +
52098 +/* returns 0 when allowing, non-zero on error
52099 + op of 0 is used for readdir, so we don't log the names of hidden files
52100 +*/
52101 +__u32
52102 +gr_handle_sysctl(const struct ctl_table *table, const int op)
52103 +{
52104 + struct ctl_table *tmp;
52105 + const char *proc_sys = "/proc/sys";
52106 + char *path;
52107 + struct acl_object_label *obj;
52108 + unsigned short len = 0, pos = 0, depth = 0, i;
52109 + __u32 err = 0;
52110 + __u32 mode = 0;
52111 +
52112 + if (unlikely(!(gr_status & GR_READY)))
52113 + return 0;
52114 +
52115 + /* for now, ignore operations on non-sysctl entries if it's not a
52116 + readdir*/
52117 + if (table->child != NULL && op != 0)
52118 + return 0;
52119 +
52120 + mode |= GR_FIND;
52121 + /* it's only a read if it's an entry, read on dirs is for readdir */
52122 + if (op & MAY_READ)
52123 + mode |= GR_READ;
52124 + if (op & MAY_WRITE)
52125 + mode |= GR_WRITE;
52126 +
52127 + preempt_disable();
52128 +
52129 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
52130 +
52131 + /* it's only a read/write if it's an actual entry, not a dir
52132 + (which are opened for readdir)
52133 + */
52134 +
52135 + /* convert the requested sysctl entry into a pathname */
52136 +
52137 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
52138 + len += strlen(tmp->procname);
52139 + len++;
52140 + depth++;
52141 + }
52142 +
52143 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
52144 + /* deny */
52145 + goto out;
52146 + }
52147 +
52148 + memset(path, 0, PAGE_SIZE);
52149 +
52150 + memcpy(path, proc_sys, strlen(proc_sys));
52151 +
52152 + pos += strlen(proc_sys);
52153 +
52154 + for (; depth > 0; depth--) {
52155 + path[pos] = '/';
52156 + pos++;
52157 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
52158 + if (depth == i) {
52159 + memcpy(path + pos, tmp->procname,
52160 + strlen(tmp->procname));
52161 + pos += strlen(tmp->procname);
52162 + }
52163 + i++;
52164 + }
52165 + }
52166 +
52167 + obj = gr_lookup_by_name(path, pos);
52168 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
52169 +
52170 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
52171 + ((err & mode) != mode))) {
52172 + __u32 new_mode = mode;
52173 +
52174 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52175 +
52176 + err = 0;
52177 + gr_log_learn_sysctl(path, new_mode);
52178 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
52179 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
52180 + err = -ENOENT;
52181 + } else if (!(err & GR_FIND)) {
52182 + err = -ENOENT;
52183 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
52184 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
52185 + path, (mode & GR_READ) ? " reading" : "",
52186 + (mode & GR_WRITE) ? " writing" : "");
52187 + err = -EACCES;
52188 + } else if ((err & mode) != mode) {
52189 + err = -EACCES;
52190 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
52191 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
52192 + path, (mode & GR_READ) ? " reading" : "",
52193 + (mode & GR_WRITE) ? " writing" : "");
52194 + err = 0;
52195 + } else
52196 + err = 0;
52197 +
52198 + out:
52199 + preempt_enable();
52200 +
52201 + return err;
52202 +}
52203 +#endif
52204 +
52205 +int
52206 +gr_handle_proc_ptrace(struct task_struct *task)
52207 +{
52208 + struct file *filp;
52209 + struct task_struct *tmp = task;
52210 + struct task_struct *curtemp = current;
52211 + __u32 retmode;
52212 +
52213 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52214 + if (unlikely(!(gr_status & GR_READY)))
52215 + return 0;
52216 +#endif
52217 +
52218 + read_lock(&tasklist_lock);
52219 + read_lock(&grsec_exec_file_lock);
52220 + filp = task->exec_file;
52221 +
52222 + while (tmp->pid > 0) {
52223 + if (tmp == curtemp)
52224 + break;
52225 + tmp = tmp->real_parent;
52226 + }
52227 +
52228 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52229 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
52230 + read_unlock(&grsec_exec_file_lock);
52231 + read_unlock(&tasklist_lock);
52232 + return 1;
52233 + }
52234 +
52235 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52236 + if (!(gr_status & GR_READY)) {
52237 + read_unlock(&grsec_exec_file_lock);
52238 + read_unlock(&tasklist_lock);
52239 + return 0;
52240 + }
52241 +#endif
52242 +
52243 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
52244 + read_unlock(&grsec_exec_file_lock);
52245 + read_unlock(&tasklist_lock);
52246 +
52247 + if (retmode & GR_NOPTRACE)
52248 + return 1;
52249 +
52250 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
52251 + && (current->acl != task->acl || (current->acl != current->role->root_label
52252 + && current->pid != task->pid)))
52253 + return 1;
52254 +
52255 + return 0;
52256 +}
52257 +
52258 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
52259 +{
52260 + if (unlikely(!(gr_status & GR_READY)))
52261 + return;
52262 +
52263 + if (!(current->role->roletype & GR_ROLE_GOD))
52264 + return;
52265 +
52266 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
52267 + p->role->rolename, gr_task_roletype_to_char(p),
52268 + p->acl->filename);
52269 +}
52270 +
52271 +int
52272 +gr_handle_ptrace(struct task_struct *task, const long request)
52273 +{
52274 + struct task_struct *tmp = task;
52275 + struct task_struct *curtemp = current;
52276 + __u32 retmode;
52277 +
52278 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52279 + if (unlikely(!(gr_status & GR_READY)))
52280 + return 0;
52281 +#endif
52282 +
52283 + read_lock(&tasklist_lock);
52284 + while (tmp->pid > 0) {
52285 + if (tmp == curtemp)
52286 + break;
52287 + tmp = tmp->real_parent;
52288 + }
52289 +
52290 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52291 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
52292 + read_unlock(&tasklist_lock);
52293 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52294 + return 1;
52295 + }
52296 + read_unlock(&tasklist_lock);
52297 +
52298 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52299 + if (!(gr_status & GR_READY))
52300 + return 0;
52301 +#endif
52302 +
52303 + read_lock(&grsec_exec_file_lock);
52304 + if (unlikely(!task->exec_file)) {
52305 + read_unlock(&grsec_exec_file_lock);
52306 + return 0;
52307 + }
52308 +
52309 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
52310 + read_unlock(&grsec_exec_file_lock);
52311 +
52312 + if (retmode & GR_NOPTRACE) {
52313 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52314 + return 1;
52315 + }
52316 +
52317 + if (retmode & GR_PTRACERD) {
52318 + switch (request) {
52319 + case PTRACE_SEIZE:
52320 + case PTRACE_POKETEXT:
52321 + case PTRACE_POKEDATA:
52322 + case PTRACE_POKEUSR:
52323 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
52324 + case PTRACE_SETREGS:
52325 + case PTRACE_SETFPREGS:
52326 +#endif
52327 +#ifdef CONFIG_X86
52328 + case PTRACE_SETFPXREGS:
52329 +#endif
52330 +#ifdef CONFIG_ALTIVEC
52331 + case PTRACE_SETVRREGS:
52332 +#endif
52333 + return 1;
52334 + default:
52335 + return 0;
52336 + }
52337 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
52338 + !(current->role->roletype & GR_ROLE_GOD) &&
52339 + (current->acl != task->acl)) {
52340 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52341 + return 1;
52342 + }
52343 +
52344 + return 0;
52345 +}
52346 +
52347 +static int is_writable_mmap(const struct file *filp)
52348 +{
52349 + struct task_struct *task = current;
52350 + struct acl_object_label *obj, *obj2;
52351 +
52352 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
52353 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
52354 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52355 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
52356 + task->role->root_label);
52357 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
52358 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
52359 + return 1;
52360 + }
52361 + }
52362 + return 0;
52363 +}
52364 +
52365 +int
52366 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
52367 +{
52368 + __u32 mode;
52369 +
52370 + if (unlikely(!file || !(prot & PROT_EXEC)))
52371 + return 1;
52372 +
52373 + if (is_writable_mmap(file))
52374 + return 0;
52375 +
52376 + mode =
52377 + gr_search_file(file->f_path.dentry,
52378 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52379 + file->f_path.mnt);
52380 +
52381 + if (!gr_tpe_allow(file))
52382 + return 0;
52383 +
52384 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52385 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52386 + return 0;
52387 + } else if (unlikely(!(mode & GR_EXEC))) {
52388 + return 0;
52389 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52390 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52391 + return 1;
52392 + }
52393 +
52394 + return 1;
52395 +}
52396 +
52397 +int
52398 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
52399 +{
52400 + __u32 mode;
52401 +
52402 + if (unlikely(!file || !(prot & PROT_EXEC)))
52403 + return 1;
52404 +
52405 + if (is_writable_mmap(file))
52406 + return 0;
52407 +
52408 + mode =
52409 + gr_search_file(file->f_path.dentry,
52410 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52411 + file->f_path.mnt);
52412 +
52413 + if (!gr_tpe_allow(file))
52414 + return 0;
52415 +
52416 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52417 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52418 + return 0;
52419 + } else if (unlikely(!(mode & GR_EXEC))) {
52420 + return 0;
52421 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52422 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52423 + return 1;
52424 + }
52425 +
52426 + return 1;
52427 +}
52428 +
52429 +void
52430 +gr_acl_handle_psacct(struct task_struct *task, const long code)
52431 +{
52432 + unsigned long runtime;
52433 + unsigned long cputime;
52434 + unsigned int wday, cday;
52435 + __u8 whr, chr;
52436 + __u8 wmin, cmin;
52437 + __u8 wsec, csec;
52438 + struct timespec timeval;
52439 +
52440 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
52441 + !(task->acl->mode & GR_PROCACCT)))
52442 + return;
52443 +
52444 + do_posix_clock_monotonic_gettime(&timeval);
52445 + runtime = timeval.tv_sec - task->start_time.tv_sec;
52446 + wday = runtime / (3600 * 24);
52447 + runtime -= wday * (3600 * 24);
52448 + whr = runtime / 3600;
52449 + runtime -= whr * 3600;
52450 + wmin = runtime / 60;
52451 + runtime -= wmin * 60;
52452 + wsec = runtime;
52453 +
52454 + cputime = (task->utime + task->stime) / HZ;
52455 + cday = cputime / (3600 * 24);
52456 + cputime -= cday * (3600 * 24);
52457 + chr = cputime / 3600;
52458 + cputime -= chr * 3600;
52459 + cmin = cputime / 60;
52460 + cputime -= cmin * 60;
52461 + csec = cputime;
52462 +
52463 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
52464 +
52465 + return;
52466 +}
52467 +
52468 +void gr_set_kernel_label(struct task_struct *task)
52469 +{
52470 + if (gr_status & GR_READY) {
52471 + task->role = kernel_role;
52472 + task->acl = kernel_role->root_label;
52473 + }
52474 + return;
52475 +}
52476 +
52477 +#ifdef CONFIG_TASKSTATS
52478 +int gr_is_taskstats_denied(int pid)
52479 +{
52480 + struct task_struct *task;
52481 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52482 + const struct cred *cred;
52483 +#endif
52484 + int ret = 0;
52485 +
52486 + /* restrict taskstats viewing to un-chrooted root users
52487 + who have the 'view' subject flag if the RBAC system is enabled
52488 + */
52489 +
52490 + rcu_read_lock();
52491 + read_lock(&tasklist_lock);
52492 + task = find_task_by_vpid(pid);
52493 + if (task) {
52494 +#ifdef CONFIG_GRKERNSEC_CHROOT
52495 + if (proc_is_chrooted(task))
52496 + ret = -EACCES;
52497 +#endif
52498 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52499 + cred = __task_cred(task);
52500 +#ifdef CONFIG_GRKERNSEC_PROC_USER
52501 + if (cred->uid != 0)
52502 + ret = -EACCES;
52503 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52504 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
52505 + ret = -EACCES;
52506 +#endif
52507 +#endif
52508 + if (gr_status & GR_READY) {
52509 + if (!(task->acl->mode & GR_VIEW))
52510 + ret = -EACCES;
52511 + }
52512 + } else
52513 + ret = -ENOENT;
52514 +
52515 + read_unlock(&tasklist_lock);
52516 + rcu_read_unlock();
52517 +
52518 + return ret;
52519 +}
52520 +#endif
52521 +
52522 +/* AUXV entries are filled via a descendant of search_binary_handler
52523 + after we've already applied the subject for the target
52524 +*/
52525 +int gr_acl_enable_at_secure(void)
52526 +{
52527 + if (unlikely(!(gr_status & GR_READY)))
52528 + return 0;
52529 +
52530 + if (current->acl->mode & GR_ATSECURE)
52531 + return 1;
52532 +
52533 + return 0;
52534 +}
52535 +
52536 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
52537 +{
52538 + struct task_struct *task = current;
52539 + struct dentry *dentry = file->f_path.dentry;
52540 + struct vfsmount *mnt = file->f_path.mnt;
52541 + struct acl_object_label *obj, *tmp;
52542 + struct acl_subject_label *subj;
52543 + unsigned int bufsize;
52544 + int is_not_root;
52545 + char *path;
52546 + dev_t dev = __get_dev(dentry);
52547 +
52548 + if (unlikely(!(gr_status & GR_READY)))
52549 + return 1;
52550 +
52551 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52552 + return 1;
52553 +
52554 + /* ignore Eric Biederman */
52555 + if (IS_PRIVATE(dentry->d_inode))
52556 + return 1;
52557 +
52558 + subj = task->acl;
52559 + do {
52560 + obj = lookup_acl_obj_label(ino, dev, subj);
52561 + if (obj != NULL)
52562 + return (obj->mode & GR_FIND) ? 1 : 0;
52563 + } while ((subj = subj->parent_subject));
52564 +
52565 + /* this is purely an optimization since we're looking for an object
52566 + for the directory we're doing a readdir on
52567 + if it's possible for any globbed object to match the entry we're
52568 + filling into the directory, then the object we find here will be
52569 + an anchor point with attached globbed objects
52570 + */
52571 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
52572 + if (obj->globbed == NULL)
52573 + return (obj->mode & GR_FIND) ? 1 : 0;
52574 +
52575 + is_not_root = ((obj->filename[0] == '/') &&
52576 + (obj->filename[1] == '\0')) ? 0 : 1;
52577 + bufsize = PAGE_SIZE - namelen - is_not_root;
52578 +
52579 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
52580 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52581 + return 1;
52582 +
52583 + preempt_disable();
52584 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52585 + bufsize);
52586 +
52587 + bufsize = strlen(path);
52588 +
52589 + /* if base is "/", don't append an additional slash */
52590 + if (is_not_root)
52591 + *(path + bufsize) = '/';
52592 + memcpy(path + bufsize + is_not_root, name, namelen);
52593 + *(path + bufsize + namelen + is_not_root) = '\0';
52594 +
52595 + tmp = obj->globbed;
52596 + while (tmp) {
52597 + if (!glob_match(tmp->filename, path)) {
52598 + preempt_enable();
52599 + return (tmp->mode & GR_FIND) ? 1 : 0;
52600 + }
52601 + tmp = tmp->next;
52602 + }
52603 + preempt_enable();
52604 + return (obj->mode & GR_FIND) ? 1 : 0;
52605 +}
52606 +
52607 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52608 +EXPORT_SYMBOL(gr_acl_is_enabled);
52609 +#endif
52610 +EXPORT_SYMBOL(gr_learn_resource);
52611 +EXPORT_SYMBOL(gr_set_kernel_label);
52612 +#ifdef CONFIG_SECURITY
52613 +EXPORT_SYMBOL(gr_check_user_change);
52614 +EXPORT_SYMBOL(gr_check_group_change);
52615 +#endif
52616 +
52617 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
52618 new file mode 100644
52619 index 0000000..34fefda
52620 --- /dev/null
52621 +++ b/grsecurity/gracl_alloc.c
52622 @@ -0,0 +1,105 @@
52623 +#include <linux/kernel.h>
52624 +#include <linux/mm.h>
52625 +#include <linux/slab.h>
52626 +#include <linux/vmalloc.h>
52627 +#include <linux/gracl.h>
52628 +#include <linux/grsecurity.h>
52629 +
52630 +static unsigned long alloc_stack_next = 1;
52631 +static unsigned long alloc_stack_size = 1;
52632 +static void **alloc_stack;
52633 +
52634 +static __inline__ int
52635 +alloc_pop(void)
52636 +{
52637 + if (alloc_stack_next == 1)
52638 + return 0;
52639 +
52640 + kfree(alloc_stack[alloc_stack_next - 2]);
52641 +
52642 + alloc_stack_next--;
52643 +
52644 + return 1;
52645 +}
52646 +
52647 +static __inline__ int
52648 +alloc_push(void *buf)
52649 +{
52650 + if (alloc_stack_next >= alloc_stack_size)
52651 + return 1;
52652 +
52653 + alloc_stack[alloc_stack_next - 1] = buf;
52654 +
52655 + alloc_stack_next++;
52656 +
52657 + return 0;
52658 +}
52659 +
52660 +void *
52661 +acl_alloc(unsigned long len)
52662 +{
52663 + void *ret = NULL;
52664 +
52665 + if (!len || len > PAGE_SIZE)
52666 + goto out;
52667 +
52668 + ret = kmalloc(len, GFP_KERNEL);
52669 +
52670 + if (ret) {
52671 + if (alloc_push(ret)) {
52672 + kfree(ret);
52673 + ret = NULL;
52674 + }
52675 + }
52676 +
52677 +out:
52678 + return ret;
52679 +}
52680 +
52681 +void *
52682 +acl_alloc_num(unsigned long num, unsigned long len)
52683 +{
52684 + if (!len || (num > (PAGE_SIZE / len)))
52685 + return NULL;
52686 +
52687 + return acl_alloc(num * len);
52688 +}
52689 +
52690 +void
52691 +acl_free_all(void)
52692 +{
52693 + if (gr_acl_is_enabled() || !alloc_stack)
52694 + return;
52695 +
52696 + while (alloc_pop()) ;
52697 +
52698 + if (alloc_stack) {
52699 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
52700 + kfree(alloc_stack);
52701 + else
52702 + vfree(alloc_stack);
52703 + }
52704 +
52705 + alloc_stack = NULL;
52706 + alloc_stack_size = 1;
52707 + alloc_stack_next = 1;
52708 +
52709 + return;
52710 +}
52711 +
52712 +int
52713 +acl_alloc_stack_init(unsigned long size)
52714 +{
52715 + if ((size * sizeof (void *)) <= PAGE_SIZE)
52716 + alloc_stack =
52717 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
52718 + else
52719 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
52720 +
52721 + alloc_stack_size = size;
52722 +
52723 + if (!alloc_stack)
52724 + return 0;
52725 + else
52726 + return 1;
52727 +}
52728 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
52729 new file mode 100644
52730 index 0000000..955ddfb
52731 --- /dev/null
52732 +++ b/grsecurity/gracl_cap.c
52733 @@ -0,0 +1,101 @@
52734 +#include <linux/kernel.h>
52735 +#include <linux/module.h>
52736 +#include <linux/sched.h>
52737 +#include <linux/gracl.h>
52738 +#include <linux/grsecurity.h>
52739 +#include <linux/grinternal.h>
52740 +
52741 +extern const char *captab_log[];
52742 +extern int captab_log_entries;
52743 +
52744 +int
52745 +gr_acl_is_capable(const int cap)
52746 +{
52747 + struct task_struct *task = current;
52748 + const struct cred *cred = current_cred();
52749 + struct acl_subject_label *curracl;
52750 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52751 + kernel_cap_t cap_audit = __cap_empty_set;
52752 +
52753 + if (!gr_acl_is_enabled())
52754 + return 1;
52755 +
52756 + curracl = task->acl;
52757 +
52758 + cap_drop = curracl->cap_lower;
52759 + cap_mask = curracl->cap_mask;
52760 + cap_audit = curracl->cap_invert_audit;
52761 +
52762 + while ((curracl = curracl->parent_subject)) {
52763 + /* if the cap isn't specified in the current computed mask but is specified in the
52764 + current level subject, and is lowered in the current level subject, then add
52765 + it to the set of dropped capabilities
52766 + otherwise, add the current level subject's mask to the current computed mask
52767 + */
52768 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52769 + cap_raise(cap_mask, cap);
52770 + if (cap_raised(curracl->cap_lower, cap))
52771 + cap_raise(cap_drop, cap);
52772 + if (cap_raised(curracl->cap_invert_audit, cap))
52773 + cap_raise(cap_audit, cap);
52774 + }
52775 + }
52776 +
52777 + if (!cap_raised(cap_drop, cap)) {
52778 + if (cap_raised(cap_audit, cap))
52779 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52780 + return 1;
52781 + }
52782 +
52783 + curracl = task->acl;
52784 +
52785 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52786 + && cap_raised(cred->cap_effective, cap)) {
52787 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52788 + task->role->roletype, cred->uid,
52789 + cred->gid, task->exec_file ?
52790 + gr_to_filename(task->exec_file->f_path.dentry,
52791 + task->exec_file->f_path.mnt) : curracl->filename,
52792 + curracl->filename, 0UL,
52793 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52794 + return 1;
52795 + }
52796 +
52797 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52798 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52799 + return 0;
52800 +}
52801 +
52802 +int
52803 +gr_acl_is_capable_nolog(const int cap)
52804 +{
52805 + struct acl_subject_label *curracl;
52806 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52807 +
52808 + if (!gr_acl_is_enabled())
52809 + return 1;
52810 +
52811 + curracl = current->acl;
52812 +
52813 + cap_drop = curracl->cap_lower;
52814 + cap_mask = curracl->cap_mask;
52815 +
52816 + while ((curracl = curracl->parent_subject)) {
52817 + /* if the cap isn't specified in the current computed mask but is specified in the
52818 + current level subject, and is lowered in the current level subject, then add
52819 + it to the set of dropped capabilities
52820 + otherwise, add the current level subject's mask to the current computed mask
52821 + */
52822 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52823 + cap_raise(cap_mask, cap);
52824 + if (cap_raised(curracl->cap_lower, cap))
52825 + cap_raise(cap_drop, cap);
52826 + }
52827 + }
52828 +
52829 + if (!cap_raised(cap_drop, cap))
52830 + return 1;
52831 +
52832 + return 0;
52833 +}
52834 +
52835 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
52836 new file mode 100644
52837 index 0000000..88d0e87
52838 --- /dev/null
52839 +++ b/grsecurity/gracl_fs.c
52840 @@ -0,0 +1,435 @@
52841 +#include <linux/kernel.h>
52842 +#include <linux/sched.h>
52843 +#include <linux/types.h>
52844 +#include <linux/fs.h>
52845 +#include <linux/file.h>
52846 +#include <linux/stat.h>
52847 +#include <linux/grsecurity.h>
52848 +#include <linux/grinternal.h>
52849 +#include <linux/gracl.h>
52850 +
52851 +umode_t
52852 +gr_acl_umask(void)
52853 +{
52854 + if (unlikely(!gr_acl_is_enabled()))
52855 + return 0;
52856 +
52857 + return current->role->umask;
52858 +}
52859 +
52860 +__u32
52861 +gr_acl_handle_hidden_file(const struct dentry * dentry,
52862 + const struct vfsmount * mnt)
52863 +{
52864 + __u32 mode;
52865 +
52866 + if (unlikely(!dentry->d_inode))
52867 + return GR_FIND;
52868 +
52869 + mode =
52870 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52871 +
52872 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52873 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52874 + return mode;
52875 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52876 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52877 + return 0;
52878 + } else if (unlikely(!(mode & GR_FIND)))
52879 + return 0;
52880 +
52881 + return GR_FIND;
52882 +}
52883 +
52884 +__u32
52885 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52886 + int acc_mode)
52887 +{
52888 + __u32 reqmode = GR_FIND;
52889 + __u32 mode;
52890 +
52891 + if (unlikely(!dentry->d_inode))
52892 + return reqmode;
52893 +
52894 + if (acc_mode & MAY_APPEND)
52895 + reqmode |= GR_APPEND;
52896 + else if (acc_mode & MAY_WRITE)
52897 + reqmode |= GR_WRITE;
52898 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
52899 + reqmode |= GR_READ;
52900 +
52901 + mode =
52902 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52903 + mnt);
52904 +
52905 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52906 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52907 + reqmode & GR_READ ? " reading" : "",
52908 + reqmode & GR_WRITE ? " writing" : reqmode &
52909 + GR_APPEND ? " appending" : "");
52910 + return reqmode;
52911 + } else
52912 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52913 + {
52914 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52915 + reqmode & GR_READ ? " reading" : "",
52916 + reqmode & GR_WRITE ? " writing" : reqmode &
52917 + GR_APPEND ? " appending" : "");
52918 + return 0;
52919 + } else if (unlikely((mode & reqmode) != reqmode))
52920 + return 0;
52921 +
52922 + return reqmode;
52923 +}
52924 +
52925 +__u32
52926 +gr_acl_handle_creat(const struct dentry * dentry,
52927 + const struct dentry * p_dentry,
52928 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
52929 + const int imode)
52930 +{
52931 + __u32 reqmode = GR_WRITE | GR_CREATE;
52932 + __u32 mode;
52933 +
52934 + if (acc_mode & MAY_APPEND)
52935 + reqmode |= GR_APPEND;
52936 + // if a directory was required or the directory already exists, then
52937 + // don't count this open as a read
52938 + if ((acc_mode & MAY_READ) &&
52939 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
52940 + reqmode |= GR_READ;
52941 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
52942 + reqmode |= GR_SETID;
52943 +
52944 + mode =
52945 + gr_check_create(dentry, p_dentry, p_mnt,
52946 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52947 +
52948 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52949 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52950 + reqmode & GR_READ ? " reading" : "",
52951 + reqmode & GR_WRITE ? " writing" : reqmode &
52952 + GR_APPEND ? " appending" : "");
52953 + return reqmode;
52954 + } else
52955 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52956 + {
52957 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52958 + reqmode & GR_READ ? " reading" : "",
52959 + reqmode & GR_WRITE ? " writing" : reqmode &
52960 + GR_APPEND ? " appending" : "");
52961 + return 0;
52962 + } else if (unlikely((mode & reqmode) != reqmode))
52963 + return 0;
52964 +
52965 + return reqmode;
52966 +}
52967 +
52968 +__u32
52969 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52970 + const int fmode)
52971 +{
52972 + __u32 mode, reqmode = GR_FIND;
52973 +
52974 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52975 + reqmode |= GR_EXEC;
52976 + if (fmode & S_IWOTH)
52977 + reqmode |= GR_WRITE;
52978 + if (fmode & S_IROTH)
52979 + reqmode |= GR_READ;
52980 +
52981 + mode =
52982 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52983 + mnt);
52984 +
52985 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52986 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52987 + reqmode & GR_READ ? " reading" : "",
52988 + reqmode & GR_WRITE ? " writing" : "",
52989 + reqmode & GR_EXEC ? " executing" : "");
52990 + return reqmode;
52991 + } else
52992 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52993 + {
52994 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52995 + reqmode & GR_READ ? " reading" : "",
52996 + reqmode & GR_WRITE ? " writing" : "",
52997 + reqmode & GR_EXEC ? " executing" : "");
52998 + return 0;
52999 + } else if (unlikely((mode & reqmode) != reqmode))
53000 + return 0;
53001 +
53002 + return reqmode;
53003 +}
53004 +
53005 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
53006 +{
53007 + __u32 mode;
53008 +
53009 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
53010 +
53011 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
53012 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
53013 + return mode;
53014 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
53015 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
53016 + return 0;
53017 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
53018 + return 0;
53019 +
53020 + return (reqmode);
53021 +}
53022 +
53023 +__u32
53024 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53025 +{
53026 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
53027 +}
53028 +
53029 +__u32
53030 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
53031 +{
53032 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
53033 +}
53034 +
53035 +__u32
53036 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
53037 +{
53038 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
53039 +}
53040 +
53041 +__u32
53042 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
53043 +{
53044 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
53045 +}
53046 +
53047 +__u32
53048 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
53049 + umode_t *modeptr)
53050 +{
53051 + umode_t mode;
53052 +
53053 + *modeptr &= ~gr_acl_umask();
53054 + mode = *modeptr;
53055 +
53056 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
53057 + return 1;
53058 +
53059 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
53060 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
53061 + GR_CHMOD_ACL_MSG);
53062 + } else {
53063 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
53064 + }
53065 +}
53066 +
53067 +__u32
53068 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
53069 +{
53070 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
53071 +}
53072 +
53073 +__u32
53074 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
53075 +{
53076 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
53077 +}
53078 +
53079 +__u32
53080 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
53081 +{
53082 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
53083 +}
53084 +
53085 +__u32
53086 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
53087 +{
53088 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
53089 + GR_UNIXCONNECT_ACL_MSG);
53090 +}
53091 +
53092 +/* hardlinks require at minimum create and link permission,
53093 + any additional privilege required is based on the
53094 + privilege of the file being linked to
53095 +*/
53096 +__u32
53097 +gr_acl_handle_link(const struct dentry * new_dentry,
53098 + const struct dentry * parent_dentry,
53099 + const struct vfsmount * parent_mnt,
53100 + const struct dentry * old_dentry,
53101 + const struct vfsmount * old_mnt, const char *to)
53102 +{
53103 + __u32 mode;
53104 + __u32 needmode = GR_CREATE | GR_LINK;
53105 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
53106 +
53107 + mode =
53108 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
53109 + old_mnt);
53110 +
53111 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
53112 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
53113 + return mode;
53114 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
53115 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
53116 + return 0;
53117 + } else if (unlikely((mode & needmode) != needmode))
53118 + return 0;
53119 +
53120 + return 1;
53121 +}
53122 +
53123 +__u32
53124 +gr_acl_handle_symlink(const struct dentry * new_dentry,
53125 + const struct dentry * parent_dentry,
53126 + const struct vfsmount * parent_mnt, const char *from)
53127 +{
53128 + __u32 needmode = GR_WRITE | GR_CREATE;
53129 + __u32 mode;
53130 +
53131 + mode =
53132 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
53133 + GR_CREATE | GR_AUDIT_CREATE |
53134 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
53135 +
53136 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
53137 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
53138 + return mode;
53139 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
53140 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
53141 + return 0;
53142 + } else if (unlikely((mode & needmode) != needmode))
53143 + return 0;
53144 +
53145 + return (GR_WRITE | GR_CREATE);
53146 +}
53147 +
53148 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
53149 +{
53150 + __u32 mode;
53151 +
53152 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
53153 +
53154 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
53155 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
53156 + return mode;
53157 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
53158 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
53159 + return 0;
53160 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
53161 + return 0;
53162 +
53163 + return (reqmode);
53164 +}
53165 +
53166 +__u32
53167 +gr_acl_handle_mknod(const struct dentry * new_dentry,
53168 + const struct dentry * parent_dentry,
53169 + const struct vfsmount * parent_mnt,
53170 + const int mode)
53171 +{
53172 + __u32 reqmode = GR_WRITE | GR_CREATE;
53173 + if (unlikely(mode & (S_ISUID | S_ISGID)))
53174 + reqmode |= GR_SETID;
53175 +
53176 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53177 + reqmode, GR_MKNOD_ACL_MSG);
53178 +}
53179 +
53180 +__u32
53181 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
53182 + const struct dentry *parent_dentry,
53183 + const struct vfsmount *parent_mnt)
53184 +{
53185 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53186 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
53187 +}
53188 +
53189 +#define RENAME_CHECK_SUCCESS(old, new) \
53190 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
53191 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
53192 +
53193 +int
53194 +gr_acl_handle_rename(struct dentry *new_dentry,
53195 + struct dentry *parent_dentry,
53196 + const struct vfsmount *parent_mnt,
53197 + struct dentry *old_dentry,
53198 + struct inode *old_parent_inode,
53199 + struct vfsmount *old_mnt, const char *newname)
53200 +{
53201 + __u32 comp1, comp2;
53202 + int error = 0;
53203 +
53204 + if (unlikely(!gr_acl_is_enabled()))
53205 + return 0;
53206 +
53207 + if (!new_dentry->d_inode) {
53208 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
53209 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
53210 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
53211 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
53212 + GR_DELETE | GR_AUDIT_DELETE |
53213 + GR_AUDIT_READ | GR_AUDIT_WRITE |
53214 + GR_SUPPRESS, old_mnt);
53215 + } else {
53216 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
53217 + GR_CREATE | GR_DELETE |
53218 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
53219 + GR_AUDIT_READ | GR_AUDIT_WRITE |
53220 + GR_SUPPRESS, parent_mnt);
53221 + comp2 =
53222 + gr_search_file(old_dentry,
53223 + GR_READ | GR_WRITE | GR_AUDIT_READ |
53224 + GR_DELETE | GR_AUDIT_DELETE |
53225 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
53226 + }
53227 +
53228 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
53229 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
53230 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53231 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
53232 + && !(comp2 & GR_SUPPRESS)) {
53233 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53234 + error = -EACCES;
53235 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
53236 + error = -EACCES;
53237 +
53238 + return error;
53239 +}
53240 +
53241 +void
53242 +gr_acl_handle_exit(void)
53243 +{
53244 + u16 id;
53245 + char *rolename;
53246 + struct file *exec_file;
53247 +
53248 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
53249 + !(current->role->roletype & GR_ROLE_PERSIST))) {
53250 + id = current->acl_role_id;
53251 + rolename = current->role->rolename;
53252 + gr_set_acls(1);
53253 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
53254 + }
53255 +
53256 + write_lock(&grsec_exec_file_lock);
53257 + exec_file = current->exec_file;
53258 + current->exec_file = NULL;
53259 + write_unlock(&grsec_exec_file_lock);
53260 +
53261 + if (exec_file)
53262 + fput(exec_file);
53263 +}
53264 +
53265 +int
53266 +gr_acl_handle_procpidmem(const struct task_struct *task)
53267 +{
53268 + if (unlikely(!gr_acl_is_enabled()))
53269 + return 0;
53270 +
53271 + if (task != current && task->acl->mode & GR_PROTPROCFD)
53272 + return -EACCES;
53273 +
53274 + return 0;
53275 +}
53276 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
53277 new file mode 100644
53278 index 0000000..17050ca
53279 --- /dev/null
53280 +++ b/grsecurity/gracl_ip.c
53281 @@ -0,0 +1,381 @@
53282 +#include <linux/kernel.h>
53283 +#include <asm/uaccess.h>
53284 +#include <asm/errno.h>
53285 +#include <net/sock.h>
53286 +#include <linux/file.h>
53287 +#include <linux/fs.h>
53288 +#include <linux/net.h>
53289 +#include <linux/in.h>
53290 +#include <linux/skbuff.h>
53291 +#include <linux/ip.h>
53292 +#include <linux/udp.h>
53293 +#include <linux/types.h>
53294 +#include <linux/sched.h>
53295 +#include <linux/netdevice.h>
53296 +#include <linux/inetdevice.h>
53297 +#include <linux/gracl.h>
53298 +#include <linux/grsecurity.h>
53299 +#include <linux/grinternal.h>
53300 +
53301 +#define GR_BIND 0x01
53302 +#define GR_CONNECT 0x02
53303 +#define GR_INVERT 0x04
53304 +#define GR_BINDOVERRIDE 0x08
53305 +#define GR_CONNECTOVERRIDE 0x10
53306 +#define GR_SOCK_FAMILY 0x20
53307 +
53308 +static const char * gr_protocols[IPPROTO_MAX] = {
53309 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
53310 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
53311 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
53312 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
53313 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
53314 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
53315 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
53316 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
53317 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
53318 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
53319 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
53320 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
53321 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
53322 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
53323 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
53324 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
53325 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
53326 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
53327 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
53328 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
53329 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
53330 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
53331 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
53332 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
53333 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
53334 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
53335 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
53336 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
53337 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
53338 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
53339 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
53340 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
53341 + };
53342 +
53343 +static const char * gr_socktypes[SOCK_MAX] = {
53344 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
53345 + "unknown:7", "unknown:8", "unknown:9", "packet"
53346 + };
53347 +
53348 +static const char * gr_sockfamilies[AF_MAX+1] = {
53349 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
53350 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
53351 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
53352 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
53353 + };
53354 +
53355 +const char *
53356 +gr_proto_to_name(unsigned char proto)
53357 +{
53358 + return gr_protocols[proto];
53359 +}
53360 +
53361 +const char *
53362 +gr_socktype_to_name(unsigned char type)
53363 +{
53364 + return gr_socktypes[type];
53365 +}
53366 +
53367 +const char *
53368 +gr_sockfamily_to_name(unsigned char family)
53369 +{
53370 + return gr_sockfamilies[family];
53371 +}
53372 +
53373 +int
53374 +gr_search_socket(const int domain, const int type, const int protocol)
53375 +{
53376 + struct acl_subject_label *curr;
53377 + const struct cred *cred = current_cred();
53378 +
53379 + if (unlikely(!gr_acl_is_enabled()))
53380 + goto exit;
53381 +
53382 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
53383 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
53384 + goto exit; // let the kernel handle it
53385 +
53386 + curr = current->acl;
53387 +
53388 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
53389 + /* the family is allowed, if this is PF_INET allow it only if
53390 + the extra sock type/protocol checks pass */
53391 + if (domain == PF_INET)
53392 + goto inet_check;
53393 + goto exit;
53394 + } else {
53395 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53396 + __u32 fakeip = 0;
53397 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53398 + current->role->roletype, cred->uid,
53399 + cred->gid, current->exec_file ?
53400 + gr_to_filename(current->exec_file->f_path.dentry,
53401 + current->exec_file->f_path.mnt) :
53402 + curr->filename, curr->filename,
53403 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
53404 + &current->signal->saved_ip);
53405 + goto exit;
53406 + }
53407 + goto exit_fail;
53408 + }
53409 +
53410 +inet_check:
53411 + /* the rest of this checking is for IPv4 only */
53412 + if (!curr->ips)
53413 + goto exit;
53414 +
53415 + if ((curr->ip_type & (1 << type)) &&
53416 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
53417 + goto exit;
53418 +
53419 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53420 + /* we don't place acls on raw sockets , and sometimes
53421 + dgram/ip sockets are opened for ioctl and not
53422 + bind/connect, so we'll fake a bind learn log */
53423 + if (type == SOCK_RAW || type == SOCK_PACKET) {
53424 + __u32 fakeip = 0;
53425 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53426 + current->role->roletype, cred->uid,
53427 + cred->gid, current->exec_file ?
53428 + gr_to_filename(current->exec_file->f_path.dentry,
53429 + current->exec_file->f_path.mnt) :
53430 + curr->filename, curr->filename,
53431 + &fakeip, 0, type,
53432 + protocol, GR_CONNECT, &current->signal->saved_ip);
53433 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
53434 + __u32 fakeip = 0;
53435 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53436 + current->role->roletype, cred->uid,
53437 + cred->gid, current->exec_file ?
53438 + gr_to_filename(current->exec_file->f_path.dentry,
53439 + current->exec_file->f_path.mnt) :
53440 + curr->filename, curr->filename,
53441 + &fakeip, 0, type,
53442 + protocol, GR_BIND, &current->signal->saved_ip);
53443 + }
53444 + /* we'll log when they use connect or bind */
53445 + goto exit;
53446 + }
53447 +
53448 +exit_fail:
53449 + if (domain == PF_INET)
53450 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
53451 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
53452 + else
53453 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
53454 + gr_socktype_to_name(type), protocol);
53455 +
53456 + return 0;
53457 +exit:
53458 + return 1;
53459 +}
53460 +
53461 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
53462 +{
53463 + if ((ip->mode & mode) &&
53464 + (ip_port >= ip->low) &&
53465 + (ip_port <= ip->high) &&
53466 + ((ntohl(ip_addr) & our_netmask) ==
53467 + (ntohl(our_addr) & our_netmask))
53468 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
53469 + && (ip->type & (1 << type))) {
53470 + if (ip->mode & GR_INVERT)
53471 + return 2; // specifically denied
53472 + else
53473 + return 1; // allowed
53474 + }
53475 +
53476 + return 0; // not specifically allowed, may continue parsing
53477 +}
53478 +
53479 +static int
53480 +gr_search_connectbind(const int full_mode, struct sock *sk,
53481 + struct sockaddr_in *addr, const int type)
53482 +{
53483 + char iface[IFNAMSIZ] = {0};
53484 + struct acl_subject_label *curr;
53485 + struct acl_ip_label *ip;
53486 + struct inet_sock *isk;
53487 + struct net_device *dev;
53488 + struct in_device *idev;
53489 + unsigned long i;
53490 + int ret;
53491 + int mode = full_mode & (GR_BIND | GR_CONNECT);
53492 + __u32 ip_addr = 0;
53493 + __u32 our_addr;
53494 + __u32 our_netmask;
53495 + char *p;
53496 + __u16 ip_port = 0;
53497 + const struct cred *cred = current_cred();
53498 +
53499 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
53500 + return 0;
53501 +
53502 + curr = current->acl;
53503 + isk = inet_sk(sk);
53504 +
53505 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
53506 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
53507 + addr->sin_addr.s_addr = curr->inaddr_any_override;
53508 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
53509 + struct sockaddr_in saddr;
53510 + int err;
53511 +
53512 + saddr.sin_family = AF_INET;
53513 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
53514 + saddr.sin_port = isk->inet_sport;
53515 +
53516 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53517 + if (err)
53518 + return err;
53519 +
53520 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53521 + if (err)
53522 + return err;
53523 + }
53524 +
53525 + if (!curr->ips)
53526 + return 0;
53527 +
53528 + ip_addr = addr->sin_addr.s_addr;
53529 + ip_port = ntohs(addr->sin_port);
53530 +
53531 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53532 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53533 + current->role->roletype, cred->uid,
53534 + cred->gid, current->exec_file ?
53535 + gr_to_filename(current->exec_file->f_path.dentry,
53536 + current->exec_file->f_path.mnt) :
53537 + curr->filename, curr->filename,
53538 + &ip_addr, ip_port, type,
53539 + sk->sk_protocol, mode, &current->signal->saved_ip);
53540 + return 0;
53541 + }
53542 +
53543 + for (i = 0; i < curr->ip_num; i++) {
53544 + ip = *(curr->ips + i);
53545 + if (ip->iface != NULL) {
53546 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
53547 + p = strchr(iface, ':');
53548 + if (p != NULL)
53549 + *p = '\0';
53550 + dev = dev_get_by_name(sock_net(sk), iface);
53551 + if (dev == NULL)
53552 + continue;
53553 + idev = in_dev_get(dev);
53554 + if (idev == NULL) {
53555 + dev_put(dev);
53556 + continue;
53557 + }
53558 + rcu_read_lock();
53559 + for_ifa(idev) {
53560 + if (!strcmp(ip->iface, ifa->ifa_label)) {
53561 + our_addr = ifa->ifa_address;
53562 + our_netmask = 0xffffffff;
53563 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53564 + if (ret == 1) {
53565 + rcu_read_unlock();
53566 + in_dev_put(idev);
53567 + dev_put(dev);
53568 + return 0;
53569 + } else if (ret == 2) {
53570 + rcu_read_unlock();
53571 + in_dev_put(idev);
53572 + dev_put(dev);
53573 + goto denied;
53574 + }
53575 + }
53576 + } endfor_ifa(idev);
53577 + rcu_read_unlock();
53578 + in_dev_put(idev);
53579 + dev_put(dev);
53580 + } else {
53581 + our_addr = ip->addr;
53582 + our_netmask = ip->netmask;
53583 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53584 + if (ret == 1)
53585 + return 0;
53586 + else if (ret == 2)
53587 + goto denied;
53588 + }
53589 + }
53590 +
53591 +denied:
53592 + if (mode == GR_BIND)
53593 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53594 + else if (mode == GR_CONNECT)
53595 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53596 +
53597 + return -EACCES;
53598 +}
53599 +
53600 +int
53601 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53602 +{
53603 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53604 +}
53605 +
53606 +int
53607 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53608 +{
53609 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53610 +}
53611 +
53612 +int gr_search_listen(struct socket *sock)
53613 +{
53614 + struct sock *sk = sock->sk;
53615 + struct sockaddr_in addr;
53616 +
53617 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53618 + addr.sin_port = inet_sk(sk)->inet_sport;
53619 +
53620 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53621 +}
53622 +
53623 +int gr_search_accept(struct socket *sock)
53624 +{
53625 + struct sock *sk = sock->sk;
53626 + struct sockaddr_in addr;
53627 +
53628 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53629 + addr.sin_port = inet_sk(sk)->inet_sport;
53630 +
53631 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53632 +}
53633 +
53634 +int
53635 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53636 +{
53637 + if (addr)
53638 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53639 + else {
53640 + struct sockaddr_in sin;
53641 + const struct inet_sock *inet = inet_sk(sk);
53642 +
53643 + sin.sin_addr.s_addr = inet->inet_daddr;
53644 + sin.sin_port = inet->inet_dport;
53645 +
53646 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53647 + }
53648 +}
53649 +
53650 +int
53651 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53652 +{
53653 + struct sockaddr_in sin;
53654 +
53655 + if (unlikely(skb->len < sizeof (struct udphdr)))
53656 + return 0; // skip this packet
53657 +
53658 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53659 + sin.sin_port = udp_hdr(skb)->source;
53660 +
53661 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53662 +}
53663 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
53664 new file mode 100644
53665 index 0000000..25f54ef
53666 --- /dev/null
53667 +++ b/grsecurity/gracl_learn.c
53668 @@ -0,0 +1,207 @@
53669 +#include <linux/kernel.h>
53670 +#include <linux/mm.h>
53671 +#include <linux/sched.h>
53672 +#include <linux/poll.h>
53673 +#include <linux/string.h>
53674 +#include <linux/file.h>
53675 +#include <linux/types.h>
53676 +#include <linux/vmalloc.h>
53677 +#include <linux/grinternal.h>
53678 +
53679 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53680 + size_t count, loff_t *ppos);
53681 +extern int gr_acl_is_enabled(void);
53682 +
53683 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53684 +static int gr_learn_attached;
53685 +
53686 +/* use a 512k buffer */
53687 +#define LEARN_BUFFER_SIZE (512 * 1024)
53688 +
53689 +static DEFINE_SPINLOCK(gr_learn_lock);
53690 +static DEFINE_MUTEX(gr_learn_user_mutex);
53691 +
53692 +/* we need to maintain two buffers, so that the kernel context of grlearn
53693 + uses a semaphore around the userspace copying, and the other kernel contexts
53694 + use a spinlock when copying into the buffer, since they cannot sleep
53695 +*/
53696 +static char *learn_buffer;
53697 +static char *learn_buffer_user;
53698 +static int learn_buffer_len;
53699 +static int learn_buffer_user_len;
53700 +
53701 +static ssize_t
53702 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53703 +{
53704 + DECLARE_WAITQUEUE(wait, current);
53705 + ssize_t retval = 0;
53706 +
53707 + add_wait_queue(&learn_wait, &wait);
53708 + set_current_state(TASK_INTERRUPTIBLE);
53709 + do {
53710 + mutex_lock(&gr_learn_user_mutex);
53711 + spin_lock(&gr_learn_lock);
53712 + if (learn_buffer_len)
53713 + break;
53714 + spin_unlock(&gr_learn_lock);
53715 + mutex_unlock(&gr_learn_user_mutex);
53716 + if (file->f_flags & O_NONBLOCK) {
53717 + retval = -EAGAIN;
53718 + goto out;
53719 + }
53720 + if (signal_pending(current)) {
53721 + retval = -ERESTARTSYS;
53722 + goto out;
53723 + }
53724 +
53725 + schedule();
53726 + } while (1);
53727 +
53728 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53729 + learn_buffer_user_len = learn_buffer_len;
53730 + retval = learn_buffer_len;
53731 + learn_buffer_len = 0;
53732 +
53733 + spin_unlock(&gr_learn_lock);
53734 +
53735 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53736 + retval = -EFAULT;
53737 +
53738 + mutex_unlock(&gr_learn_user_mutex);
53739 +out:
53740 + set_current_state(TASK_RUNNING);
53741 + remove_wait_queue(&learn_wait, &wait);
53742 + return retval;
53743 +}
53744 +
53745 +static unsigned int
53746 +poll_learn(struct file * file, poll_table * wait)
53747 +{
53748 + poll_wait(file, &learn_wait, wait);
53749 +
53750 + if (learn_buffer_len)
53751 + return (POLLIN | POLLRDNORM);
53752 +
53753 + return 0;
53754 +}
53755 +
53756 +void
53757 +gr_clear_learn_entries(void)
53758 +{
53759 + char *tmp;
53760 +
53761 + mutex_lock(&gr_learn_user_mutex);
53762 + spin_lock(&gr_learn_lock);
53763 + tmp = learn_buffer;
53764 + learn_buffer = NULL;
53765 + spin_unlock(&gr_learn_lock);
53766 + if (tmp)
53767 + vfree(tmp);
53768 + if (learn_buffer_user != NULL) {
53769 + vfree(learn_buffer_user);
53770 + learn_buffer_user = NULL;
53771 + }
53772 + learn_buffer_len = 0;
53773 + mutex_unlock(&gr_learn_user_mutex);
53774 +
53775 + return;
53776 +}
53777 +
53778 +void
53779 +gr_add_learn_entry(const char *fmt, ...)
53780 +{
53781 + va_list args;
53782 + unsigned int len;
53783 +
53784 + if (!gr_learn_attached)
53785 + return;
53786 +
53787 + spin_lock(&gr_learn_lock);
53788 +
53789 + /* leave a gap at the end so we know when it's "full" but don't have to
53790 + compute the exact length of the string we're trying to append
53791 + */
53792 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53793 + spin_unlock(&gr_learn_lock);
53794 + wake_up_interruptible(&learn_wait);
53795 + return;
53796 + }
53797 + if (learn_buffer == NULL) {
53798 + spin_unlock(&gr_learn_lock);
53799 + return;
53800 + }
53801 +
53802 + va_start(args, fmt);
53803 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53804 + va_end(args);
53805 +
53806 + learn_buffer_len += len + 1;
53807 +
53808 + spin_unlock(&gr_learn_lock);
53809 + wake_up_interruptible(&learn_wait);
53810 +
53811 + return;
53812 +}
53813 +
53814 +static int
53815 +open_learn(struct inode *inode, struct file *file)
53816 +{
53817 + if (file->f_mode & FMODE_READ && gr_learn_attached)
53818 + return -EBUSY;
53819 + if (file->f_mode & FMODE_READ) {
53820 + int retval = 0;
53821 + mutex_lock(&gr_learn_user_mutex);
53822 + if (learn_buffer == NULL)
53823 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53824 + if (learn_buffer_user == NULL)
53825 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53826 + if (learn_buffer == NULL) {
53827 + retval = -ENOMEM;
53828 + goto out_error;
53829 + }
53830 + if (learn_buffer_user == NULL) {
53831 + retval = -ENOMEM;
53832 + goto out_error;
53833 + }
53834 + learn_buffer_len = 0;
53835 + learn_buffer_user_len = 0;
53836 + gr_learn_attached = 1;
53837 +out_error:
53838 + mutex_unlock(&gr_learn_user_mutex);
53839 + return retval;
53840 + }
53841 + return 0;
53842 +}
53843 +
53844 +static int
53845 +close_learn(struct inode *inode, struct file *file)
53846 +{
53847 + if (file->f_mode & FMODE_READ) {
53848 + char *tmp = NULL;
53849 + mutex_lock(&gr_learn_user_mutex);
53850 + spin_lock(&gr_learn_lock);
53851 + tmp = learn_buffer;
53852 + learn_buffer = NULL;
53853 + spin_unlock(&gr_learn_lock);
53854 + if (tmp)
53855 + vfree(tmp);
53856 + if (learn_buffer_user != NULL) {
53857 + vfree(learn_buffer_user);
53858 + learn_buffer_user = NULL;
53859 + }
53860 + learn_buffer_len = 0;
53861 + learn_buffer_user_len = 0;
53862 + gr_learn_attached = 0;
53863 + mutex_unlock(&gr_learn_user_mutex);
53864 + }
53865 +
53866 + return 0;
53867 +}
53868 +
53869 +const struct file_operations grsec_fops = {
53870 + .read = read_learn,
53871 + .write = write_grsec_handler,
53872 + .open = open_learn,
53873 + .release = close_learn,
53874 + .poll = poll_learn,
53875 +};
53876 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
53877 new file mode 100644
53878 index 0000000..39645c9
53879 --- /dev/null
53880 +++ b/grsecurity/gracl_res.c
53881 @@ -0,0 +1,68 @@
53882 +#include <linux/kernel.h>
53883 +#include <linux/sched.h>
53884 +#include <linux/gracl.h>
53885 +#include <linux/grinternal.h>
53886 +
53887 +static const char *restab_log[] = {
53888 + [RLIMIT_CPU] = "RLIMIT_CPU",
53889 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53890 + [RLIMIT_DATA] = "RLIMIT_DATA",
53891 + [RLIMIT_STACK] = "RLIMIT_STACK",
53892 + [RLIMIT_CORE] = "RLIMIT_CORE",
53893 + [RLIMIT_RSS] = "RLIMIT_RSS",
53894 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
53895 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53896 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53897 + [RLIMIT_AS] = "RLIMIT_AS",
53898 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53899 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53900 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53901 + [RLIMIT_NICE] = "RLIMIT_NICE",
53902 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53903 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53904 + [GR_CRASH_RES] = "RLIMIT_CRASH"
53905 +};
53906 +
53907 +void
53908 +gr_log_resource(const struct task_struct *task,
53909 + const int res, const unsigned long wanted, const int gt)
53910 +{
53911 + const struct cred *cred;
53912 + unsigned long rlim;
53913 +
53914 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
53915 + return;
53916 +
53917 + // not yet supported resource
53918 + if (unlikely(!restab_log[res]))
53919 + return;
53920 +
53921 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53922 + rlim = task_rlimit_max(task, res);
53923 + else
53924 + rlim = task_rlimit(task, res);
53925 +
53926 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53927 + return;
53928 +
53929 + rcu_read_lock();
53930 + cred = __task_cred(task);
53931 +
53932 + if (res == RLIMIT_NPROC &&
53933 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53934 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53935 + goto out_rcu_unlock;
53936 + else if (res == RLIMIT_MEMLOCK &&
53937 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53938 + goto out_rcu_unlock;
53939 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53940 + goto out_rcu_unlock;
53941 + rcu_read_unlock();
53942 +
53943 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53944 +
53945 + return;
53946 +out_rcu_unlock:
53947 + rcu_read_unlock();
53948 + return;
53949 +}
53950 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
53951 new file mode 100644
53952 index 0000000..5556be3
53953 --- /dev/null
53954 +++ b/grsecurity/gracl_segv.c
53955 @@ -0,0 +1,299 @@
53956 +#include <linux/kernel.h>
53957 +#include <linux/mm.h>
53958 +#include <asm/uaccess.h>
53959 +#include <asm/errno.h>
53960 +#include <asm/mman.h>
53961 +#include <net/sock.h>
53962 +#include <linux/file.h>
53963 +#include <linux/fs.h>
53964 +#include <linux/net.h>
53965 +#include <linux/in.h>
53966 +#include <linux/slab.h>
53967 +#include <linux/types.h>
53968 +#include <linux/sched.h>
53969 +#include <linux/timer.h>
53970 +#include <linux/gracl.h>
53971 +#include <linux/grsecurity.h>
53972 +#include <linux/grinternal.h>
53973 +
53974 +static struct crash_uid *uid_set;
53975 +static unsigned short uid_used;
53976 +static DEFINE_SPINLOCK(gr_uid_lock);
53977 +extern rwlock_t gr_inode_lock;
53978 +extern struct acl_subject_label *
53979 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
53980 + struct acl_role_label *role);
53981 +
53982 +#ifdef CONFIG_BTRFS_FS
53983 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53984 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53985 +#endif
53986 +
53987 +static inline dev_t __get_dev(const struct dentry *dentry)
53988 +{
53989 +#ifdef CONFIG_BTRFS_FS
53990 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53991 + return get_btrfs_dev_from_inode(dentry->d_inode);
53992 + else
53993 +#endif
53994 + return dentry->d_inode->i_sb->s_dev;
53995 +}
53996 +
53997 +int
53998 +gr_init_uidset(void)
53999 +{
54000 + uid_set =
54001 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
54002 + uid_used = 0;
54003 +
54004 + return uid_set ? 1 : 0;
54005 +}
54006 +
54007 +void
54008 +gr_free_uidset(void)
54009 +{
54010 + if (uid_set)
54011 + kfree(uid_set);
54012 +
54013 + return;
54014 +}
54015 +
54016 +int
54017 +gr_find_uid(const uid_t uid)
54018 +{
54019 + struct crash_uid *tmp = uid_set;
54020 + uid_t buid;
54021 + int low = 0, high = uid_used - 1, mid;
54022 +
54023 + while (high >= low) {
54024 + mid = (low + high) >> 1;
54025 + buid = tmp[mid].uid;
54026 + if (buid == uid)
54027 + return mid;
54028 + if (buid > uid)
54029 + high = mid - 1;
54030 + if (buid < uid)
54031 + low = mid + 1;
54032 + }
54033 +
54034 + return -1;
54035 +}
54036 +
54037 +static __inline__ void
54038 +gr_insertsort(void)
54039 +{
54040 + unsigned short i, j;
54041 + struct crash_uid index;
54042 +
54043 + for (i = 1; i < uid_used; i++) {
54044 + index = uid_set[i];
54045 + j = i;
54046 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
54047 + uid_set[j] = uid_set[j - 1];
54048 + j--;
54049 + }
54050 + uid_set[j] = index;
54051 + }
54052 +
54053 + return;
54054 +}
54055 +
54056 +static __inline__ void
54057 +gr_insert_uid(const uid_t uid, const unsigned long expires)
54058 +{
54059 + int loc;
54060 +
54061 + if (uid_used == GR_UIDTABLE_MAX)
54062 + return;
54063 +
54064 + loc = gr_find_uid(uid);
54065 +
54066 + if (loc >= 0) {
54067 + uid_set[loc].expires = expires;
54068 + return;
54069 + }
54070 +
54071 + uid_set[uid_used].uid = uid;
54072 + uid_set[uid_used].expires = expires;
54073 + uid_used++;
54074 +
54075 + gr_insertsort();
54076 +
54077 + return;
54078 +}
54079 +
54080 +void
54081 +gr_remove_uid(const unsigned short loc)
54082 +{
54083 + unsigned short i;
54084 +
54085 + for (i = loc + 1; i < uid_used; i++)
54086 + uid_set[i - 1] = uid_set[i];
54087 +
54088 + uid_used--;
54089 +
54090 + return;
54091 +}
54092 +
54093 +int
54094 +gr_check_crash_uid(const uid_t uid)
54095 +{
54096 + int loc;
54097 + int ret = 0;
54098 +
54099 + if (unlikely(!gr_acl_is_enabled()))
54100 + return 0;
54101 +
54102 + spin_lock(&gr_uid_lock);
54103 + loc = gr_find_uid(uid);
54104 +
54105 + if (loc < 0)
54106 + goto out_unlock;
54107 +
54108 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
54109 + gr_remove_uid(loc);
54110 + else
54111 + ret = 1;
54112 +
54113 +out_unlock:
54114 + spin_unlock(&gr_uid_lock);
54115 + return ret;
54116 +}
54117 +
54118 +static __inline__ int
54119 +proc_is_setxid(const struct cred *cred)
54120 +{
54121 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
54122 + cred->uid != cred->fsuid)
54123 + return 1;
54124 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
54125 + cred->gid != cred->fsgid)
54126 + return 1;
54127 +
54128 + return 0;
54129 +}
54130 +
54131 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
54132 +
54133 +void
54134 +gr_handle_crash(struct task_struct *task, const int sig)
54135 +{
54136 + struct acl_subject_label *curr;
54137 + struct task_struct *tsk, *tsk2;
54138 + const struct cred *cred;
54139 + const struct cred *cred2;
54140 +
54141 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
54142 + return;
54143 +
54144 + if (unlikely(!gr_acl_is_enabled()))
54145 + return;
54146 +
54147 + curr = task->acl;
54148 +
54149 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
54150 + return;
54151 +
54152 + if (time_before_eq(curr->expires, get_seconds())) {
54153 + curr->expires = 0;
54154 + curr->crashes = 0;
54155 + }
54156 +
54157 + curr->crashes++;
54158 +
54159 + if (!curr->expires)
54160 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
54161 +
54162 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
54163 + time_after(curr->expires, get_seconds())) {
54164 + rcu_read_lock();
54165 + cred = __task_cred(task);
54166 + if (cred->uid && proc_is_setxid(cred)) {
54167 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
54168 + spin_lock(&gr_uid_lock);
54169 + gr_insert_uid(cred->uid, curr->expires);
54170 + spin_unlock(&gr_uid_lock);
54171 + curr->expires = 0;
54172 + curr->crashes = 0;
54173 + read_lock(&tasklist_lock);
54174 + do_each_thread(tsk2, tsk) {
54175 + cred2 = __task_cred(tsk);
54176 + if (tsk != task && cred2->uid == cred->uid)
54177 + gr_fake_force_sig(SIGKILL, tsk);
54178 + } while_each_thread(tsk2, tsk);
54179 + read_unlock(&tasklist_lock);
54180 + } else {
54181 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
54182 + read_lock(&tasklist_lock);
54183 + read_lock(&grsec_exec_file_lock);
54184 + do_each_thread(tsk2, tsk) {
54185 + if (likely(tsk != task)) {
54186 + // if this thread has the same subject as the one that triggered
54187 + // RES_CRASH and it's the same binary, kill it
54188 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
54189 + gr_fake_force_sig(SIGKILL, tsk);
54190 + }
54191 + } while_each_thread(tsk2, tsk);
54192 + read_unlock(&grsec_exec_file_lock);
54193 + read_unlock(&tasklist_lock);
54194 + }
54195 + rcu_read_unlock();
54196 + }
54197 +
54198 + return;
54199 +}
54200 +
54201 +int
54202 +gr_check_crash_exec(const struct file *filp)
54203 +{
54204 + struct acl_subject_label *curr;
54205 +
54206 + if (unlikely(!gr_acl_is_enabled()))
54207 + return 0;
54208 +
54209 + read_lock(&gr_inode_lock);
54210 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
54211 + __get_dev(filp->f_path.dentry),
54212 + current->role);
54213 + read_unlock(&gr_inode_lock);
54214 +
54215 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
54216 + (!curr->crashes && !curr->expires))
54217 + return 0;
54218 +
54219 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
54220 + time_after(curr->expires, get_seconds()))
54221 + return 1;
54222 + else if (time_before_eq(curr->expires, get_seconds())) {
54223 + curr->crashes = 0;
54224 + curr->expires = 0;
54225 + }
54226 +
54227 + return 0;
54228 +}
54229 +
54230 +void
54231 +gr_handle_alertkill(struct task_struct *task)
54232 +{
54233 + struct acl_subject_label *curracl;
54234 + __u32 curr_ip;
54235 + struct task_struct *p, *p2;
54236 +
54237 + if (unlikely(!gr_acl_is_enabled()))
54238 + return;
54239 +
54240 + curracl = task->acl;
54241 + curr_ip = task->signal->curr_ip;
54242 +
54243 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
54244 + read_lock(&tasklist_lock);
54245 + do_each_thread(p2, p) {
54246 + if (p->signal->curr_ip == curr_ip)
54247 + gr_fake_force_sig(SIGKILL, p);
54248 + } while_each_thread(p2, p);
54249 + read_unlock(&tasklist_lock);
54250 + } else if (curracl->mode & GR_KILLPROC)
54251 + gr_fake_force_sig(SIGKILL, task);
54252 +
54253 + return;
54254 +}
54255 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
54256 new file mode 100644
54257 index 0000000..9d83a69
54258 --- /dev/null
54259 +++ b/grsecurity/gracl_shm.c
54260 @@ -0,0 +1,40 @@
54261 +#include <linux/kernel.h>
54262 +#include <linux/mm.h>
54263 +#include <linux/sched.h>
54264 +#include <linux/file.h>
54265 +#include <linux/ipc.h>
54266 +#include <linux/gracl.h>
54267 +#include <linux/grsecurity.h>
54268 +#include <linux/grinternal.h>
54269 +
54270 +int
54271 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54272 + const time_t shm_createtime, const uid_t cuid, const int shmid)
54273 +{
54274 + struct task_struct *task;
54275 +
54276 + if (!gr_acl_is_enabled())
54277 + return 1;
54278 +
54279 + rcu_read_lock();
54280 + read_lock(&tasklist_lock);
54281 +
54282 + task = find_task_by_vpid(shm_cprid);
54283 +
54284 + if (unlikely(!task))
54285 + task = find_task_by_vpid(shm_lapid);
54286 +
54287 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
54288 + (task->pid == shm_lapid)) &&
54289 + (task->acl->mode & GR_PROTSHM) &&
54290 + (task->acl != current->acl))) {
54291 + read_unlock(&tasklist_lock);
54292 + rcu_read_unlock();
54293 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
54294 + return 0;
54295 + }
54296 + read_unlock(&tasklist_lock);
54297 + rcu_read_unlock();
54298 +
54299 + return 1;
54300 +}
54301 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
54302 new file mode 100644
54303 index 0000000..bc0be01
54304 --- /dev/null
54305 +++ b/grsecurity/grsec_chdir.c
54306 @@ -0,0 +1,19 @@
54307 +#include <linux/kernel.h>
54308 +#include <linux/sched.h>
54309 +#include <linux/fs.h>
54310 +#include <linux/file.h>
54311 +#include <linux/grsecurity.h>
54312 +#include <linux/grinternal.h>
54313 +
54314 +void
54315 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
54316 +{
54317 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54318 + if ((grsec_enable_chdir && grsec_enable_group &&
54319 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
54320 + !grsec_enable_group)) {
54321 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
54322 + }
54323 +#endif
54324 + return;
54325 +}
54326 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
54327 new file mode 100644
54328 index 0000000..a2dc675
54329 --- /dev/null
54330 +++ b/grsecurity/grsec_chroot.c
54331 @@ -0,0 +1,351 @@
54332 +#include <linux/kernel.h>
54333 +#include <linux/module.h>
54334 +#include <linux/sched.h>
54335 +#include <linux/file.h>
54336 +#include <linux/fs.h>
54337 +#include <linux/mount.h>
54338 +#include <linux/types.h>
54339 +#include <linux/pid_namespace.h>
54340 +#include <linux/grsecurity.h>
54341 +#include <linux/grinternal.h>
54342 +
54343 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
54344 +{
54345 +#ifdef CONFIG_GRKERNSEC
54346 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
54347 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
54348 + task->gr_is_chrooted = 1;
54349 + else
54350 + task->gr_is_chrooted = 0;
54351 +
54352 + task->gr_chroot_dentry = path->dentry;
54353 +#endif
54354 + return;
54355 +}
54356 +
54357 +void gr_clear_chroot_entries(struct task_struct *task)
54358 +{
54359 +#ifdef CONFIG_GRKERNSEC
54360 + task->gr_is_chrooted = 0;
54361 + task->gr_chroot_dentry = NULL;
54362 +#endif
54363 + return;
54364 +}
54365 +
54366 +int
54367 +gr_handle_chroot_unix(const pid_t pid)
54368 +{
54369 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54370 + struct task_struct *p;
54371 +
54372 + if (unlikely(!grsec_enable_chroot_unix))
54373 + return 1;
54374 +
54375 + if (likely(!proc_is_chrooted(current)))
54376 + return 1;
54377 +
54378 + rcu_read_lock();
54379 + read_lock(&tasklist_lock);
54380 + p = find_task_by_vpid_unrestricted(pid);
54381 + if (unlikely(p && !have_same_root(current, p))) {
54382 + read_unlock(&tasklist_lock);
54383 + rcu_read_unlock();
54384 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
54385 + return 0;
54386 + }
54387 + read_unlock(&tasklist_lock);
54388 + rcu_read_unlock();
54389 +#endif
54390 + return 1;
54391 +}
54392 +
54393 +int
54394 +gr_handle_chroot_nice(void)
54395 +{
54396 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54397 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
54398 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
54399 + return -EPERM;
54400 + }
54401 +#endif
54402 + return 0;
54403 +}
54404 +
54405 +int
54406 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
54407 +{
54408 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54409 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
54410 + && proc_is_chrooted(current)) {
54411 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
54412 + return -EACCES;
54413 + }
54414 +#endif
54415 + return 0;
54416 +}
54417 +
54418 +int
54419 +gr_handle_chroot_rawio(const struct inode *inode)
54420 +{
54421 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54422 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
54423 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
54424 + return 1;
54425 +#endif
54426 + return 0;
54427 +}
54428 +
54429 +int
54430 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
54431 +{
54432 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54433 + struct task_struct *p;
54434 + int ret = 0;
54435 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
54436 + return ret;
54437 +
54438 + read_lock(&tasklist_lock);
54439 + do_each_pid_task(pid, type, p) {
54440 + if (!have_same_root(current, p)) {
54441 + ret = 1;
54442 + goto out;
54443 + }
54444 + } while_each_pid_task(pid, type, p);
54445 +out:
54446 + read_unlock(&tasklist_lock);
54447 + return ret;
54448 +#endif
54449 + return 0;
54450 +}
54451 +
54452 +int
54453 +gr_pid_is_chrooted(struct task_struct *p)
54454 +{
54455 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54456 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
54457 + return 0;
54458 +
54459 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
54460 + !have_same_root(current, p)) {
54461 + return 1;
54462 + }
54463 +#endif
54464 + return 0;
54465 +}
54466 +
54467 +EXPORT_SYMBOL(gr_pid_is_chrooted);
54468 +
54469 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
54470 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
54471 +{
54472 + struct path path, currentroot;
54473 + int ret = 0;
54474 +
54475 + path.dentry = (struct dentry *)u_dentry;
54476 + path.mnt = (struct vfsmount *)u_mnt;
54477 + get_fs_root(current->fs, &currentroot);
54478 + if (path_is_under(&path, &currentroot))
54479 + ret = 1;
54480 + path_put(&currentroot);
54481 +
54482 + return ret;
54483 +}
54484 +#endif
54485 +
54486 +int
54487 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
54488 +{
54489 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54490 + if (!grsec_enable_chroot_fchdir)
54491 + return 1;
54492 +
54493 + if (!proc_is_chrooted(current))
54494 + return 1;
54495 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
54496 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
54497 + return 0;
54498 + }
54499 +#endif
54500 + return 1;
54501 +}
54502 +
54503 +int
54504 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54505 + const time_t shm_createtime)
54506 +{
54507 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54508 + struct task_struct *p;
54509 + time_t starttime;
54510 +
54511 + if (unlikely(!grsec_enable_chroot_shmat))
54512 + return 1;
54513 +
54514 + if (likely(!proc_is_chrooted(current)))
54515 + return 1;
54516 +
54517 + rcu_read_lock();
54518 + read_lock(&tasklist_lock);
54519 +
54520 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
54521 + starttime = p->start_time.tv_sec;
54522 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
54523 + if (have_same_root(current, p)) {
54524 + goto allow;
54525 + } else {
54526 + read_unlock(&tasklist_lock);
54527 + rcu_read_unlock();
54528 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54529 + return 0;
54530 + }
54531 + }
54532 + /* creator exited, pid reuse, fall through to next check */
54533 + }
54534 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
54535 + if (unlikely(!have_same_root(current, p))) {
54536 + read_unlock(&tasklist_lock);
54537 + rcu_read_unlock();
54538 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54539 + return 0;
54540 + }
54541 + }
54542 +
54543 +allow:
54544 + read_unlock(&tasklist_lock);
54545 + rcu_read_unlock();
54546 +#endif
54547 + return 1;
54548 +}
54549 +
54550 +void
54551 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
54552 +{
54553 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54554 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
54555 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
54556 +#endif
54557 + return;
54558 +}
54559 +
54560 +int
54561 +gr_handle_chroot_mknod(const struct dentry *dentry,
54562 + const struct vfsmount *mnt, const int mode)
54563 +{
54564 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54565 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
54566 + proc_is_chrooted(current)) {
54567 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
54568 + return -EPERM;
54569 + }
54570 +#endif
54571 + return 0;
54572 +}
54573 +
54574 +int
54575 +gr_handle_chroot_mount(const struct dentry *dentry,
54576 + const struct vfsmount *mnt, const char *dev_name)
54577 +{
54578 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54579 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54580 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54581 + return -EPERM;
54582 + }
54583 +#endif
54584 + return 0;
54585 +}
54586 +
54587 +int
54588 +gr_handle_chroot_pivot(void)
54589 +{
54590 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54591 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54592 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54593 + return -EPERM;
54594 + }
54595 +#endif
54596 + return 0;
54597 +}
54598 +
54599 +int
54600 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54601 +{
54602 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54603 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54604 + !gr_is_outside_chroot(dentry, mnt)) {
54605 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54606 + return -EPERM;
54607 + }
54608 +#endif
54609 + return 0;
54610 +}
54611 +
54612 +extern const char *captab_log[];
54613 +extern int captab_log_entries;
54614 +
54615 +int
54616 +gr_chroot_is_capable(const int cap)
54617 +{
54618 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54619 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54620 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54621 + if (cap_raised(chroot_caps, cap)) {
54622 + const struct cred *creds = current_cred();
54623 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54624 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54625 + }
54626 + return 0;
54627 + }
54628 + }
54629 +#endif
54630 + return 1;
54631 +}
54632 +
54633 +int
54634 +gr_chroot_is_capable_nolog(const int cap)
54635 +{
54636 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54637 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54638 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54639 + if (cap_raised(chroot_caps, cap)) {
54640 + return 0;
54641 + }
54642 + }
54643 +#endif
54644 + return 1;
54645 +}
54646 +
54647 +int
54648 +gr_handle_chroot_sysctl(const int op)
54649 +{
54650 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54651 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54652 + proc_is_chrooted(current))
54653 + return -EACCES;
54654 +#endif
54655 + return 0;
54656 +}
54657 +
54658 +void
54659 +gr_handle_chroot_chdir(struct path *path)
54660 +{
54661 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54662 + if (grsec_enable_chroot_chdir)
54663 + set_fs_pwd(current->fs, path);
54664 +#endif
54665 + return;
54666 +}
54667 +
54668 +int
54669 +gr_handle_chroot_chmod(const struct dentry *dentry,
54670 + const struct vfsmount *mnt, const int mode)
54671 +{
54672 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54673 + /* allow chmod +s on directories, but not files */
54674 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54675 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54676 + proc_is_chrooted(current)) {
54677 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54678 + return -EPERM;
54679 + }
54680 +#endif
54681 + return 0;
54682 +}
54683 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
54684 new file mode 100644
54685 index 0000000..213ad8b
54686 --- /dev/null
54687 +++ b/grsecurity/grsec_disabled.c
54688 @@ -0,0 +1,437 @@
54689 +#include <linux/kernel.h>
54690 +#include <linux/module.h>
54691 +#include <linux/sched.h>
54692 +#include <linux/file.h>
54693 +#include <linux/fs.h>
54694 +#include <linux/kdev_t.h>
54695 +#include <linux/net.h>
54696 +#include <linux/in.h>
54697 +#include <linux/ip.h>
54698 +#include <linux/skbuff.h>
54699 +#include <linux/sysctl.h>
54700 +
54701 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54702 +void
54703 +pax_set_initial_flags(struct linux_binprm *bprm)
54704 +{
54705 + return;
54706 +}
54707 +#endif
54708 +
54709 +#ifdef CONFIG_SYSCTL
54710 +__u32
54711 +gr_handle_sysctl(const struct ctl_table * table, const int op)
54712 +{
54713 + return 0;
54714 +}
54715 +#endif
54716 +
54717 +#ifdef CONFIG_TASKSTATS
54718 +int gr_is_taskstats_denied(int pid)
54719 +{
54720 + return 0;
54721 +}
54722 +#endif
54723 +
54724 +int
54725 +gr_acl_is_enabled(void)
54726 +{
54727 + return 0;
54728 +}
54729 +
54730 +void
54731 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54732 +{
54733 + return;
54734 +}
54735 +
54736 +int
54737 +gr_handle_rawio(const struct inode *inode)
54738 +{
54739 + return 0;
54740 +}
54741 +
54742 +void
54743 +gr_acl_handle_psacct(struct task_struct *task, const long code)
54744 +{
54745 + return;
54746 +}
54747 +
54748 +int
54749 +gr_handle_ptrace(struct task_struct *task, const long request)
54750 +{
54751 + return 0;
54752 +}
54753 +
54754 +int
54755 +gr_handle_proc_ptrace(struct task_struct *task)
54756 +{
54757 + return 0;
54758 +}
54759 +
54760 +void
54761 +gr_learn_resource(const struct task_struct *task,
54762 + const int res, const unsigned long wanted, const int gt)
54763 +{
54764 + return;
54765 +}
54766 +
54767 +int
54768 +gr_set_acls(const int type)
54769 +{
54770 + return 0;
54771 +}
54772 +
54773 +int
54774 +gr_check_hidden_task(const struct task_struct *tsk)
54775 +{
54776 + return 0;
54777 +}
54778 +
54779 +int
54780 +gr_check_protected_task(const struct task_struct *task)
54781 +{
54782 + return 0;
54783 +}
54784 +
54785 +int
54786 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54787 +{
54788 + return 0;
54789 +}
54790 +
54791 +void
54792 +gr_copy_label(struct task_struct *tsk)
54793 +{
54794 + return;
54795 +}
54796 +
54797 +void
54798 +gr_set_pax_flags(struct task_struct *task)
54799 +{
54800 + return;
54801 +}
54802 +
54803 +int
54804 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54805 + const int unsafe_share)
54806 +{
54807 + return 0;
54808 +}
54809 +
54810 +void
54811 +gr_handle_delete(const ino_t ino, const dev_t dev)
54812 +{
54813 + return;
54814 +}
54815 +
54816 +void
54817 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54818 +{
54819 + return;
54820 +}
54821 +
54822 +void
54823 +gr_handle_crash(struct task_struct *task, const int sig)
54824 +{
54825 + return;
54826 +}
54827 +
54828 +int
54829 +gr_check_crash_exec(const struct file *filp)
54830 +{
54831 + return 0;
54832 +}
54833 +
54834 +int
54835 +gr_check_crash_uid(const uid_t uid)
54836 +{
54837 + return 0;
54838 +}
54839 +
54840 +void
54841 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54842 + struct dentry *old_dentry,
54843 + struct dentry *new_dentry,
54844 + struct vfsmount *mnt, const __u8 replace)
54845 +{
54846 + return;
54847 +}
54848 +
54849 +int
54850 +gr_search_socket(const int family, const int type, const int protocol)
54851 +{
54852 + return 1;
54853 +}
54854 +
54855 +int
54856 +gr_search_connectbind(const int mode, const struct socket *sock,
54857 + const struct sockaddr_in *addr)
54858 +{
54859 + return 0;
54860 +}
54861 +
54862 +void
54863 +gr_handle_alertkill(struct task_struct *task)
54864 +{
54865 + return;
54866 +}
54867 +
54868 +__u32
54869 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54870 +{
54871 + return 1;
54872 +}
54873 +
54874 +__u32
54875 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54876 + const struct vfsmount * mnt)
54877 +{
54878 + return 1;
54879 +}
54880 +
54881 +__u32
54882 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54883 + int acc_mode)
54884 +{
54885 + return 1;
54886 +}
54887 +
54888 +__u32
54889 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54890 +{
54891 + return 1;
54892 +}
54893 +
54894 +__u32
54895 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54896 +{
54897 + return 1;
54898 +}
54899 +
54900 +int
54901 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54902 + unsigned int *vm_flags)
54903 +{
54904 + return 1;
54905 +}
54906 +
54907 +__u32
54908 +gr_acl_handle_truncate(const struct dentry * dentry,
54909 + const struct vfsmount * mnt)
54910 +{
54911 + return 1;
54912 +}
54913 +
54914 +__u32
54915 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54916 +{
54917 + return 1;
54918 +}
54919 +
54920 +__u32
54921 +gr_acl_handle_access(const struct dentry * dentry,
54922 + const struct vfsmount * mnt, const int fmode)
54923 +{
54924 + return 1;
54925 +}
54926 +
54927 +__u32
54928 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54929 + umode_t *mode)
54930 +{
54931 + return 1;
54932 +}
54933 +
54934 +__u32
54935 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54936 +{
54937 + return 1;
54938 +}
54939 +
54940 +__u32
54941 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54942 +{
54943 + return 1;
54944 +}
54945 +
54946 +void
54947 +grsecurity_init(void)
54948 +{
54949 + return;
54950 +}
54951 +
54952 +umode_t gr_acl_umask(void)
54953 +{
54954 + return 0;
54955 +}
54956 +
54957 +__u32
54958 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54959 + const struct dentry * parent_dentry,
54960 + const struct vfsmount * parent_mnt,
54961 + const int mode)
54962 +{
54963 + return 1;
54964 +}
54965 +
54966 +__u32
54967 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
54968 + const struct dentry * parent_dentry,
54969 + const struct vfsmount * parent_mnt)
54970 +{
54971 + return 1;
54972 +}
54973 +
54974 +__u32
54975 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54976 + const struct dentry * parent_dentry,
54977 + const struct vfsmount * parent_mnt, const char *from)
54978 +{
54979 + return 1;
54980 +}
54981 +
54982 +__u32
54983 +gr_acl_handle_link(const struct dentry * new_dentry,
54984 + const struct dentry * parent_dentry,
54985 + const struct vfsmount * parent_mnt,
54986 + const struct dentry * old_dentry,
54987 + const struct vfsmount * old_mnt, const char *to)
54988 +{
54989 + return 1;
54990 +}
54991 +
54992 +int
54993 +gr_acl_handle_rename(const struct dentry *new_dentry,
54994 + const struct dentry *parent_dentry,
54995 + const struct vfsmount *parent_mnt,
54996 + const struct dentry *old_dentry,
54997 + const struct inode *old_parent_inode,
54998 + const struct vfsmount *old_mnt, const char *newname)
54999 +{
55000 + return 0;
55001 +}
55002 +
55003 +int
55004 +gr_acl_handle_filldir(const struct file *file, const char *name,
55005 + const int namelen, const ino_t ino)
55006 +{
55007 + return 1;
55008 +}
55009 +
55010 +int
55011 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55012 + const time_t shm_createtime, const uid_t cuid, const int shmid)
55013 +{
55014 + return 1;
55015 +}
55016 +
55017 +int
55018 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
55019 +{
55020 + return 0;
55021 +}
55022 +
55023 +int
55024 +gr_search_accept(const struct socket *sock)
55025 +{
55026 + return 0;
55027 +}
55028 +
55029 +int
55030 +gr_search_listen(const struct socket *sock)
55031 +{
55032 + return 0;
55033 +}
55034 +
55035 +int
55036 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
55037 +{
55038 + return 0;
55039 +}
55040 +
55041 +__u32
55042 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
55043 +{
55044 + return 1;
55045 +}
55046 +
55047 +__u32
55048 +gr_acl_handle_creat(const struct dentry * dentry,
55049 + const struct dentry * p_dentry,
55050 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55051 + const int imode)
55052 +{
55053 + return 1;
55054 +}
55055 +
55056 +void
55057 +gr_acl_handle_exit(void)
55058 +{
55059 + return;
55060 +}
55061 +
55062 +int
55063 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
55064 +{
55065 + return 1;
55066 +}
55067 +
55068 +void
55069 +gr_set_role_label(const uid_t uid, const gid_t gid)
55070 +{
55071 + return;
55072 +}
55073 +
55074 +int
55075 +gr_acl_handle_procpidmem(const struct task_struct *task)
55076 +{
55077 + return 0;
55078 +}
55079 +
55080 +int
55081 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
55082 +{
55083 + return 0;
55084 +}
55085 +
55086 +int
55087 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
55088 +{
55089 + return 0;
55090 +}
55091 +
55092 +void
55093 +gr_set_kernel_label(struct task_struct *task)
55094 +{
55095 + return;
55096 +}
55097 +
55098 +int
55099 +gr_check_user_change(int real, int effective, int fs)
55100 +{
55101 + return 0;
55102 +}
55103 +
55104 +int
55105 +gr_check_group_change(int real, int effective, int fs)
55106 +{
55107 + return 0;
55108 +}
55109 +
55110 +int gr_acl_enable_at_secure(void)
55111 +{
55112 + return 0;
55113 +}
55114 +
55115 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
55116 +{
55117 + return dentry->d_inode->i_sb->s_dev;
55118 +}
55119 +
55120 +EXPORT_SYMBOL(gr_learn_resource);
55121 +EXPORT_SYMBOL(gr_set_kernel_label);
55122 +#ifdef CONFIG_SECURITY
55123 +EXPORT_SYMBOL(gr_check_user_change);
55124 +EXPORT_SYMBOL(gr_check_group_change);
55125 +#endif
55126 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
55127 new file mode 100644
55128 index 0000000..2b05ada
55129 --- /dev/null
55130 +++ b/grsecurity/grsec_exec.c
55131 @@ -0,0 +1,146 @@
55132 +#include <linux/kernel.h>
55133 +#include <linux/sched.h>
55134 +#include <linux/file.h>
55135 +#include <linux/binfmts.h>
55136 +#include <linux/fs.h>
55137 +#include <linux/types.h>
55138 +#include <linux/grdefs.h>
55139 +#include <linux/grsecurity.h>
55140 +#include <linux/grinternal.h>
55141 +#include <linux/capability.h>
55142 +#include <linux/module.h>
55143 +
55144 +#include <asm/uaccess.h>
55145 +
55146 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55147 +static char gr_exec_arg_buf[132];
55148 +static DEFINE_MUTEX(gr_exec_arg_mutex);
55149 +#endif
55150 +
55151 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
55152 +
55153 +void
55154 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
55155 +{
55156 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55157 + char *grarg = gr_exec_arg_buf;
55158 + unsigned int i, x, execlen = 0;
55159 + char c;
55160 +
55161 + if (!((grsec_enable_execlog && grsec_enable_group &&
55162 + in_group_p(grsec_audit_gid))
55163 + || (grsec_enable_execlog && !grsec_enable_group)))
55164 + return;
55165 +
55166 + mutex_lock(&gr_exec_arg_mutex);
55167 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
55168 +
55169 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
55170 + const char __user *p;
55171 + unsigned int len;
55172 +
55173 + p = get_user_arg_ptr(argv, i);
55174 + if (IS_ERR(p))
55175 + goto log;
55176 +
55177 + len = strnlen_user(p, 128 - execlen);
55178 + if (len > 128 - execlen)
55179 + len = 128 - execlen;
55180 + else if (len > 0)
55181 + len--;
55182 + if (copy_from_user(grarg + execlen, p, len))
55183 + goto log;
55184 +
55185 + /* rewrite unprintable characters */
55186 + for (x = 0; x < len; x++) {
55187 + c = *(grarg + execlen + x);
55188 + if (c < 32 || c > 126)
55189 + *(grarg + execlen + x) = ' ';
55190 + }
55191 +
55192 + execlen += len;
55193 + *(grarg + execlen) = ' ';
55194 + *(grarg + execlen + 1) = '\0';
55195 + execlen++;
55196 + }
55197 +
55198 + log:
55199 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
55200 + bprm->file->f_path.mnt, grarg);
55201 + mutex_unlock(&gr_exec_arg_mutex);
55202 +#endif
55203 + return;
55204 +}
55205 +
55206 +#ifdef CONFIG_GRKERNSEC
55207 +extern int gr_acl_is_capable(const int cap);
55208 +extern int gr_acl_is_capable_nolog(const int cap);
55209 +extern int gr_chroot_is_capable(const int cap);
55210 +extern int gr_chroot_is_capable_nolog(const int cap);
55211 +#endif
55212 +
55213 +const char *captab_log[] = {
55214 + "CAP_CHOWN",
55215 + "CAP_DAC_OVERRIDE",
55216 + "CAP_DAC_READ_SEARCH",
55217 + "CAP_FOWNER",
55218 + "CAP_FSETID",
55219 + "CAP_KILL",
55220 + "CAP_SETGID",
55221 + "CAP_SETUID",
55222 + "CAP_SETPCAP",
55223 + "CAP_LINUX_IMMUTABLE",
55224 + "CAP_NET_BIND_SERVICE",
55225 + "CAP_NET_BROADCAST",
55226 + "CAP_NET_ADMIN",
55227 + "CAP_NET_RAW",
55228 + "CAP_IPC_LOCK",
55229 + "CAP_IPC_OWNER",
55230 + "CAP_SYS_MODULE",
55231 + "CAP_SYS_RAWIO",
55232 + "CAP_SYS_CHROOT",
55233 + "CAP_SYS_PTRACE",
55234 + "CAP_SYS_PACCT",
55235 + "CAP_SYS_ADMIN",
55236 + "CAP_SYS_BOOT",
55237 + "CAP_SYS_NICE",
55238 + "CAP_SYS_RESOURCE",
55239 + "CAP_SYS_TIME",
55240 + "CAP_SYS_TTY_CONFIG",
55241 + "CAP_MKNOD",
55242 + "CAP_LEASE",
55243 + "CAP_AUDIT_WRITE",
55244 + "CAP_AUDIT_CONTROL",
55245 + "CAP_SETFCAP",
55246 + "CAP_MAC_OVERRIDE",
55247 + "CAP_MAC_ADMIN",
55248 + "CAP_SYSLOG",
55249 + "CAP_WAKE_ALARM"
55250 +};
55251 +
55252 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
55253 +
55254 +int gr_is_capable(const int cap)
55255 +{
55256 +#ifdef CONFIG_GRKERNSEC
55257 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
55258 + return 1;
55259 + return 0;
55260 +#else
55261 + return 1;
55262 +#endif
55263 +}
55264 +
55265 +int gr_is_capable_nolog(const int cap)
55266 +{
55267 +#ifdef CONFIG_GRKERNSEC
55268 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
55269 + return 1;
55270 + return 0;
55271 +#else
55272 + return 1;
55273 +#endif
55274 +}
55275 +
55276 +EXPORT_SYMBOL(gr_is_capable);
55277 +EXPORT_SYMBOL(gr_is_capable_nolog);
55278 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
55279 new file mode 100644
55280 index 0000000..d3ee748
55281 --- /dev/null
55282 +++ b/grsecurity/grsec_fifo.c
55283 @@ -0,0 +1,24 @@
55284 +#include <linux/kernel.h>
55285 +#include <linux/sched.h>
55286 +#include <linux/fs.h>
55287 +#include <linux/file.h>
55288 +#include <linux/grinternal.h>
55289 +
55290 +int
55291 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
55292 + const struct dentry *dir, const int flag, const int acc_mode)
55293 +{
55294 +#ifdef CONFIG_GRKERNSEC_FIFO
55295 + const struct cred *cred = current_cred();
55296 +
55297 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
55298 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
55299 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
55300 + (cred->fsuid != dentry->d_inode->i_uid)) {
55301 + if (!inode_permission(dentry->d_inode, acc_mode))
55302 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
55303 + return -EACCES;
55304 + }
55305 +#endif
55306 + return 0;
55307 +}
55308 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
55309 new file mode 100644
55310 index 0000000..8ca18bf
55311 --- /dev/null
55312 +++ b/grsecurity/grsec_fork.c
55313 @@ -0,0 +1,23 @@
55314 +#include <linux/kernel.h>
55315 +#include <linux/sched.h>
55316 +#include <linux/grsecurity.h>
55317 +#include <linux/grinternal.h>
55318 +#include <linux/errno.h>
55319 +
55320 +void
55321 +gr_log_forkfail(const int retval)
55322 +{
55323 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55324 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
55325 + switch (retval) {
55326 + case -EAGAIN:
55327 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
55328 + break;
55329 + case -ENOMEM:
55330 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
55331 + break;
55332 + }
55333 + }
55334 +#endif
55335 + return;
55336 +}
55337 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
55338 new file mode 100644
55339 index 0000000..01ddde4
55340 --- /dev/null
55341 +++ b/grsecurity/grsec_init.c
55342 @@ -0,0 +1,277 @@
55343 +#include <linux/kernel.h>
55344 +#include <linux/sched.h>
55345 +#include <linux/mm.h>
55346 +#include <linux/gracl.h>
55347 +#include <linux/slab.h>
55348 +#include <linux/vmalloc.h>
55349 +#include <linux/percpu.h>
55350 +#include <linux/module.h>
55351 +
55352 +int grsec_enable_ptrace_readexec;
55353 +int grsec_enable_setxid;
55354 +int grsec_enable_brute;
55355 +int grsec_enable_link;
55356 +int grsec_enable_dmesg;
55357 +int grsec_enable_harden_ptrace;
55358 +int grsec_enable_fifo;
55359 +int grsec_enable_execlog;
55360 +int grsec_enable_signal;
55361 +int grsec_enable_forkfail;
55362 +int grsec_enable_audit_ptrace;
55363 +int grsec_enable_time;
55364 +int grsec_enable_audit_textrel;
55365 +int grsec_enable_group;
55366 +int grsec_audit_gid;
55367 +int grsec_enable_chdir;
55368 +int grsec_enable_mount;
55369 +int grsec_enable_rofs;
55370 +int grsec_enable_chroot_findtask;
55371 +int grsec_enable_chroot_mount;
55372 +int grsec_enable_chroot_shmat;
55373 +int grsec_enable_chroot_fchdir;
55374 +int grsec_enable_chroot_double;
55375 +int grsec_enable_chroot_pivot;
55376 +int grsec_enable_chroot_chdir;
55377 +int grsec_enable_chroot_chmod;
55378 +int grsec_enable_chroot_mknod;
55379 +int grsec_enable_chroot_nice;
55380 +int grsec_enable_chroot_execlog;
55381 +int grsec_enable_chroot_caps;
55382 +int grsec_enable_chroot_sysctl;
55383 +int grsec_enable_chroot_unix;
55384 +int grsec_enable_tpe;
55385 +int grsec_tpe_gid;
55386 +int grsec_enable_blackhole;
55387 +#ifdef CONFIG_IPV6_MODULE
55388 +EXPORT_SYMBOL(grsec_enable_blackhole);
55389 +#endif
55390 +int grsec_lastack_retries;
55391 +int grsec_enable_tpe_all;
55392 +int grsec_enable_tpe_invert;
55393 +int grsec_enable_socket_all;
55394 +int grsec_socket_all_gid;
55395 +int grsec_enable_socket_client;
55396 +int grsec_socket_client_gid;
55397 +int grsec_enable_socket_server;
55398 +int grsec_socket_server_gid;
55399 +int grsec_resource_logging;
55400 +int grsec_disable_privio;
55401 +int grsec_enable_log_rwxmaps;
55402 +int grsec_lock;
55403 +
55404 +DEFINE_SPINLOCK(grsec_alert_lock);
55405 +unsigned long grsec_alert_wtime = 0;
55406 +unsigned long grsec_alert_fyet = 0;
55407 +
55408 +DEFINE_SPINLOCK(grsec_audit_lock);
55409 +
55410 +DEFINE_RWLOCK(grsec_exec_file_lock);
55411 +
55412 +char *gr_shared_page[4];
55413 +
55414 +char *gr_alert_log_fmt;
55415 +char *gr_audit_log_fmt;
55416 +char *gr_alert_log_buf;
55417 +char *gr_audit_log_buf;
55418 +
55419 +extern struct gr_arg *gr_usermode;
55420 +extern unsigned char *gr_system_salt;
55421 +extern unsigned char *gr_system_sum;
55422 +
55423 +void __init
55424 +grsecurity_init(void)
55425 +{
55426 + int j;
55427 + /* create the per-cpu shared pages */
55428 +
55429 +#ifdef CONFIG_X86
55430 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
55431 +#endif
55432 +
55433 + for (j = 0; j < 4; j++) {
55434 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
55435 + if (gr_shared_page[j] == NULL) {
55436 + panic("Unable to allocate grsecurity shared page");
55437 + return;
55438 + }
55439 + }
55440 +
55441 + /* allocate log buffers */
55442 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
55443 + if (!gr_alert_log_fmt) {
55444 + panic("Unable to allocate grsecurity alert log format buffer");
55445 + return;
55446 + }
55447 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
55448 + if (!gr_audit_log_fmt) {
55449 + panic("Unable to allocate grsecurity audit log format buffer");
55450 + return;
55451 + }
55452 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55453 + if (!gr_alert_log_buf) {
55454 + panic("Unable to allocate grsecurity alert log buffer");
55455 + return;
55456 + }
55457 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55458 + if (!gr_audit_log_buf) {
55459 + panic("Unable to allocate grsecurity audit log buffer");
55460 + return;
55461 + }
55462 +
55463 + /* allocate memory for authentication structure */
55464 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
55465 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
55466 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
55467 +
55468 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
55469 + panic("Unable to allocate grsecurity authentication structure");
55470 + return;
55471 + }
55472 +
55473 +
55474 +#ifdef CONFIG_GRKERNSEC_IO
55475 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
55476 + grsec_disable_privio = 1;
55477 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55478 + grsec_disable_privio = 1;
55479 +#else
55480 + grsec_disable_privio = 0;
55481 +#endif
55482 +#endif
55483 +
55484 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55485 + /* for backward compatibility, tpe_invert always defaults to on if
55486 + enabled in the kernel
55487 + */
55488 + grsec_enable_tpe_invert = 1;
55489 +#endif
55490 +
55491 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55492 +#ifndef CONFIG_GRKERNSEC_SYSCTL
55493 + grsec_lock = 1;
55494 +#endif
55495 +
55496 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55497 + grsec_enable_audit_textrel = 1;
55498 +#endif
55499 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55500 + grsec_enable_log_rwxmaps = 1;
55501 +#endif
55502 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55503 + grsec_enable_group = 1;
55504 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
55505 +#endif
55506 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55507 + grsec_enable_ptrace_readexec = 1;
55508 +#endif
55509 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55510 + grsec_enable_chdir = 1;
55511 +#endif
55512 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55513 + grsec_enable_harden_ptrace = 1;
55514 +#endif
55515 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55516 + grsec_enable_mount = 1;
55517 +#endif
55518 +#ifdef CONFIG_GRKERNSEC_LINK
55519 + grsec_enable_link = 1;
55520 +#endif
55521 +#ifdef CONFIG_GRKERNSEC_BRUTE
55522 + grsec_enable_brute = 1;
55523 +#endif
55524 +#ifdef CONFIG_GRKERNSEC_DMESG
55525 + grsec_enable_dmesg = 1;
55526 +#endif
55527 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55528 + grsec_enable_blackhole = 1;
55529 + grsec_lastack_retries = 4;
55530 +#endif
55531 +#ifdef CONFIG_GRKERNSEC_FIFO
55532 + grsec_enable_fifo = 1;
55533 +#endif
55534 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55535 + grsec_enable_execlog = 1;
55536 +#endif
55537 +#ifdef CONFIG_GRKERNSEC_SETXID
55538 + grsec_enable_setxid = 1;
55539 +#endif
55540 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55541 + grsec_enable_signal = 1;
55542 +#endif
55543 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55544 + grsec_enable_forkfail = 1;
55545 +#endif
55546 +#ifdef CONFIG_GRKERNSEC_TIME
55547 + grsec_enable_time = 1;
55548 +#endif
55549 +#ifdef CONFIG_GRKERNSEC_RESLOG
55550 + grsec_resource_logging = 1;
55551 +#endif
55552 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55553 + grsec_enable_chroot_findtask = 1;
55554 +#endif
55555 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55556 + grsec_enable_chroot_unix = 1;
55557 +#endif
55558 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55559 + grsec_enable_chroot_mount = 1;
55560 +#endif
55561 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55562 + grsec_enable_chroot_fchdir = 1;
55563 +#endif
55564 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55565 + grsec_enable_chroot_shmat = 1;
55566 +#endif
55567 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55568 + grsec_enable_audit_ptrace = 1;
55569 +#endif
55570 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55571 + grsec_enable_chroot_double = 1;
55572 +#endif
55573 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55574 + grsec_enable_chroot_pivot = 1;
55575 +#endif
55576 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55577 + grsec_enable_chroot_chdir = 1;
55578 +#endif
55579 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55580 + grsec_enable_chroot_chmod = 1;
55581 +#endif
55582 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55583 + grsec_enable_chroot_mknod = 1;
55584 +#endif
55585 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55586 + grsec_enable_chroot_nice = 1;
55587 +#endif
55588 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55589 + grsec_enable_chroot_execlog = 1;
55590 +#endif
55591 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55592 + grsec_enable_chroot_caps = 1;
55593 +#endif
55594 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55595 + grsec_enable_chroot_sysctl = 1;
55596 +#endif
55597 +#ifdef CONFIG_GRKERNSEC_TPE
55598 + grsec_enable_tpe = 1;
55599 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55600 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55601 + grsec_enable_tpe_all = 1;
55602 +#endif
55603 +#endif
55604 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55605 + grsec_enable_socket_all = 1;
55606 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55607 +#endif
55608 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55609 + grsec_enable_socket_client = 1;
55610 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55611 +#endif
55612 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55613 + grsec_enable_socket_server = 1;
55614 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55615 +#endif
55616 +#endif
55617 +
55618 + return;
55619 +}
55620 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
55621 new file mode 100644
55622 index 0000000..3efe141
55623 --- /dev/null
55624 +++ b/grsecurity/grsec_link.c
55625 @@ -0,0 +1,43 @@
55626 +#include <linux/kernel.h>
55627 +#include <linux/sched.h>
55628 +#include <linux/fs.h>
55629 +#include <linux/file.h>
55630 +#include <linux/grinternal.h>
55631 +
55632 +int
55633 +gr_handle_follow_link(const struct inode *parent,
55634 + const struct inode *inode,
55635 + const struct dentry *dentry, const struct vfsmount *mnt)
55636 +{
55637 +#ifdef CONFIG_GRKERNSEC_LINK
55638 + const struct cred *cred = current_cred();
55639 +
55640 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55641 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55642 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55643 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55644 + return -EACCES;
55645 + }
55646 +#endif
55647 + return 0;
55648 +}
55649 +
55650 +int
55651 +gr_handle_hardlink(const struct dentry *dentry,
55652 + const struct vfsmount *mnt,
55653 + struct inode *inode, const int mode, const char *to)
55654 +{
55655 +#ifdef CONFIG_GRKERNSEC_LINK
55656 + const struct cred *cred = current_cred();
55657 +
55658 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55659 + (!S_ISREG(mode) || (mode & S_ISUID) ||
55660 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55661 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55662 + !capable(CAP_FOWNER) && cred->uid) {
55663 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55664 + return -EPERM;
55665 + }
55666 +#endif
55667 + return 0;
55668 +}
55669 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
55670 new file mode 100644
55671 index 0000000..a45d2e9
55672 --- /dev/null
55673 +++ b/grsecurity/grsec_log.c
55674 @@ -0,0 +1,322 @@
55675 +#include <linux/kernel.h>
55676 +#include <linux/sched.h>
55677 +#include <linux/file.h>
55678 +#include <linux/tty.h>
55679 +#include <linux/fs.h>
55680 +#include <linux/grinternal.h>
55681 +
55682 +#ifdef CONFIG_TREE_PREEMPT_RCU
55683 +#define DISABLE_PREEMPT() preempt_disable()
55684 +#define ENABLE_PREEMPT() preempt_enable()
55685 +#else
55686 +#define DISABLE_PREEMPT()
55687 +#define ENABLE_PREEMPT()
55688 +#endif
55689 +
55690 +#define BEGIN_LOCKS(x) \
55691 + DISABLE_PREEMPT(); \
55692 + rcu_read_lock(); \
55693 + read_lock(&tasklist_lock); \
55694 + read_lock(&grsec_exec_file_lock); \
55695 + if (x != GR_DO_AUDIT) \
55696 + spin_lock(&grsec_alert_lock); \
55697 + else \
55698 + spin_lock(&grsec_audit_lock)
55699 +
55700 +#define END_LOCKS(x) \
55701 + if (x != GR_DO_AUDIT) \
55702 + spin_unlock(&grsec_alert_lock); \
55703 + else \
55704 + spin_unlock(&grsec_audit_lock); \
55705 + read_unlock(&grsec_exec_file_lock); \
55706 + read_unlock(&tasklist_lock); \
55707 + rcu_read_unlock(); \
55708 + ENABLE_PREEMPT(); \
55709 + if (x == GR_DONT_AUDIT) \
55710 + gr_handle_alertkill(current)
55711 +
55712 +enum {
55713 + FLOODING,
55714 + NO_FLOODING
55715 +};
55716 +
55717 +extern char *gr_alert_log_fmt;
55718 +extern char *gr_audit_log_fmt;
55719 +extern char *gr_alert_log_buf;
55720 +extern char *gr_audit_log_buf;
55721 +
55722 +static int gr_log_start(int audit)
55723 +{
55724 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55725 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55726 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55727 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55728 + unsigned long curr_secs = get_seconds();
55729 +
55730 + if (audit == GR_DO_AUDIT)
55731 + goto set_fmt;
55732 +
55733 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55734 + grsec_alert_wtime = curr_secs;
55735 + grsec_alert_fyet = 0;
55736 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55737 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55738 + grsec_alert_fyet++;
55739 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55740 + grsec_alert_wtime = curr_secs;
55741 + grsec_alert_fyet++;
55742 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55743 + return FLOODING;
55744 + }
55745 + else return FLOODING;
55746 +
55747 +set_fmt:
55748 +#endif
55749 + memset(buf, 0, PAGE_SIZE);
55750 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
55751 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55752 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55753 + } else if (current->signal->curr_ip) {
55754 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55755 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55756 + } else if (gr_acl_is_enabled()) {
55757 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55758 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55759 + } else {
55760 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
55761 + strcpy(buf, fmt);
55762 + }
55763 +
55764 + return NO_FLOODING;
55765 +}
55766 +
55767 +static void gr_log_middle(int audit, const char *msg, va_list ap)
55768 + __attribute__ ((format (printf, 2, 0)));
55769 +
55770 +static void gr_log_middle(int audit, const char *msg, va_list ap)
55771 +{
55772 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55773 + unsigned int len = strlen(buf);
55774 +
55775 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55776 +
55777 + return;
55778 +}
55779 +
55780 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
55781 + __attribute__ ((format (printf, 2, 3)));
55782 +
55783 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
55784 +{
55785 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55786 + unsigned int len = strlen(buf);
55787 + va_list ap;
55788 +
55789 + va_start(ap, msg);
55790 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55791 + va_end(ap);
55792 +
55793 + return;
55794 +}
55795 +
55796 +static void gr_log_end(int audit, int append_default)
55797 +{
55798 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55799 +
55800 + if (append_default) {
55801 + unsigned int len = strlen(buf);
55802 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55803 + }
55804 +
55805 + printk("%s\n", buf);
55806 +
55807 + return;
55808 +}
55809 +
55810 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55811 +{
55812 + int logtype;
55813 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55814 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55815 + void *voidptr = NULL;
55816 + int num1 = 0, num2 = 0;
55817 + unsigned long ulong1 = 0, ulong2 = 0;
55818 + struct dentry *dentry = NULL;
55819 + struct vfsmount *mnt = NULL;
55820 + struct file *file = NULL;
55821 + struct task_struct *task = NULL;
55822 + const struct cred *cred, *pcred;
55823 + va_list ap;
55824 +
55825 + BEGIN_LOCKS(audit);
55826 + logtype = gr_log_start(audit);
55827 + if (logtype == FLOODING) {
55828 + END_LOCKS(audit);
55829 + return;
55830 + }
55831 + va_start(ap, argtypes);
55832 + switch (argtypes) {
55833 + case GR_TTYSNIFF:
55834 + task = va_arg(ap, struct task_struct *);
55835 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55836 + break;
55837 + case GR_SYSCTL_HIDDEN:
55838 + str1 = va_arg(ap, char *);
55839 + gr_log_middle_varargs(audit, msg, result, str1);
55840 + break;
55841 + case GR_RBAC:
55842 + dentry = va_arg(ap, struct dentry *);
55843 + mnt = va_arg(ap, struct vfsmount *);
55844 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55845 + break;
55846 + case GR_RBAC_STR:
55847 + dentry = va_arg(ap, struct dentry *);
55848 + mnt = va_arg(ap, struct vfsmount *);
55849 + str1 = va_arg(ap, char *);
55850 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55851 + break;
55852 + case GR_STR_RBAC:
55853 + str1 = va_arg(ap, char *);
55854 + dentry = va_arg(ap, struct dentry *);
55855 + mnt = va_arg(ap, struct vfsmount *);
55856 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55857 + break;
55858 + case GR_RBAC_MODE2:
55859 + dentry = va_arg(ap, struct dentry *);
55860 + mnt = va_arg(ap, struct vfsmount *);
55861 + str1 = va_arg(ap, char *);
55862 + str2 = va_arg(ap, char *);
55863 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55864 + break;
55865 + case GR_RBAC_MODE3:
55866 + dentry = va_arg(ap, struct dentry *);
55867 + mnt = va_arg(ap, struct vfsmount *);
55868 + str1 = va_arg(ap, char *);
55869 + str2 = va_arg(ap, char *);
55870 + str3 = va_arg(ap, char *);
55871 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55872 + break;
55873 + case GR_FILENAME:
55874 + dentry = va_arg(ap, struct dentry *);
55875 + mnt = va_arg(ap, struct vfsmount *);
55876 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55877 + break;
55878 + case GR_STR_FILENAME:
55879 + str1 = va_arg(ap, char *);
55880 + dentry = va_arg(ap, struct dentry *);
55881 + mnt = va_arg(ap, struct vfsmount *);
55882 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55883 + break;
55884 + case GR_FILENAME_STR:
55885 + dentry = va_arg(ap, struct dentry *);
55886 + mnt = va_arg(ap, struct vfsmount *);
55887 + str1 = va_arg(ap, char *);
55888 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55889 + break;
55890 + case GR_FILENAME_TWO_INT:
55891 + dentry = va_arg(ap, struct dentry *);
55892 + mnt = va_arg(ap, struct vfsmount *);
55893 + num1 = va_arg(ap, int);
55894 + num2 = va_arg(ap, int);
55895 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55896 + break;
55897 + case GR_FILENAME_TWO_INT_STR:
55898 + dentry = va_arg(ap, struct dentry *);
55899 + mnt = va_arg(ap, struct vfsmount *);
55900 + num1 = va_arg(ap, int);
55901 + num2 = va_arg(ap, int);
55902 + str1 = va_arg(ap, char *);
55903 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55904 + break;
55905 + case GR_TEXTREL:
55906 + file = va_arg(ap, struct file *);
55907 + ulong1 = va_arg(ap, unsigned long);
55908 + ulong2 = va_arg(ap, unsigned long);
55909 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55910 + break;
55911 + case GR_PTRACE:
55912 + task = va_arg(ap, struct task_struct *);
55913 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55914 + break;
55915 + case GR_RESOURCE:
55916 + task = va_arg(ap, struct task_struct *);
55917 + cred = __task_cred(task);
55918 + pcred = __task_cred(task->real_parent);
55919 + ulong1 = va_arg(ap, unsigned long);
55920 + str1 = va_arg(ap, char *);
55921 + ulong2 = va_arg(ap, unsigned long);
55922 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55923 + break;
55924 + case GR_CAP:
55925 + task = va_arg(ap, struct task_struct *);
55926 + cred = __task_cred(task);
55927 + pcred = __task_cred(task->real_parent);
55928 + str1 = va_arg(ap, char *);
55929 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55930 + break;
55931 + case GR_SIG:
55932 + str1 = va_arg(ap, char *);
55933 + voidptr = va_arg(ap, void *);
55934 + gr_log_middle_varargs(audit, msg, str1, voidptr);
55935 + break;
55936 + case GR_SIG2:
55937 + task = va_arg(ap, struct task_struct *);
55938 + cred = __task_cred(task);
55939 + pcred = __task_cred(task->real_parent);
55940 + num1 = va_arg(ap, int);
55941 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55942 + break;
55943 + case GR_CRASH1:
55944 + task = va_arg(ap, struct task_struct *);
55945 + cred = __task_cred(task);
55946 + pcred = __task_cred(task->real_parent);
55947 + ulong1 = va_arg(ap, unsigned long);
55948 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55949 + break;
55950 + case GR_CRASH2:
55951 + task = va_arg(ap, struct task_struct *);
55952 + cred = __task_cred(task);
55953 + pcred = __task_cred(task->real_parent);
55954 + ulong1 = va_arg(ap, unsigned long);
55955 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55956 + break;
55957 + case GR_RWXMAP:
55958 + file = va_arg(ap, struct file *);
55959 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55960 + break;
55961 + case GR_PSACCT:
55962 + {
55963 + unsigned int wday, cday;
55964 + __u8 whr, chr;
55965 + __u8 wmin, cmin;
55966 + __u8 wsec, csec;
55967 + char cur_tty[64] = { 0 };
55968 + char parent_tty[64] = { 0 };
55969 +
55970 + task = va_arg(ap, struct task_struct *);
55971 + wday = va_arg(ap, unsigned int);
55972 + cday = va_arg(ap, unsigned int);
55973 + whr = va_arg(ap, int);
55974 + chr = va_arg(ap, int);
55975 + wmin = va_arg(ap, int);
55976 + cmin = va_arg(ap, int);
55977 + wsec = va_arg(ap, int);
55978 + csec = va_arg(ap, int);
55979 + ulong1 = va_arg(ap, unsigned long);
55980 + cred = __task_cred(task);
55981 + pcred = __task_cred(task->real_parent);
55982 +
55983 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55984 + }
55985 + break;
55986 + default:
55987 + gr_log_middle(audit, msg, ap);
55988 + }
55989 + va_end(ap);
55990 + // these don't need DEFAULTSECARGS printed on the end
55991 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
55992 + gr_log_end(audit, 0);
55993 + else
55994 + gr_log_end(audit, 1);
55995 + END_LOCKS(audit);
55996 +}
55997 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
55998 new file mode 100644
55999 index 0000000..f536303
56000 --- /dev/null
56001 +++ b/grsecurity/grsec_mem.c
56002 @@ -0,0 +1,40 @@
56003 +#include <linux/kernel.h>
56004 +#include <linux/sched.h>
56005 +#include <linux/mm.h>
56006 +#include <linux/mman.h>
56007 +#include <linux/grinternal.h>
56008 +
56009 +void
56010 +gr_handle_ioperm(void)
56011 +{
56012 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
56013 + return;
56014 +}
56015 +
56016 +void
56017 +gr_handle_iopl(void)
56018 +{
56019 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
56020 + return;
56021 +}
56022 +
56023 +void
56024 +gr_handle_mem_readwrite(u64 from, u64 to)
56025 +{
56026 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
56027 + return;
56028 +}
56029 +
56030 +void
56031 +gr_handle_vm86(void)
56032 +{
56033 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
56034 + return;
56035 +}
56036 +
56037 +void
56038 +gr_log_badprocpid(const char *entry)
56039 +{
56040 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
56041 + return;
56042 +}
56043 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
56044 new file mode 100644
56045 index 0000000..2131422
56046 --- /dev/null
56047 +++ b/grsecurity/grsec_mount.c
56048 @@ -0,0 +1,62 @@
56049 +#include <linux/kernel.h>
56050 +#include <linux/sched.h>
56051 +#include <linux/mount.h>
56052 +#include <linux/grsecurity.h>
56053 +#include <linux/grinternal.h>
56054 +
56055 +void
56056 +gr_log_remount(const char *devname, const int retval)
56057 +{
56058 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56059 + if (grsec_enable_mount && (retval >= 0))
56060 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
56061 +#endif
56062 + return;
56063 +}
56064 +
56065 +void
56066 +gr_log_unmount(const char *devname, const int retval)
56067 +{
56068 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56069 + if (grsec_enable_mount && (retval >= 0))
56070 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
56071 +#endif
56072 + return;
56073 +}
56074 +
56075 +void
56076 +gr_log_mount(const char *from, const char *to, const int retval)
56077 +{
56078 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56079 + if (grsec_enable_mount && (retval >= 0))
56080 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
56081 +#endif
56082 + return;
56083 +}
56084 +
56085 +int
56086 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
56087 +{
56088 +#ifdef CONFIG_GRKERNSEC_ROFS
56089 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
56090 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
56091 + return -EPERM;
56092 + } else
56093 + return 0;
56094 +#endif
56095 + return 0;
56096 +}
56097 +
56098 +int
56099 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
56100 +{
56101 +#ifdef CONFIG_GRKERNSEC_ROFS
56102 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
56103 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
56104 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
56105 + return -EPERM;
56106 + } else
56107 + return 0;
56108 +#endif
56109 + return 0;
56110 +}
56111 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
56112 new file mode 100644
56113 index 0000000..a3b12a0
56114 --- /dev/null
56115 +++ b/grsecurity/grsec_pax.c
56116 @@ -0,0 +1,36 @@
56117 +#include <linux/kernel.h>
56118 +#include <linux/sched.h>
56119 +#include <linux/mm.h>
56120 +#include <linux/file.h>
56121 +#include <linux/grinternal.h>
56122 +#include <linux/grsecurity.h>
56123 +
56124 +void
56125 +gr_log_textrel(struct vm_area_struct * vma)
56126 +{
56127 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56128 + if (grsec_enable_audit_textrel)
56129 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
56130 +#endif
56131 + return;
56132 +}
56133 +
56134 +void
56135 +gr_log_rwxmmap(struct file *file)
56136 +{
56137 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56138 + if (grsec_enable_log_rwxmaps)
56139 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
56140 +#endif
56141 + return;
56142 +}
56143 +
56144 +void
56145 +gr_log_rwxmprotect(struct file *file)
56146 +{
56147 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56148 + if (grsec_enable_log_rwxmaps)
56149 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
56150 +#endif
56151 + return;
56152 +}
56153 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
56154 new file mode 100644
56155 index 0000000..f7f29aa
56156 --- /dev/null
56157 +++ b/grsecurity/grsec_ptrace.c
56158 @@ -0,0 +1,30 @@
56159 +#include <linux/kernel.h>
56160 +#include <linux/sched.h>
56161 +#include <linux/grinternal.h>
56162 +#include <linux/security.h>
56163 +
56164 +void
56165 +gr_audit_ptrace(struct task_struct *task)
56166 +{
56167 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56168 + if (grsec_enable_audit_ptrace)
56169 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
56170 +#endif
56171 + return;
56172 +}
56173 +
56174 +int
56175 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
56176 +{
56177 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56178 + const struct dentry *dentry = file->f_path.dentry;
56179 + const struct vfsmount *mnt = file->f_path.mnt;
56180 +
56181 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
56182 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
56183 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
56184 + return -EACCES;
56185 + }
56186 +#endif
56187 + return 0;
56188 +}
56189 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
56190 new file mode 100644
56191 index 0000000..7a5b2de
56192 --- /dev/null
56193 +++ b/grsecurity/grsec_sig.c
56194 @@ -0,0 +1,207 @@
56195 +#include <linux/kernel.h>
56196 +#include <linux/sched.h>
56197 +#include <linux/delay.h>
56198 +#include <linux/grsecurity.h>
56199 +#include <linux/grinternal.h>
56200 +#include <linux/hardirq.h>
56201 +
56202 +char *signames[] = {
56203 + [SIGSEGV] = "Segmentation fault",
56204 + [SIGILL] = "Illegal instruction",
56205 + [SIGABRT] = "Abort",
56206 + [SIGBUS] = "Invalid alignment/Bus error"
56207 +};
56208 +
56209 +void
56210 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
56211 +{
56212 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56213 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
56214 + (sig == SIGABRT) || (sig == SIGBUS))) {
56215 + if (t->pid == current->pid) {
56216 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
56217 + } else {
56218 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
56219 + }
56220 + }
56221 +#endif
56222 + return;
56223 +}
56224 +
56225 +int
56226 +gr_handle_signal(const struct task_struct *p, const int sig)
56227 +{
56228 +#ifdef CONFIG_GRKERNSEC
56229 + /* ignore the 0 signal for protected task checks */
56230 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
56231 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
56232 + return -EPERM;
56233 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
56234 + return -EPERM;
56235 + }
56236 +#endif
56237 + return 0;
56238 +}
56239 +
56240 +#ifdef CONFIG_GRKERNSEC
56241 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
56242 +
56243 +int gr_fake_force_sig(int sig, struct task_struct *t)
56244 +{
56245 + unsigned long int flags;
56246 + int ret, blocked, ignored;
56247 + struct k_sigaction *action;
56248 +
56249 + spin_lock_irqsave(&t->sighand->siglock, flags);
56250 + action = &t->sighand->action[sig-1];
56251 + ignored = action->sa.sa_handler == SIG_IGN;
56252 + blocked = sigismember(&t->blocked, sig);
56253 + if (blocked || ignored) {
56254 + action->sa.sa_handler = SIG_DFL;
56255 + if (blocked) {
56256 + sigdelset(&t->blocked, sig);
56257 + recalc_sigpending_and_wake(t);
56258 + }
56259 + }
56260 + if (action->sa.sa_handler == SIG_DFL)
56261 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
56262 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
56263 +
56264 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
56265 +
56266 + return ret;
56267 +}
56268 +#endif
56269 +
56270 +#ifdef CONFIG_GRKERNSEC_BRUTE
56271 +#define GR_USER_BAN_TIME (15 * 60)
56272 +
56273 +static int __get_dumpable(unsigned long mm_flags)
56274 +{
56275 + int ret;
56276 +
56277 + ret = mm_flags & MMF_DUMPABLE_MASK;
56278 + return (ret >= 2) ? 2 : ret;
56279 +}
56280 +#endif
56281 +
56282 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
56283 +{
56284 +#ifdef CONFIG_GRKERNSEC_BRUTE
56285 + uid_t uid = 0;
56286 +
56287 + if (!grsec_enable_brute)
56288 + return;
56289 +
56290 + rcu_read_lock();
56291 + read_lock(&tasklist_lock);
56292 + read_lock(&grsec_exec_file_lock);
56293 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
56294 + p->real_parent->brute = 1;
56295 + else {
56296 + const struct cred *cred = __task_cred(p), *cred2;
56297 + struct task_struct *tsk, *tsk2;
56298 +
56299 + if (!__get_dumpable(mm_flags) && cred->uid) {
56300 + struct user_struct *user;
56301 +
56302 + uid = cred->uid;
56303 +
56304 + /* this is put upon execution past expiration */
56305 + user = find_user(uid);
56306 + if (user == NULL)
56307 + goto unlock;
56308 + user->banned = 1;
56309 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
56310 + if (user->ban_expires == ~0UL)
56311 + user->ban_expires--;
56312 +
56313 + do_each_thread(tsk2, tsk) {
56314 + cred2 = __task_cred(tsk);
56315 + if (tsk != p && cred2->uid == uid)
56316 + gr_fake_force_sig(SIGKILL, tsk);
56317 + } while_each_thread(tsk2, tsk);
56318 + }
56319 + }
56320 +unlock:
56321 + read_unlock(&grsec_exec_file_lock);
56322 + read_unlock(&tasklist_lock);
56323 + rcu_read_unlock();
56324 +
56325 + if (uid)
56326 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
56327 +
56328 +#endif
56329 + return;
56330 +}
56331 +
56332 +void gr_handle_brute_check(void)
56333 +{
56334 +#ifdef CONFIG_GRKERNSEC_BRUTE
56335 + if (current->brute)
56336 + msleep(30 * 1000);
56337 +#endif
56338 + return;
56339 +}
56340 +
56341 +void gr_handle_kernel_exploit(void)
56342 +{
56343 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
56344 + const struct cred *cred;
56345 + struct task_struct *tsk, *tsk2;
56346 + struct user_struct *user;
56347 + uid_t uid;
56348 +
56349 + if (in_irq() || in_serving_softirq() || in_nmi())
56350 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
56351 +
56352 + uid = current_uid();
56353 +
56354 + if (uid == 0)
56355 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
56356 + else {
56357 + /* kill all the processes of this user, hold a reference
56358 + to their creds struct, and prevent them from creating
56359 + another process until system reset
56360 + */
56361 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
56362 + /* we intentionally leak this ref */
56363 + user = get_uid(current->cred->user);
56364 + if (user) {
56365 + user->banned = 1;
56366 + user->ban_expires = ~0UL;
56367 + }
56368 +
56369 + read_lock(&tasklist_lock);
56370 + do_each_thread(tsk2, tsk) {
56371 + cred = __task_cred(tsk);
56372 + if (cred->uid == uid)
56373 + gr_fake_force_sig(SIGKILL, tsk);
56374 + } while_each_thread(tsk2, tsk);
56375 + read_unlock(&tasklist_lock);
56376 + }
56377 +#endif
56378 +}
56379 +
56380 +int __gr_process_user_ban(struct user_struct *user)
56381 +{
56382 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56383 + if (unlikely(user->banned)) {
56384 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
56385 + user->banned = 0;
56386 + user->ban_expires = 0;
56387 + free_uid(user);
56388 + } else
56389 + return -EPERM;
56390 + }
56391 +#endif
56392 + return 0;
56393 +}
56394 +
56395 +int gr_process_user_ban(void)
56396 +{
56397 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56398 + return __gr_process_user_ban(current->cred->user);
56399 +#endif
56400 + return 0;
56401 +}
56402 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
56403 new file mode 100644
56404 index 0000000..4030d57
56405 --- /dev/null
56406 +++ b/grsecurity/grsec_sock.c
56407 @@ -0,0 +1,244 @@
56408 +#include <linux/kernel.h>
56409 +#include <linux/module.h>
56410 +#include <linux/sched.h>
56411 +#include <linux/file.h>
56412 +#include <linux/net.h>
56413 +#include <linux/in.h>
56414 +#include <linux/ip.h>
56415 +#include <net/sock.h>
56416 +#include <net/inet_sock.h>
56417 +#include <linux/grsecurity.h>
56418 +#include <linux/grinternal.h>
56419 +#include <linux/gracl.h>
56420 +
56421 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
56422 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
56423 +
56424 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
56425 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
56426 +
56427 +#ifdef CONFIG_UNIX_MODULE
56428 +EXPORT_SYMBOL(gr_acl_handle_unix);
56429 +EXPORT_SYMBOL(gr_acl_handle_mknod);
56430 +EXPORT_SYMBOL(gr_handle_chroot_unix);
56431 +EXPORT_SYMBOL(gr_handle_create);
56432 +#endif
56433 +
56434 +#ifdef CONFIG_GRKERNSEC
56435 +#define gr_conn_table_size 32749
56436 +struct conn_table_entry {
56437 + struct conn_table_entry *next;
56438 + struct signal_struct *sig;
56439 +};
56440 +
56441 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
56442 +DEFINE_SPINLOCK(gr_conn_table_lock);
56443 +
56444 +extern const char * gr_socktype_to_name(unsigned char type);
56445 +extern const char * gr_proto_to_name(unsigned char proto);
56446 +extern const char * gr_sockfamily_to_name(unsigned char family);
56447 +
56448 +static __inline__ int
56449 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
56450 +{
56451 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
56452 +}
56453 +
56454 +static __inline__ int
56455 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
56456 + __u16 sport, __u16 dport)
56457 +{
56458 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
56459 + sig->gr_sport == sport && sig->gr_dport == dport))
56460 + return 1;
56461 + else
56462 + return 0;
56463 +}
56464 +
56465 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
56466 +{
56467 + struct conn_table_entry **match;
56468 + unsigned int index;
56469 +
56470 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56471 + sig->gr_sport, sig->gr_dport,
56472 + gr_conn_table_size);
56473 +
56474 + newent->sig = sig;
56475 +
56476 + match = &gr_conn_table[index];
56477 + newent->next = *match;
56478 + *match = newent;
56479 +
56480 + return;
56481 +}
56482 +
56483 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
56484 +{
56485 + struct conn_table_entry *match, *last = NULL;
56486 + unsigned int index;
56487 +
56488 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56489 + sig->gr_sport, sig->gr_dport,
56490 + gr_conn_table_size);
56491 +
56492 + match = gr_conn_table[index];
56493 + while (match && !conn_match(match->sig,
56494 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
56495 + sig->gr_dport)) {
56496 + last = match;
56497 + match = match->next;
56498 + }
56499 +
56500 + if (match) {
56501 + if (last)
56502 + last->next = match->next;
56503 + else
56504 + gr_conn_table[index] = NULL;
56505 + kfree(match);
56506 + }
56507 +
56508 + return;
56509 +}
56510 +
56511 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
56512 + __u16 sport, __u16 dport)
56513 +{
56514 + struct conn_table_entry *match;
56515 + unsigned int index;
56516 +
56517 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
56518 +
56519 + match = gr_conn_table[index];
56520 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
56521 + match = match->next;
56522 +
56523 + if (match)
56524 + return match->sig;
56525 + else
56526 + return NULL;
56527 +}
56528 +
56529 +#endif
56530 +
56531 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
56532 +{
56533 +#ifdef CONFIG_GRKERNSEC
56534 + struct signal_struct *sig = task->signal;
56535 + struct conn_table_entry *newent;
56536 +
56537 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
56538 + if (newent == NULL)
56539 + return;
56540 + /* no bh lock needed since we are called with bh disabled */
56541 + spin_lock(&gr_conn_table_lock);
56542 + gr_del_task_from_ip_table_nolock(sig);
56543 + sig->gr_saddr = inet->inet_rcv_saddr;
56544 + sig->gr_daddr = inet->inet_daddr;
56545 + sig->gr_sport = inet->inet_sport;
56546 + sig->gr_dport = inet->inet_dport;
56547 + gr_add_to_task_ip_table_nolock(sig, newent);
56548 + spin_unlock(&gr_conn_table_lock);
56549 +#endif
56550 + return;
56551 +}
56552 +
56553 +void gr_del_task_from_ip_table(struct task_struct *task)
56554 +{
56555 +#ifdef CONFIG_GRKERNSEC
56556 + spin_lock_bh(&gr_conn_table_lock);
56557 + gr_del_task_from_ip_table_nolock(task->signal);
56558 + spin_unlock_bh(&gr_conn_table_lock);
56559 +#endif
56560 + return;
56561 +}
56562 +
56563 +void
56564 +gr_attach_curr_ip(const struct sock *sk)
56565 +{
56566 +#ifdef CONFIG_GRKERNSEC
56567 + struct signal_struct *p, *set;
56568 + const struct inet_sock *inet = inet_sk(sk);
56569 +
56570 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
56571 + return;
56572 +
56573 + set = current->signal;
56574 +
56575 + spin_lock_bh(&gr_conn_table_lock);
56576 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
56577 + inet->inet_dport, inet->inet_sport);
56578 + if (unlikely(p != NULL)) {
56579 + set->curr_ip = p->curr_ip;
56580 + set->used_accept = 1;
56581 + gr_del_task_from_ip_table_nolock(p);
56582 + spin_unlock_bh(&gr_conn_table_lock);
56583 + return;
56584 + }
56585 + spin_unlock_bh(&gr_conn_table_lock);
56586 +
56587 + set->curr_ip = inet->inet_daddr;
56588 + set->used_accept = 1;
56589 +#endif
56590 + return;
56591 +}
56592 +
56593 +int
56594 +gr_handle_sock_all(const int family, const int type, const int protocol)
56595 +{
56596 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56597 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56598 + (family != AF_UNIX)) {
56599 + if (family == AF_INET)
56600 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56601 + else
56602 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56603 + return -EACCES;
56604 + }
56605 +#endif
56606 + return 0;
56607 +}
56608 +
56609 +int
56610 +gr_handle_sock_server(const struct sockaddr *sck)
56611 +{
56612 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56613 + if (grsec_enable_socket_server &&
56614 + in_group_p(grsec_socket_server_gid) &&
56615 + sck && (sck->sa_family != AF_UNIX) &&
56616 + (sck->sa_family != AF_LOCAL)) {
56617 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56618 + return -EACCES;
56619 + }
56620 +#endif
56621 + return 0;
56622 +}
56623 +
56624 +int
56625 +gr_handle_sock_server_other(const struct sock *sck)
56626 +{
56627 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56628 + if (grsec_enable_socket_server &&
56629 + in_group_p(grsec_socket_server_gid) &&
56630 + sck && (sck->sk_family != AF_UNIX) &&
56631 + (sck->sk_family != AF_LOCAL)) {
56632 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56633 + return -EACCES;
56634 + }
56635 +#endif
56636 + return 0;
56637 +}
56638 +
56639 +int
56640 +gr_handle_sock_client(const struct sockaddr *sck)
56641 +{
56642 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56643 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56644 + sck && (sck->sa_family != AF_UNIX) &&
56645 + (sck->sa_family != AF_LOCAL)) {
56646 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56647 + return -EACCES;
56648 + }
56649 +#endif
56650 + return 0;
56651 +}
56652 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
56653 new file mode 100644
56654 index 0000000..a1aedd7
56655 --- /dev/null
56656 +++ b/grsecurity/grsec_sysctl.c
56657 @@ -0,0 +1,451 @@
56658 +#include <linux/kernel.h>
56659 +#include <linux/sched.h>
56660 +#include <linux/sysctl.h>
56661 +#include <linux/grsecurity.h>
56662 +#include <linux/grinternal.h>
56663 +
56664 +int
56665 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56666 +{
56667 +#ifdef CONFIG_GRKERNSEC_SYSCTL
56668 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56669 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56670 + return -EACCES;
56671 + }
56672 +#endif
56673 + return 0;
56674 +}
56675 +
56676 +#ifdef CONFIG_GRKERNSEC_ROFS
56677 +static int __maybe_unused one = 1;
56678 +#endif
56679 +
56680 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56681 +struct ctl_table grsecurity_table[] = {
56682 +#ifdef CONFIG_GRKERNSEC_SYSCTL
56683 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56684 +#ifdef CONFIG_GRKERNSEC_IO
56685 + {
56686 + .procname = "disable_priv_io",
56687 + .data = &grsec_disable_privio,
56688 + .maxlen = sizeof(int),
56689 + .mode = 0600,
56690 + .proc_handler = &proc_dointvec,
56691 + },
56692 +#endif
56693 +#endif
56694 +#ifdef CONFIG_GRKERNSEC_LINK
56695 + {
56696 + .procname = "linking_restrictions",
56697 + .data = &grsec_enable_link,
56698 + .maxlen = sizeof(int),
56699 + .mode = 0600,
56700 + .proc_handler = &proc_dointvec,
56701 + },
56702 +#endif
56703 +#ifdef CONFIG_GRKERNSEC_BRUTE
56704 + {
56705 + .procname = "deter_bruteforce",
56706 + .data = &grsec_enable_brute,
56707 + .maxlen = sizeof(int),
56708 + .mode = 0600,
56709 + .proc_handler = &proc_dointvec,
56710 + },
56711 +#endif
56712 +#ifdef CONFIG_GRKERNSEC_FIFO
56713 + {
56714 + .procname = "fifo_restrictions",
56715 + .data = &grsec_enable_fifo,
56716 + .maxlen = sizeof(int),
56717 + .mode = 0600,
56718 + .proc_handler = &proc_dointvec,
56719 + },
56720 +#endif
56721 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56722 + {
56723 + .procname = "ptrace_readexec",
56724 + .data = &grsec_enable_ptrace_readexec,
56725 + .maxlen = sizeof(int),
56726 + .mode = 0600,
56727 + .proc_handler = &proc_dointvec,
56728 + },
56729 +#endif
56730 +#ifdef CONFIG_GRKERNSEC_SETXID
56731 + {
56732 + .procname = "consistent_setxid",
56733 + .data = &grsec_enable_setxid,
56734 + .maxlen = sizeof(int),
56735 + .mode = 0600,
56736 + .proc_handler = &proc_dointvec,
56737 + },
56738 +#endif
56739 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56740 + {
56741 + .procname = "ip_blackhole",
56742 + .data = &grsec_enable_blackhole,
56743 + .maxlen = sizeof(int),
56744 + .mode = 0600,
56745 + .proc_handler = &proc_dointvec,
56746 + },
56747 + {
56748 + .procname = "lastack_retries",
56749 + .data = &grsec_lastack_retries,
56750 + .maxlen = sizeof(int),
56751 + .mode = 0600,
56752 + .proc_handler = &proc_dointvec,
56753 + },
56754 +#endif
56755 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56756 + {
56757 + .procname = "exec_logging",
56758 + .data = &grsec_enable_execlog,
56759 + .maxlen = sizeof(int),
56760 + .mode = 0600,
56761 + .proc_handler = &proc_dointvec,
56762 + },
56763 +#endif
56764 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56765 + {
56766 + .procname = "rwxmap_logging",
56767 + .data = &grsec_enable_log_rwxmaps,
56768 + .maxlen = sizeof(int),
56769 + .mode = 0600,
56770 + .proc_handler = &proc_dointvec,
56771 + },
56772 +#endif
56773 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56774 + {
56775 + .procname = "signal_logging",
56776 + .data = &grsec_enable_signal,
56777 + .maxlen = sizeof(int),
56778 + .mode = 0600,
56779 + .proc_handler = &proc_dointvec,
56780 + },
56781 +#endif
56782 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56783 + {
56784 + .procname = "forkfail_logging",
56785 + .data = &grsec_enable_forkfail,
56786 + .maxlen = sizeof(int),
56787 + .mode = 0600,
56788 + .proc_handler = &proc_dointvec,
56789 + },
56790 +#endif
56791 +#ifdef CONFIG_GRKERNSEC_TIME
56792 + {
56793 + .procname = "timechange_logging",
56794 + .data = &grsec_enable_time,
56795 + .maxlen = sizeof(int),
56796 + .mode = 0600,
56797 + .proc_handler = &proc_dointvec,
56798 + },
56799 +#endif
56800 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56801 + {
56802 + .procname = "chroot_deny_shmat",
56803 + .data = &grsec_enable_chroot_shmat,
56804 + .maxlen = sizeof(int),
56805 + .mode = 0600,
56806 + .proc_handler = &proc_dointvec,
56807 + },
56808 +#endif
56809 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56810 + {
56811 + .procname = "chroot_deny_unix",
56812 + .data = &grsec_enable_chroot_unix,
56813 + .maxlen = sizeof(int),
56814 + .mode = 0600,
56815 + .proc_handler = &proc_dointvec,
56816 + },
56817 +#endif
56818 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56819 + {
56820 + .procname = "chroot_deny_mount",
56821 + .data = &grsec_enable_chroot_mount,
56822 + .maxlen = sizeof(int),
56823 + .mode = 0600,
56824 + .proc_handler = &proc_dointvec,
56825 + },
56826 +#endif
56827 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56828 + {
56829 + .procname = "chroot_deny_fchdir",
56830 + .data = &grsec_enable_chroot_fchdir,
56831 + .maxlen = sizeof(int),
56832 + .mode = 0600,
56833 + .proc_handler = &proc_dointvec,
56834 + },
56835 +#endif
56836 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56837 + {
56838 + .procname = "chroot_deny_chroot",
56839 + .data = &grsec_enable_chroot_double,
56840 + .maxlen = sizeof(int),
56841 + .mode = 0600,
56842 + .proc_handler = &proc_dointvec,
56843 + },
56844 +#endif
56845 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56846 + {
56847 + .procname = "chroot_deny_pivot",
56848 + .data = &grsec_enable_chroot_pivot,
56849 + .maxlen = sizeof(int),
56850 + .mode = 0600,
56851 + .proc_handler = &proc_dointvec,
56852 + },
56853 +#endif
56854 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56855 + {
56856 + .procname = "chroot_enforce_chdir",
56857 + .data = &grsec_enable_chroot_chdir,
56858 + .maxlen = sizeof(int),
56859 + .mode = 0600,
56860 + .proc_handler = &proc_dointvec,
56861 + },
56862 +#endif
56863 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56864 + {
56865 + .procname = "chroot_deny_chmod",
56866 + .data = &grsec_enable_chroot_chmod,
56867 + .maxlen = sizeof(int),
56868 + .mode = 0600,
56869 + .proc_handler = &proc_dointvec,
56870 + },
56871 +#endif
56872 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56873 + {
56874 + .procname = "chroot_deny_mknod",
56875 + .data = &grsec_enable_chroot_mknod,
56876 + .maxlen = sizeof(int),
56877 + .mode = 0600,
56878 + .proc_handler = &proc_dointvec,
56879 + },
56880 +#endif
56881 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56882 + {
56883 + .procname = "chroot_restrict_nice",
56884 + .data = &grsec_enable_chroot_nice,
56885 + .maxlen = sizeof(int),
56886 + .mode = 0600,
56887 + .proc_handler = &proc_dointvec,
56888 + },
56889 +#endif
56890 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56891 + {
56892 + .procname = "chroot_execlog",
56893 + .data = &grsec_enable_chroot_execlog,
56894 + .maxlen = sizeof(int),
56895 + .mode = 0600,
56896 + .proc_handler = &proc_dointvec,
56897 + },
56898 +#endif
56899 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56900 + {
56901 + .procname = "chroot_caps",
56902 + .data = &grsec_enable_chroot_caps,
56903 + .maxlen = sizeof(int),
56904 + .mode = 0600,
56905 + .proc_handler = &proc_dointvec,
56906 + },
56907 +#endif
56908 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56909 + {
56910 + .procname = "chroot_deny_sysctl",
56911 + .data = &grsec_enable_chroot_sysctl,
56912 + .maxlen = sizeof(int),
56913 + .mode = 0600,
56914 + .proc_handler = &proc_dointvec,
56915 + },
56916 +#endif
56917 +#ifdef CONFIG_GRKERNSEC_TPE
56918 + {
56919 + .procname = "tpe",
56920 + .data = &grsec_enable_tpe,
56921 + .maxlen = sizeof(int),
56922 + .mode = 0600,
56923 + .proc_handler = &proc_dointvec,
56924 + },
56925 + {
56926 + .procname = "tpe_gid",
56927 + .data = &grsec_tpe_gid,
56928 + .maxlen = sizeof(int),
56929 + .mode = 0600,
56930 + .proc_handler = &proc_dointvec,
56931 + },
56932 +#endif
56933 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56934 + {
56935 + .procname = "tpe_invert",
56936 + .data = &grsec_enable_tpe_invert,
56937 + .maxlen = sizeof(int),
56938 + .mode = 0600,
56939 + .proc_handler = &proc_dointvec,
56940 + },
56941 +#endif
56942 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
56943 + {
56944 + .procname = "tpe_restrict_all",
56945 + .data = &grsec_enable_tpe_all,
56946 + .maxlen = sizeof(int),
56947 + .mode = 0600,
56948 + .proc_handler = &proc_dointvec,
56949 + },
56950 +#endif
56951 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56952 + {
56953 + .procname = "socket_all",
56954 + .data = &grsec_enable_socket_all,
56955 + .maxlen = sizeof(int),
56956 + .mode = 0600,
56957 + .proc_handler = &proc_dointvec,
56958 + },
56959 + {
56960 + .procname = "socket_all_gid",
56961 + .data = &grsec_socket_all_gid,
56962 + .maxlen = sizeof(int),
56963 + .mode = 0600,
56964 + .proc_handler = &proc_dointvec,
56965 + },
56966 +#endif
56967 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56968 + {
56969 + .procname = "socket_client",
56970 + .data = &grsec_enable_socket_client,
56971 + .maxlen = sizeof(int),
56972 + .mode = 0600,
56973 + .proc_handler = &proc_dointvec,
56974 + },
56975 + {
56976 + .procname = "socket_client_gid",
56977 + .data = &grsec_socket_client_gid,
56978 + .maxlen = sizeof(int),
56979 + .mode = 0600,
56980 + .proc_handler = &proc_dointvec,
56981 + },
56982 +#endif
56983 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56984 + {
56985 + .procname = "socket_server",
56986 + .data = &grsec_enable_socket_server,
56987 + .maxlen = sizeof(int),
56988 + .mode = 0600,
56989 + .proc_handler = &proc_dointvec,
56990 + },
56991 + {
56992 + .procname = "socket_server_gid",
56993 + .data = &grsec_socket_server_gid,
56994 + .maxlen = sizeof(int),
56995 + .mode = 0600,
56996 + .proc_handler = &proc_dointvec,
56997 + },
56998 +#endif
56999 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57000 + {
57001 + .procname = "audit_group",
57002 + .data = &grsec_enable_group,
57003 + .maxlen = sizeof(int),
57004 + .mode = 0600,
57005 + .proc_handler = &proc_dointvec,
57006 + },
57007 + {
57008 + .procname = "audit_gid",
57009 + .data = &grsec_audit_gid,
57010 + .maxlen = sizeof(int),
57011 + .mode = 0600,
57012 + .proc_handler = &proc_dointvec,
57013 + },
57014 +#endif
57015 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57016 + {
57017 + .procname = "audit_chdir",
57018 + .data = &grsec_enable_chdir,
57019 + .maxlen = sizeof(int),
57020 + .mode = 0600,
57021 + .proc_handler = &proc_dointvec,
57022 + },
57023 +#endif
57024 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57025 + {
57026 + .procname = "audit_mount",
57027 + .data = &grsec_enable_mount,
57028 + .maxlen = sizeof(int),
57029 + .mode = 0600,
57030 + .proc_handler = &proc_dointvec,
57031 + },
57032 +#endif
57033 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57034 + {
57035 + .procname = "audit_textrel",
57036 + .data = &grsec_enable_audit_textrel,
57037 + .maxlen = sizeof(int),
57038 + .mode = 0600,
57039 + .proc_handler = &proc_dointvec,
57040 + },
57041 +#endif
57042 +#ifdef CONFIG_GRKERNSEC_DMESG
57043 + {
57044 + .procname = "dmesg",
57045 + .data = &grsec_enable_dmesg,
57046 + .maxlen = sizeof(int),
57047 + .mode = 0600,
57048 + .proc_handler = &proc_dointvec,
57049 + },
57050 +#endif
57051 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57052 + {
57053 + .procname = "chroot_findtask",
57054 + .data = &grsec_enable_chroot_findtask,
57055 + .maxlen = sizeof(int),
57056 + .mode = 0600,
57057 + .proc_handler = &proc_dointvec,
57058 + },
57059 +#endif
57060 +#ifdef CONFIG_GRKERNSEC_RESLOG
57061 + {
57062 + .procname = "resource_logging",
57063 + .data = &grsec_resource_logging,
57064 + .maxlen = sizeof(int),
57065 + .mode = 0600,
57066 + .proc_handler = &proc_dointvec,
57067 + },
57068 +#endif
57069 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57070 + {
57071 + .procname = "audit_ptrace",
57072 + .data = &grsec_enable_audit_ptrace,
57073 + .maxlen = sizeof(int),
57074 + .mode = 0600,
57075 + .proc_handler = &proc_dointvec,
57076 + },
57077 +#endif
57078 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57079 + {
57080 + .procname = "harden_ptrace",
57081 + .data = &grsec_enable_harden_ptrace,
57082 + .maxlen = sizeof(int),
57083 + .mode = 0600,
57084 + .proc_handler = &proc_dointvec,
57085 + },
57086 +#endif
57087 + {
57088 + .procname = "grsec_lock",
57089 + .data = &grsec_lock,
57090 + .maxlen = sizeof(int),
57091 + .mode = 0600,
57092 + .proc_handler = &proc_dointvec,
57093 + },
57094 +#endif
57095 +#ifdef CONFIG_GRKERNSEC_ROFS
57096 + {
57097 + .procname = "romount_protect",
57098 + .data = &grsec_enable_rofs,
57099 + .maxlen = sizeof(int),
57100 + .mode = 0600,
57101 + .proc_handler = &proc_dointvec_minmax,
57102 + .extra1 = &one,
57103 + .extra2 = &one,
57104 + },
57105 +#endif
57106 + { }
57107 +};
57108 +#endif
57109 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
57110 new file mode 100644
57111 index 0000000..0dc13c3
57112 --- /dev/null
57113 +++ b/grsecurity/grsec_time.c
57114 @@ -0,0 +1,16 @@
57115 +#include <linux/kernel.h>
57116 +#include <linux/sched.h>
57117 +#include <linux/grinternal.h>
57118 +#include <linux/module.h>
57119 +
57120 +void
57121 +gr_log_timechange(void)
57122 +{
57123 +#ifdef CONFIG_GRKERNSEC_TIME
57124 + if (grsec_enable_time)
57125 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
57126 +#endif
57127 + return;
57128 +}
57129 +
57130 +EXPORT_SYMBOL(gr_log_timechange);
57131 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
57132 new file mode 100644
57133 index 0000000..07e0dc0
57134 --- /dev/null
57135 +++ b/grsecurity/grsec_tpe.c
57136 @@ -0,0 +1,73 @@
57137 +#include <linux/kernel.h>
57138 +#include <linux/sched.h>
57139 +#include <linux/file.h>
57140 +#include <linux/fs.h>
57141 +#include <linux/grinternal.h>
57142 +
57143 +extern int gr_acl_tpe_check(void);
57144 +
57145 +int
57146 +gr_tpe_allow(const struct file *file)
57147 +{
57148 +#ifdef CONFIG_GRKERNSEC
57149 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
57150 + const struct cred *cred = current_cred();
57151 + char *msg = NULL;
57152 + char *msg2 = NULL;
57153 +
57154 + // never restrict root
57155 + if (!cred->uid)
57156 + return 1;
57157 +
57158 + if (grsec_enable_tpe) {
57159 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57160 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
57161 + msg = "not being in trusted group";
57162 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
57163 + msg = "being in untrusted group";
57164 +#else
57165 + if (in_group_p(grsec_tpe_gid))
57166 + msg = "being in untrusted group";
57167 +#endif
57168 + }
57169 + if (!msg && gr_acl_tpe_check())
57170 + msg = "being in untrusted role";
57171 +
57172 + // not in any affected group/role
57173 + if (!msg)
57174 + goto next_check;
57175 +
57176 + if (inode->i_uid)
57177 + msg2 = "file in non-root-owned directory";
57178 + else if (inode->i_mode & S_IWOTH)
57179 + msg2 = "file in world-writable directory";
57180 + else if (inode->i_mode & S_IWGRP)
57181 + msg2 = "file in group-writable directory";
57182 +
57183 + if (msg && msg2) {
57184 + char fullmsg[70] = {0};
57185 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
57186 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
57187 + return 0;
57188 + }
57189 + msg = NULL;
57190 +next_check:
57191 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
57192 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
57193 + return 1;
57194 +
57195 + if (inode->i_uid && (inode->i_uid != cred->uid))
57196 + msg = "directory not owned by user";
57197 + else if (inode->i_mode & S_IWOTH)
57198 + msg = "file in world-writable directory";
57199 + else if (inode->i_mode & S_IWGRP)
57200 + msg = "file in group-writable directory";
57201 +
57202 + if (msg) {
57203 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
57204 + return 0;
57205 + }
57206 +#endif
57207 +#endif
57208 + return 1;
57209 +}
57210 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
57211 new file mode 100644
57212 index 0000000..9f7b1ac
57213 --- /dev/null
57214 +++ b/grsecurity/grsum.c
57215 @@ -0,0 +1,61 @@
57216 +#include <linux/err.h>
57217 +#include <linux/kernel.h>
57218 +#include <linux/sched.h>
57219 +#include <linux/mm.h>
57220 +#include <linux/scatterlist.h>
57221 +#include <linux/crypto.h>
57222 +#include <linux/gracl.h>
57223 +
57224 +
57225 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
57226 +#error "crypto and sha256 must be built into the kernel"
57227 +#endif
57228 +
57229 +int
57230 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
57231 +{
57232 + char *p;
57233 + struct crypto_hash *tfm;
57234 + struct hash_desc desc;
57235 + struct scatterlist sg;
57236 + unsigned char temp_sum[GR_SHA_LEN];
57237 + volatile int retval = 0;
57238 + volatile int dummy = 0;
57239 + unsigned int i;
57240 +
57241 + sg_init_table(&sg, 1);
57242 +
57243 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
57244 + if (IS_ERR(tfm)) {
57245 + /* should never happen, since sha256 should be built in */
57246 + return 1;
57247 + }
57248 +
57249 + desc.tfm = tfm;
57250 + desc.flags = 0;
57251 +
57252 + crypto_hash_init(&desc);
57253 +
57254 + p = salt;
57255 + sg_set_buf(&sg, p, GR_SALT_LEN);
57256 + crypto_hash_update(&desc, &sg, sg.length);
57257 +
57258 + p = entry->pw;
57259 + sg_set_buf(&sg, p, strlen(p));
57260 +
57261 + crypto_hash_update(&desc, &sg, sg.length);
57262 +
57263 + crypto_hash_final(&desc, temp_sum);
57264 +
57265 + memset(entry->pw, 0, GR_PW_LEN);
57266 +
57267 + for (i = 0; i < GR_SHA_LEN; i++)
57268 + if (sum[i] != temp_sum[i])
57269 + retval = 1;
57270 + else
57271 + dummy = 1; // waste a cycle
57272 +
57273 + crypto_free_hash(tfm);
57274 +
57275 + return retval;
57276 +}
57277 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
57278 index 6cd5b64..f620d2d 100644
57279 --- a/include/acpi/acpi_bus.h
57280 +++ b/include/acpi/acpi_bus.h
57281 @@ -107,7 +107,7 @@ struct acpi_device_ops {
57282 acpi_op_bind bind;
57283 acpi_op_unbind unbind;
57284 acpi_op_notify notify;
57285 -};
57286 +} __no_const;
57287
57288 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57289
57290 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
57291 index b7babf0..71e4e74 100644
57292 --- a/include/asm-generic/atomic-long.h
57293 +++ b/include/asm-generic/atomic-long.h
57294 @@ -22,6 +22,12 @@
57295
57296 typedef atomic64_t atomic_long_t;
57297
57298 +#ifdef CONFIG_PAX_REFCOUNT
57299 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
57300 +#else
57301 +typedef atomic64_t atomic_long_unchecked_t;
57302 +#endif
57303 +
57304 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57305
57306 static inline long atomic_long_read(atomic_long_t *l)
57307 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57308 return (long)atomic64_read(v);
57309 }
57310
57311 +#ifdef CONFIG_PAX_REFCOUNT
57312 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57313 +{
57314 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57315 +
57316 + return (long)atomic64_read_unchecked(v);
57317 +}
57318 +#endif
57319 +
57320 static inline void atomic_long_set(atomic_long_t *l, long i)
57321 {
57322 atomic64_t *v = (atomic64_t *)l;
57323 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57324 atomic64_set(v, i);
57325 }
57326
57327 +#ifdef CONFIG_PAX_REFCOUNT
57328 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57329 +{
57330 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57331 +
57332 + atomic64_set_unchecked(v, i);
57333 +}
57334 +#endif
57335 +
57336 static inline void atomic_long_inc(atomic_long_t *l)
57337 {
57338 atomic64_t *v = (atomic64_t *)l;
57339 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57340 atomic64_inc(v);
57341 }
57342
57343 +#ifdef CONFIG_PAX_REFCOUNT
57344 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57345 +{
57346 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57347 +
57348 + atomic64_inc_unchecked(v);
57349 +}
57350 +#endif
57351 +
57352 static inline void atomic_long_dec(atomic_long_t *l)
57353 {
57354 atomic64_t *v = (atomic64_t *)l;
57355 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57356 atomic64_dec(v);
57357 }
57358
57359 +#ifdef CONFIG_PAX_REFCOUNT
57360 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57361 +{
57362 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57363 +
57364 + atomic64_dec_unchecked(v);
57365 +}
57366 +#endif
57367 +
57368 static inline void atomic_long_add(long i, atomic_long_t *l)
57369 {
57370 atomic64_t *v = (atomic64_t *)l;
57371 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57372 atomic64_add(i, v);
57373 }
57374
57375 +#ifdef CONFIG_PAX_REFCOUNT
57376 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57377 +{
57378 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57379 +
57380 + atomic64_add_unchecked(i, v);
57381 +}
57382 +#endif
57383 +
57384 static inline void atomic_long_sub(long i, atomic_long_t *l)
57385 {
57386 atomic64_t *v = (atomic64_t *)l;
57387 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57388 atomic64_sub(i, v);
57389 }
57390
57391 +#ifdef CONFIG_PAX_REFCOUNT
57392 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57393 +{
57394 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57395 +
57396 + atomic64_sub_unchecked(i, v);
57397 +}
57398 +#endif
57399 +
57400 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57401 {
57402 atomic64_t *v = (atomic64_t *)l;
57403 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57404 return (long)atomic64_inc_return(v);
57405 }
57406
57407 +#ifdef CONFIG_PAX_REFCOUNT
57408 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57409 +{
57410 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57411 +
57412 + return (long)atomic64_inc_return_unchecked(v);
57413 +}
57414 +#endif
57415 +
57416 static inline long atomic_long_dec_return(atomic_long_t *l)
57417 {
57418 atomic64_t *v = (atomic64_t *)l;
57419 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57420
57421 typedef atomic_t atomic_long_t;
57422
57423 +#ifdef CONFIG_PAX_REFCOUNT
57424 +typedef atomic_unchecked_t atomic_long_unchecked_t;
57425 +#else
57426 +typedef atomic_t atomic_long_unchecked_t;
57427 +#endif
57428 +
57429 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57430 static inline long atomic_long_read(atomic_long_t *l)
57431 {
57432 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57433 return (long)atomic_read(v);
57434 }
57435
57436 +#ifdef CONFIG_PAX_REFCOUNT
57437 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57438 +{
57439 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57440 +
57441 + return (long)atomic_read_unchecked(v);
57442 +}
57443 +#endif
57444 +
57445 static inline void atomic_long_set(atomic_long_t *l, long i)
57446 {
57447 atomic_t *v = (atomic_t *)l;
57448 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57449 atomic_set(v, i);
57450 }
57451
57452 +#ifdef CONFIG_PAX_REFCOUNT
57453 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57454 +{
57455 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57456 +
57457 + atomic_set_unchecked(v, i);
57458 +}
57459 +#endif
57460 +
57461 static inline void atomic_long_inc(atomic_long_t *l)
57462 {
57463 atomic_t *v = (atomic_t *)l;
57464 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57465 atomic_inc(v);
57466 }
57467
57468 +#ifdef CONFIG_PAX_REFCOUNT
57469 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57470 +{
57471 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57472 +
57473 + atomic_inc_unchecked(v);
57474 +}
57475 +#endif
57476 +
57477 static inline void atomic_long_dec(atomic_long_t *l)
57478 {
57479 atomic_t *v = (atomic_t *)l;
57480 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57481 atomic_dec(v);
57482 }
57483
57484 +#ifdef CONFIG_PAX_REFCOUNT
57485 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57486 +{
57487 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57488 +
57489 + atomic_dec_unchecked(v);
57490 +}
57491 +#endif
57492 +
57493 static inline void atomic_long_add(long i, atomic_long_t *l)
57494 {
57495 atomic_t *v = (atomic_t *)l;
57496 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57497 atomic_add(i, v);
57498 }
57499
57500 +#ifdef CONFIG_PAX_REFCOUNT
57501 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57502 +{
57503 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57504 +
57505 + atomic_add_unchecked(i, v);
57506 +}
57507 +#endif
57508 +
57509 static inline void atomic_long_sub(long i, atomic_long_t *l)
57510 {
57511 atomic_t *v = (atomic_t *)l;
57512 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57513 atomic_sub(i, v);
57514 }
57515
57516 +#ifdef CONFIG_PAX_REFCOUNT
57517 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57518 +{
57519 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57520 +
57521 + atomic_sub_unchecked(i, v);
57522 +}
57523 +#endif
57524 +
57525 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57526 {
57527 atomic_t *v = (atomic_t *)l;
57528 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57529 return (long)atomic_inc_return(v);
57530 }
57531
57532 +#ifdef CONFIG_PAX_REFCOUNT
57533 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57534 +{
57535 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57536 +
57537 + return (long)atomic_inc_return_unchecked(v);
57538 +}
57539 +#endif
57540 +
57541 static inline long atomic_long_dec_return(atomic_long_t *l)
57542 {
57543 atomic_t *v = (atomic_t *)l;
57544 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57545
57546 #endif /* BITS_PER_LONG == 64 */
57547
57548 +#ifdef CONFIG_PAX_REFCOUNT
57549 +static inline void pax_refcount_needs_these_functions(void)
57550 +{
57551 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
57552 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57553 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57554 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57555 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57556 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57557 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57558 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57559 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57560 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57561 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57562 +
57563 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57564 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57565 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57566 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57567 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57568 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57569 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57570 +}
57571 +#else
57572 +#define atomic_read_unchecked(v) atomic_read(v)
57573 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57574 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57575 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57576 +#define atomic_inc_unchecked(v) atomic_inc(v)
57577 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57578 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57579 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57580 +#define atomic_dec_unchecked(v) atomic_dec(v)
57581 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57582 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57583 +
57584 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
57585 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57586 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57587 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57588 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57589 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57590 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57591 +#endif
57592 +
57593 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57594 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
57595 index b18ce4f..2ee2843 100644
57596 --- a/include/asm-generic/atomic64.h
57597 +++ b/include/asm-generic/atomic64.h
57598 @@ -16,6 +16,8 @@ typedef struct {
57599 long long counter;
57600 } atomic64_t;
57601
57602 +typedef atomic64_t atomic64_unchecked_t;
57603 +
57604 #define ATOMIC64_INIT(i) { (i) }
57605
57606 extern long long atomic64_read(const atomic64_t *v);
57607 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
57608 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
57609 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
57610
57611 +#define atomic64_read_unchecked(v) atomic64_read(v)
57612 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
57613 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
57614 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
57615 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
57616 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
57617 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
57618 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
57619 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
57620 +
57621 #endif /* _ASM_GENERIC_ATOMIC64_H */
57622 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
57623 index 1bfcfe5..e04c5c9 100644
57624 --- a/include/asm-generic/cache.h
57625 +++ b/include/asm-generic/cache.h
57626 @@ -6,7 +6,7 @@
57627 * cache lines need to provide their own cache.h.
57628 */
57629
57630 -#define L1_CACHE_SHIFT 5
57631 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57632 +#define L1_CACHE_SHIFT 5UL
57633 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57634
57635 #endif /* __ASM_GENERIC_CACHE_H */
57636 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
57637 index 1ca3efc..e3dc852 100644
57638 --- a/include/asm-generic/int-l64.h
57639 +++ b/include/asm-generic/int-l64.h
57640 @@ -46,6 +46,8 @@ typedef unsigned int u32;
57641 typedef signed long s64;
57642 typedef unsigned long u64;
57643
57644 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57645 +
57646 #define S8_C(x) x
57647 #define U8_C(x) x ## U
57648 #define S16_C(x) x
57649 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
57650 index f394147..b6152b9 100644
57651 --- a/include/asm-generic/int-ll64.h
57652 +++ b/include/asm-generic/int-ll64.h
57653 @@ -51,6 +51,8 @@ typedef unsigned int u32;
57654 typedef signed long long s64;
57655 typedef unsigned long long u64;
57656
57657 +typedef unsigned long long intoverflow_t;
57658 +
57659 #define S8_C(x) x
57660 #define U8_C(x) x ## U
57661 #define S16_C(x) x
57662 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
57663 index 0232ccb..13d9165 100644
57664 --- a/include/asm-generic/kmap_types.h
57665 +++ b/include/asm-generic/kmap_types.h
57666 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57667 KMAP_D(17) KM_NMI,
57668 KMAP_D(18) KM_NMI_PTE,
57669 KMAP_D(19) KM_KDB,
57670 +KMAP_D(20) KM_CLEARPAGE,
57671 /*
57672 * Remember to update debug_kmap_atomic() when adding new kmap types!
57673 */
57674 -KMAP_D(20) KM_TYPE_NR
57675 +KMAP_D(21) KM_TYPE_NR
57676 };
57677
57678 #undef KMAP_D
57679 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
57680 index 725612b..9cc513a 100644
57681 --- a/include/asm-generic/pgtable-nopmd.h
57682 +++ b/include/asm-generic/pgtable-nopmd.h
57683 @@ -1,14 +1,19 @@
57684 #ifndef _PGTABLE_NOPMD_H
57685 #define _PGTABLE_NOPMD_H
57686
57687 -#ifndef __ASSEMBLY__
57688 -
57689 #include <asm-generic/pgtable-nopud.h>
57690
57691 -struct mm_struct;
57692 -
57693 #define __PAGETABLE_PMD_FOLDED
57694
57695 +#define PMD_SHIFT PUD_SHIFT
57696 +#define PTRS_PER_PMD 1
57697 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57698 +#define PMD_MASK (~(PMD_SIZE-1))
57699 +
57700 +#ifndef __ASSEMBLY__
57701 +
57702 +struct mm_struct;
57703 +
57704 /*
57705 * Having the pmd type consist of a pud gets the size right, and allows
57706 * us to conceptually access the pud entry that this pmd is folded into
57707 @@ -16,11 +21,6 @@ struct mm_struct;
57708 */
57709 typedef struct { pud_t pud; } pmd_t;
57710
57711 -#define PMD_SHIFT PUD_SHIFT
57712 -#define PTRS_PER_PMD 1
57713 -#define PMD_SIZE (1UL << PMD_SHIFT)
57714 -#define PMD_MASK (~(PMD_SIZE-1))
57715 -
57716 /*
57717 * The "pud_xxx()" functions here are trivial for a folded two-level
57718 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57719 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
57720 index 810431d..ccc3638 100644
57721 --- a/include/asm-generic/pgtable-nopud.h
57722 +++ b/include/asm-generic/pgtable-nopud.h
57723 @@ -1,10 +1,15 @@
57724 #ifndef _PGTABLE_NOPUD_H
57725 #define _PGTABLE_NOPUD_H
57726
57727 -#ifndef __ASSEMBLY__
57728 -
57729 #define __PAGETABLE_PUD_FOLDED
57730
57731 +#define PUD_SHIFT PGDIR_SHIFT
57732 +#define PTRS_PER_PUD 1
57733 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57734 +#define PUD_MASK (~(PUD_SIZE-1))
57735 +
57736 +#ifndef __ASSEMBLY__
57737 +
57738 /*
57739 * Having the pud type consist of a pgd gets the size right, and allows
57740 * us to conceptually access the pgd entry that this pud is folded into
57741 @@ -12,11 +17,6 @@
57742 */
57743 typedef struct { pgd_t pgd; } pud_t;
57744
57745 -#define PUD_SHIFT PGDIR_SHIFT
57746 -#define PTRS_PER_PUD 1
57747 -#define PUD_SIZE (1UL << PUD_SHIFT)
57748 -#define PUD_MASK (~(PUD_SIZE-1))
57749 -
57750 /*
57751 * The "pgd_xxx()" functions here are trivial for a folded two-level
57752 * setup: the pud is never bad, and a pud always exists (as it's folded
57753 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
57754 index 76bff2b..c7a14e2 100644
57755 --- a/include/asm-generic/pgtable.h
57756 +++ b/include/asm-generic/pgtable.h
57757 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57758 #endif /* __HAVE_ARCH_PMD_WRITE */
57759 #endif
57760
57761 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57762 +static inline unsigned long pax_open_kernel(void) { return 0; }
57763 +#endif
57764 +
57765 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57766 +static inline unsigned long pax_close_kernel(void) { return 0; }
57767 +#endif
57768 +
57769 #endif /* !__ASSEMBLY__ */
57770
57771 #endif /* _ASM_GENERIC_PGTABLE_H */
57772 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
57773 index b5e2e4c..6a5373e 100644
57774 --- a/include/asm-generic/vmlinux.lds.h
57775 +++ b/include/asm-generic/vmlinux.lds.h
57776 @@ -217,6 +217,7 @@
57777 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57778 VMLINUX_SYMBOL(__start_rodata) = .; \
57779 *(.rodata) *(.rodata.*) \
57780 + *(.data..read_only) \
57781 *(__vermagic) /* Kernel version magic */ \
57782 . = ALIGN(8); \
57783 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57784 @@ -722,17 +723,18 @@
57785 * section in the linker script will go there too. @phdr should have
57786 * a leading colon.
57787 *
57788 - * Note that this macros defines __per_cpu_load as an absolute symbol.
57789 + * Note that this macros defines per_cpu_load as an absolute symbol.
57790 * If there is no need to put the percpu section at a predetermined
57791 * address, use PERCPU_SECTION.
57792 */
57793 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57794 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
57795 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57796 + per_cpu_load = .; \
57797 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57798 - LOAD_OFFSET) { \
57799 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57800 PERCPU_INPUT(cacheline) \
57801 } phdr \
57802 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57803 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57804
57805 /**
57806 * PERCPU_SECTION - define output section for percpu area, simple version
57807 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
57808 index bf4b2dc..2d0762f 100644
57809 --- a/include/drm/drmP.h
57810 +++ b/include/drm/drmP.h
57811 @@ -72,6 +72,7 @@
57812 #include <linux/workqueue.h>
57813 #include <linux/poll.h>
57814 #include <asm/pgalloc.h>
57815 +#include <asm/local.h>
57816 #include "drm.h"
57817
57818 #include <linux/idr.h>
57819 @@ -1038,7 +1039,7 @@ struct drm_device {
57820
57821 /** \name Usage Counters */
57822 /*@{ */
57823 - int open_count; /**< Outstanding files open */
57824 + local_t open_count; /**< Outstanding files open */
57825 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57826 atomic_t vma_count; /**< Outstanding vma areas open */
57827 int buf_use; /**< Buffers in use -- cannot alloc */
57828 @@ -1049,7 +1050,7 @@ struct drm_device {
57829 /*@{ */
57830 unsigned long counters;
57831 enum drm_stat_type types[15];
57832 - atomic_t counts[15];
57833 + atomic_unchecked_t counts[15];
57834 /*@} */
57835
57836 struct list_head filelist;
57837 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
57838 index 73b0712..0b7ef2f 100644
57839 --- a/include/drm/drm_crtc_helper.h
57840 +++ b/include/drm/drm_crtc_helper.h
57841 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57842
57843 /* disable crtc when not in use - more explicit than dpms off */
57844 void (*disable)(struct drm_crtc *crtc);
57845 -};
57846 +} __no_const;
57847
57848 struct drm_encoder_helper_funcs {
57849 void (*dpms)(struct drm_encoder *encoder, int mode);
57850 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57851 struct drm_connector *connector);
57852 /* disable encoder when not in use - more explicit than dpms off */
57853 void (*disable)(struct drm_encoder *encoder);
57854 -};
57855 +} __no_const;
57856
57857 struct drm_connector_helper_funcs {
57858 int (*get_modes)(struct drm_connector *connector);
57859 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
57860 index 26c1f78..6722682 100644
57861 --- a/include/drm/ttm/ttm_memory.h
57862 +++ b/include/drm/ttm/ttm_memory.h
57863 @@ -47,7 +47,7 @@
57864
57865 struct ttm_mem_shrink {
57866 int (*do_shrink) (struct ttm_mem_shrink *);
57867 -};
57868 +} __no_const;
57869
57870 /**
57871 * struct ttm_mem_global - Global memory accounting structure.
57872 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
57873 index e86dfca..40cc55f 100644
57874 --- a/include/linux/a.out.h
57875 +++ b/include/linux/a.out.h
57876 @@ -39,6 +39,14 @@ enum machine_type {
57877 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57878 };
57879
57880 +/* Constants for the N_FLAGS field */
57881 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57882 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57883 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57884 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57885 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57886 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57887 +
57888 #if !defined (N_MAGIC)
57889 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57890 #endif
57891 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
57892 index 49a83ca..df96b54 100644
57893 --- a/include/linux/atmdev.h
57894 +++ b/include/linux/atmdev.h
57895 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57896 #endif
57897
57898 struct k_atm_aal_stats {
57899 -#define __HANDLE_ITEM(i) atomic_t i
57900 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57901 __AAL_STAT_ITEMS
57902 #undef __HANDLE_ITEM
57903 };
57904 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
57905 index fd88a39..8a801b4 100644
57906 --- a/include/linux/binfmts.h
57907 +++ b/include/linux/binfmts.h
57908 @@ -18,7 +18,7 @@ struct pt_regs;
57909 #define BINPRM_BUF_SIZE 128
57910
57911 #ifdef __KERNEL__
57912 -#include <linux/list.h>
57913 +#include <linux/sched.h>
57914
57915 #define CORENAME_MAX_SIZE 128
57916
57917 @@ -58,6 +58,7 @@ struct linux_binprm {
57918 unsigned interp_flags;
57919 unsigned interp_data;
57920 unsigned long loader, exec;
57921 + char tcomm[TASK_COMM_LEN];
57922 };
57923
57924 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
57925 @@ -88,6 +89,7 @@ struct linux_binfmt {
57926 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57927 int (*load_shlib)(struct file *);
57928 int (*core_dump)(struct coredump_params *cprm);
57929 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57930 unsigned long min_coredump; /* minimal dump size */
57931 };
57932
57933 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
57934 index 0ed1eb0..3ab569b 100644
57935 --- a/include/linux/blkdev.h
57936 +++ b/include/linux/blkdev.h
57937 @@ -1315,7 +1315,7 @@ struct block_device_operations {
57938 /* this callback is with swap_lock and sometimes page table lock held */
57939 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57940 struct module *owner;
57941 -};
57942 +} __do_const;
57943
57944 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57945 unsigned long);
57946 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
57947 index 4d1a074..88f929a 100644
57948 --- a/include/linux/blktrace_api.h
57949 +++ b/include/linux/blktrace_api.h
57950 @@ -162,7 +162,7 @@ struct blk_trace {
57951 struct dentry *dir;
57952 struct dentry *dropped_file;
57953 struct dentry *msg_file;
57954 - atomic_t dropped;
57955 + atomic_unchecked_t dropped;
57956 };
57957
57958 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57959 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
57960 index 83195fb..0b0f77d 100644
57961 --- a/include/linux/byteorder/little_endian.h
57962 +++ b/include/linux/byteorder/little_endian.h
57963 @@ -42,51 +42,51 @@
57964
57965 static inline __le64 __cpu_to_le64p(const __u64 *p)
57966 {
57967 - return (__force __le64)*p;
57968 + return (__force const __le64)*p;
57969 }
57970 static inline __u64 __le64_to_cpup(const __le64 *p)
57971 {
57972 - return (__force __u64)*p;
57973 + return (__force const __u64)*p;
57974 }
57975 static inline __le32 __cpu_to_le32p(const __u32 *p)
57976 {
57977 - return (__force __le32)*p;
57978 + return (__force const __le32)*p;
57979 }
57980 static inline __u32 __le32_to_cpup(const __le32 *p)
57981 {
57982 - return (__force __u32)*p;
57983 + return (__force const __u32)*p;
57984 }
57985 static inline __le16 __cpu_to_le16p(const __u16 *p)
57986 {
57987 - return (__force __le16)*p;
57988 + return (__force const __le16)*p;
57989 }
57990 static inline __u16 __le16_to_cpup(const __le16 *p)
57991 {
57992 - return (__force __u16)*p;
57993 + return (__force const __u16)*p;
57994 }
57995 static inline __be64 __cpu_to_be64p(const __u64 *p)
57996 {
57997 - return (__force __be64)__swab64p(p);
57998 + return (__force const __be64)__swab64p(p);
57999 }
58000 static inline __u64 __be64_to_cpup(const __be64 *p)
58001 {
58002 - return __swab64p((__u64 *)p);
58003 + return __swab64p((const __u64 *)p);
58004 }
58005 static inline __be32 __cpu_to_be32p(const __u32 *p)
58006 {
58007 - return (__force __be32)__swab32p(p);
58008 + return (__force const __be32)__swab32p(p);
58009 }
58010 static inline __u32 __be32_to_cpup(const __be32 *p)
58011 {
58012 - return __swab32p((__u32 *)p);
58013 + return __swab32p((const __u32 *)p);
58014 }
58015 static inline __be16 __cpu_to_be16p(const __u16 *p)
58016 {
58017 - return (__force __be16)__swab16p(p);
58018 + return (__force const __be16)__swab16p(p);
58019 }
58020 static inline __u16 __be16_to_cpup(const __be16 *p)
58021 {
58022 - return __swab16p((__u16 *)p);
58023 + return __swab16p((const __u16 *)p);
58024 }
58025 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
58026 #define __le64_to_cpus(x) do { (void)(x); } while (0)
58027 diff --git a/include/linux/cache.h b/include/linux/cache.h
58028 index 4c57065..4307975 100644
58029 --- a/include/linux/cache.h
58030 +++ b/include/linux/cache.h
58031 @@ -16,6 +16,10 @@
58032 #define __read_mostly
58033 #endif
58034
58035 +#ifndef __read_only
58036 +#define __read_only __read_mostly
58037 +#endif
58038 +
58039 #ifndef ____cacheline_aligned
58040 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
58041 #endif
58042 diff --git a/include/linux/capability.h b/include/linux/capability.h
58043 index a63d13d..069bfd5 100644
58044 --- a/include/linux/capability.h
58045 +++ b/include/linux/capability.h
58046 @@ -548,6 +548,9 @@ extern bool capable(int cap);
58047 extern bool ns_capable(struct user_namespace *ns, int cap);
58048 extern bool task_ns_capable(struct task_struct *t, int cap);
58049 extern bool nsown_capable(int cap);
58050 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
58051 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
58052 +extern bool capable_nolog(int cap);
58053
58054 /* audit system wants to get cap info from files as well */
58055 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
58056 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
58057 index 04ffb2e..6799180 100644
58058 --- a/include/linux/cleancache.h
58059 +++ b/include/linux/cleancache.h
58060 @@ -31,7 +31,7 @@ struct cleancache_ops {
58061 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
58062 void (*flush_inode)(int, struct cleancache_filekey);
58063 void (*flush_fs)(int);
58064 -};
58065 +} __no_const;
58066
58067 extern struct cleancache_ops
58068 cleancache_register_ops(struct cleancache_ops *ops);
58069 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
58070 index dfadc96..c0e70c1 100644
58071 --- a/include/linux/compiler-gcc4.h
58072 +++ b/include/linux/compiler-gcc4.h
58073 @@ -31,6 +31,12 @@
58074
58075
58076 #if __GNUC_MINOR__ >= 5
58077 +
58078 +#ifdef CONSTIFY_PLUGIN
58079 +#define __no_const __attribute__((no_const))
58080 +#define __do_const __attribute__((do_const))
58081 +#endif
58082 +
58083 /*
58084 * Mark a position in code as unreachable. This can be used to
58085 * suppress control flow warnings after asm blocks that transfer
58086 @@ -46,6 +52,11 @@
58087 #define __noclone __attribute__((__noclone__))
58088
58089 #endif
58090 +
58091 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
58092 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
58093 +#define __bos0(ptr) __bos((ptr), 0)
58094 +#define __bos1(ptr) __bos((ptr), 1)
58095 #endif
58096
58097 #if __GNUC_MINOR__ > 0
58098 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
58099 index 320d6c9..8573a1c 100644
58100 --- a/include/linux/compiler.h
58101 +++ b/include/linux/compiler.h
58102 @@ -5,31 +5,62 @@
58103
58104 #ifdef __CHECKER__
58105 # define __user __attribute__((noderef, address_space(1)))
58106 +# define __force_user __force __user
58107 # define __kernel __attribute__((address_space(0)))
58108 +# define __force_kernel __force __kernel
58109 # define __safe __attribute__((safe))
58110 # define __force __attribute__((force))
58111 # define __nocast __attribute__((nocast))
58112 # define __iomem __attribute__((noderef, address_space(2)))
58113 +# define __force_iomem __force __iomem
58114 # define __acquires(x) __attribute__((context(x,0,1)))
58115 # define __releases(x) __attribute__((context(x,1,0)))
58116 # define __acquire(x) __context__(x,1)
58117 # define __release(x) __context__(x,-1)
58118 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
58119 # define __percpu __attribute__((noderef, address_space(3)))
58120 +# define __force_percpu __force __percpu
58121 #ifdef CONFIG_SPARSE_RCU_POINTER
58122 # define __rcu __attribute__((noderef, address_space(4)))
58123 +# define __force_rcu __force __rcu
58124 #else
58125 # define __rcu
58126 +# define __force_rcu
58127 #endif
58128 extern void __chk_user_ptr(const volatile void __user *);
58129 extern void __chk_io_ptr(const volatile void __iomem *);
58130 +#elif defined(CHECKER_PLUGIN)
58131 +//# define __user
58132 +//# define __force_user
58133 +//# define __kernel
58134 +//# define __force_kernel
58135 +# define __safe
58136 +# define __force
58137 +# define __nocast
58138 +# define __iomem
58139 +# define __force_iomem
58140 +# define __chk_user_ptr(x) (void)0
58141 +# define __chk_io_ptr(x) (void)0
58142 +# define __builtin_warning(x, y...) (1)
58143 +# define __acquires(x)
58144 +# define __releases(x)
58145 +# define __acquire(x) (void)0
58146 +# define __release(x) (void)0
58147 +# define __cond_lock(x,c) (c)
58148 +# define __percpu
58149 +# define __force_percpu
58150 +# define __rcu
58151 +# define __force_rcu
58152 #else
58153 # define __user
58154 +# define __force_user
58155 # define __kernel
58156 +# define __force_kernel
58157 # define __safe
58158 # define __force
58159 # define __nocast
58160 # define __iomem
58161 +# define __force_iomem
58162 # define __chk_user_ptr(x) (void)0
58163 # define __chk_io_ptr(x) (void)0
58164 # define __builtin_warning(x, y...) (1)
58165 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
58166 # define __release(x) (void)0
58167 # define __cond_lock(x,c) (c)
58168 # define __percpu
58169 +# define __force_percpu
58170 # define __rcu
58171 +# define __force_rcu
58172 #endif
58173
58174 #ifdef __KERNEL__
58175 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58176 # define __attribute_const__ /* unimplemented */
58177 #endif
58178
58179 +#ifndef __no_const
58180 +# define __no_const
58181 +#endif
58182 +
58183 +#ifndef __do_const
58184 +# define __do_const
58185 +#endif
58186 +
58187 /*
58188 * Tell gcc if a function is cold. The compiler will assume any path
58189 * directly leading to the call is unlikely.
58190 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58191 #define __cold
58192 #endif
58193
58194 +#ifndef __alloc_size
58195 +#define __alloc_size(...)
58196 +#endif
58197 +
58198 +#ifndef __bos
58199 +#define __bos(ptr, arg)
58200 +#endif
58201 +
58202 +#ifndef __bos0
58203 +#define __bos0(ptr)
58204 +#endif
58205 +
58206 +#ifndef __bos1
58207 +#define __bos1(ptr)
58208 +#endif
58209 +
58210 /* Simple shorthand for a section definition */
58211 #ifndef __section
58212 # define __section(S) __attribute__ ((__section__(#S)))
58213 @@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58214 * use is to mediate communication between process-level code and irq/NMI
58215 * handlers, all running on the same CPU.
58216 */
58217 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
58218 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
58219 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
58220
58221 #endif /* __LINUX_COMPILER_H */
58222 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
58223 index e9eaec5..bfeb9bb 100644
58224 --- a/include/linux/cpuset.h
58225 +++ b/include/linux/cpuset.h
58226 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
58227 * nodemask.
58228 */
58229 smp_mb();
58230 - --ACCESS_ONCE(current->mems_allowed_change_disable);
58231 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
58232 }
58233
58234 static inline void set_mems_allowed(nodemask_t nodemask)
58235 diff --git a/include/linux/cred.h b/include/linux/cred.h
58236 index 4030896..8d6f342 100644
58237 --- a/include/linux/cred.h
58238 +++ b/include/linux/cred.h
58239 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
58240 static inline void validate_process_creds(void)
58241 {
58242 }
58243 +static inline void validate_task_creds(struct task_struct *task)
58244 +{
58245 +}
58246 #endif
58247
58248 /**
58249 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
58250 index 8a94217..15d49e3 100644
58251 --- a/include/linux/crypto.h
58252 +++ b/include/linux/crypto.h
58253 @@ -365,7 +365,7 @@ struct cipher_tfm {
58254 const u8 *key, unsigned int keylen);
58255 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58256 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58257 -};
58258 +} __no_const;
58259
58260 struct hash_tfm {
58261 int (*init)(struct hash_desc *desc);
58262 @@ -386,13 +386,13 @@ struct compress_tfm {
58263 int (*cot_decompress)(struct crypto_tfm *tfm,
58264 const u8 *src, unsigned int slen,
58265 u8 *dst, unsigned int *dlen);
58266 -};
58267 +} __no_const;
58268
58269 struct rng_tfm {
58270 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
58271 unsigned int dlen);
58272 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
58273 -};
58274 +} __no_const;
58275
58276 #define crt_ablkcipher crt_u.ablkcipher
58277 #define crt_aead crt_u.aead
58278 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
58279 index 7925bf0..d5143d2 100644
58280 --- a/include/linux/decompress/mm.h
58281 +++ b/include/linux/decompress/mm.h
58282 @@ -77,7 +77,7 @@ static void free(void *where)
58283 * warnings when not needed (indeed large_malloc / large_free are not
58284 * needed by inflate */
58285
58286 -#define malloc(a) kmalloc(a, GFP_KERNEL)
58287 +#define malloc(a) kmalloc((a), GFP_KERNEL)
58288 #define free(a) kfree(a)
58289
58290 #define large_malloc(a) vmalloc(a)
58291 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
58292 index e13117c..e9fc938 100644
58293 --- a/include/linux/dma-mapping.h
58294 +++ b/include/linux/dma-mapping.h
58295 @@ -46,7 +46,7 @@ struct dma_map_ops {
58296 u64 (*get_required_mask)(struct device *dev);
58297 #endif
58298 int is_phys;
58299 -};
58300 +} __do_const;
58301
58302 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
58303
58304 diff --git a/include/linux/efi.h b/include/linux/efi.h
58305 index 2362a0b..cfaf8fcc 100644
58306 --- a/include/linux/efi.h
58307 +++ b/include/linux/efi.h
58308 @@ -446,7 +446,7 @@ struct efivar_operations {
58309 efi_get_variable_t *get_variable;
58310 efi_get_next_variable_t *get_next_variable;
58311 efi_set_variable_t *set_variable;
58312 -};
58313 +} __no_const;
58314
58315 struct efivars {
58316 /*
58317 diff --git a/include/linux/elf.h b/include/linux/elf.h
58318 index 31f0508..5421c01 100644
58319 --- a/include/linux/elf.h
58320 +++ b/include/linux/elf.h
58321 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
58322 #define PT_GNU_EH_FRAME 0x6474e550
58323
58324 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
58325 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
58326 +
58327 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
58328 +
58329 +/* Constants for the e_flags field */
58330 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58331 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
58332 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
58333 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
58334 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58335 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58336
58337 /*
58338 * Extended Numbering
58339 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
58340 #define DT_DEBUG 21
58341 #define DT_TEXTREL 22
58342 #define DT_JMPREL 23
58343 +#define DT_FLAGS 30
58344 + #define DF_TEXTREL 0x00000004
58345 #define DT_ENCODING 32
58346 #define OLD_DT_LOOS 0x60000000
58347 #define DT_LOOS 0x6000000d
58348 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
58349 #define PF_W 0x2
58350 #define PF_X 0x1
58351
58352 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58353 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58354 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58355 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58356 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58357 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58358 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58359 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58360 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58361 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58362 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58363 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58364 +
58365 typedef struct elf32_phdr{
58366 Elf32_Word p_type;
58367 Elf32_Off p_offset;
58368 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
58369 #define EI_OSABI 7
58370 #define EI_PAD 8
58371
58372 +#define EI_PAX 14
58373 +
58374 #define ELFMAG0 0x7f /* EI_MAG */
58375 #define ELFMAG1 'E'
58376 #define ELFMAG2 'L'
58377 @@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
58378 #define elf_note elf32_note
58379 #define elf_addr_t Elf32_Off
58380 #define Elf_Half Elf32_Half
58381 +#define elf_dyn Elf32_Dyn
58382
58383 #else
58384
58385 @@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
58386 #define elf_note elf64_note
58387 #define elf_addr_t Elf64_Off
58388 #define Elf_Half Elf64_Half
58389 +#define elf_dyn Elf64_Dyn
58390
58391 #endif
58392
58393 diff --git a/include/linux/filter.h b/include/linux/filter.h
58394 index 8eeb205..d59bfa2 100644
58395 --- a/include/linux/filter.h
58396 +++ b/include/linux/filter.h
58397 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
58398
58399 struct sk_buff;
58400 struct sock;
58401 +struct bpf_jit_work;
58402
58403 struct sk_filter
58404 {
58405 @@ -141,6 +142,9 @@ struct sk_filter
58406 unsigned int len; /* Number of filter blocks */
58407 unsigned int (*bpf_func)(const struct sk_buff *skb,
58408 const struct sock_filter *filter);
58409 +#ifdef CONFIG_BPF_JIT
58410 + struct bpf_jit_work *work;
58411 +#endif
58412 struct rcu_head rcu;
58413 struct sock_filter insns[0];
58414 };
58415 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
58416 index 84ccf8e..2e9b14c 100644
58417 --- a/include/linux/firewire.h
58418 +++ b/include/linux/firewire.h
58419 @@ -428,7 +428,7 @@ struct fw_iso_context {
58420 union {
58421 fw_iso_callback_t sc;
58422 fw_iso_mc_callback_t mc;
58423 - } callback;
58424 + } __no_const callback;
58425 void *callback_data;
58426 };
58427
58428 diff --git a/include/linux/fs.h b/include/linux/fs.h
58429 index 10b2288..09180e4 100644
58430 --- a/include/linux/fs.h
58431 +++ b/include/linux/fs.h
58432 @@ -1609,7 +1609,8 @@ struct file_operations {
58433 int (*setlease)(struct file *, long, struct file_lock **);
58434 long (*fallocate)(struct file *file, int mode, loff_t offset,
58435 loff_t len);
58436 -};
58437 +} __do_const;
58438 +typedef struct file_operations __no_const file_operations_no_const;
58439
58440 struct inode_operations {
58441 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58442 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
58443 index 003dc0f..3c4ea97 100644
58444 --- a/include/linux/fs_struct.h
58445 +++ b/include/linux/fs_struct.h
58446 @@ -6,7 +6,7 @@
58447 #include <linux/seqlock.h>
58448
58449 struct fs_struct {
58450 - int users;
58451 + atomic_t users;
58452 spinlock_t lock;
58453 seqcount_t seq;
58454 int umask;
58455 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
58456 index ce31408..b1ad003 100644
58457 --- a/include/linux/fscache-cache.h
58458 +++ b/include/linux/fscache-cache.h
58459 @@ -102,7 +102,7 @@ struct fscache_operation {
58460 fscache_operation_release_t release;
58461 };
58462
58463 -extern atomic_t fscache_op_debug_id;
58464 +extern atomic_unchecked_t fscache_op_debug_id;
58465 extern void fscache_op_work_func(struct work_struct *work);
58466
58467 extern void fscache_enqueue_operation(struct fscache_operation *);
58468 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
58469 {
58470 INIT_WORK(&op->work, fscache_op_work_func);
58471 atomic_set(&op->usage, 1);
58472 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58473 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58474 op->processor = processor;
58475 op->release = release;
58476 INIT_LIST_HEAD(&op->pend_link);
58477 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
58478 index 2a53f10..0187fdf 100644
58479 --- a/include/linux/fsnotify.h
58480 +++ b/include/linux/fsnotify.h
58481 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
58482 */
58483 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58484 {
58485 - return kstrdup(name, GFP_KERNEL);
58486 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58487 }
58488
58489 /*
58490 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
58491 index 91d0e0a3..035666b 100644
58492 --- a/include/linux/fsnotify_backend.h
58493 +++ b/include/linux/fsnotify_backend.h
58494 @@ -105,6 +105,7 @@ struct fsnotify_ops {
58495 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
58496 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
58497 };
58498 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
58499
58500 /*
58501 * A group is a "thing" that wants to receive notification about filesystem
58502 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
58503 index c3da42d..c70e0df 100644
58504 --- a/include/linux/ftrace_event.h
58505 +++ b/include/linux/ftrace_event.h
58506 @@ -97,7 +97,7 @@ struct trace_event_functions {
58507 trace_print_func raw;
58508 trace_print_func hex;
58509 trace_print_func binary;
58510 -};
58511 +} __no_const;
58512
58513 struct trace_event {
58514 struct hlist_node node;
58515 @@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
58516 extern int trace_add_event_call(struct ftrace_event_call *call);
58517 extern void trace_remove_event_call(struct ftrace_event_call *call);
58518
58519 -#define is_signed_type(type) (((type)(-1)) < 0)
58520 +#define is_signed_type(type) (((type)(-1)) < (type)1)
58521
58522 int trace_set_clr_event(const char *system, const char *event, int set);
58523
58524 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
58525 index 6d18f35..ab71e2c 100644
58526 --- a/include/linux/genhd.h
58527 +++ b/include/linux/genhd.h
58528 @@ -185,7 +185,7 @@ struct gendisk {
58529 struct kobject *slave_dir;
58530
58531 struct timer_rand_state *random;
58532 - atomic_t sync_io; /* RAID */
58533 + atomic_unchecked_t sync_io; /* RAID */
58534 struct disk_events *ev;
58535 #ifdef CONFIG_BLK_DEV_INTEGRITY
58536 struct blk_integrity *integrity;
58537 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
58538 new file mode 100644
58539 index 0000000..8a130b6
58540 --- /dev/null
58541 +++ b/include/linux/gracl.h
58542 @@ -0,0 +1,319 @@
58543 +#ifndef GR_ACL_H
58544 +#define GR_ACL_H
58545 +
58546 +#include <linux/grdefs.h>
58547 +#include <linux/resource.h>
58548 +#include <linux/capability.h>
58549 +#include <linux/dcache.h>
58550 +#include <asm/resource.h>
58551 +
58552 +/* Major status information */
58553 +
58554 +#define GR_VERSION "grsecurity 2.9"
58555 +#define GRSECURITY_VERSION 0x2900
58556 +
58557 +enum {
58558 + GR_SHUTDOWN = 0,
58559 + GR_ENABLE = 1,
58560 + GR_SPROLE = 2,
58561 + GR_RELOAD = 3,
58562 + GR_SEGVMOD = 4,
58563 + GR_STATUS = 5,
58564 + GR_UNSPROLE = 6,
58565 + GR_PASSSET = 7,
58566 + GR_SPROLEPAM = 8,
58567 +};
58568 +
58569 +/* Password setup definitions
58570 + * kernel/grhash.c */
58571 +enum {
58572 + GR_PW_LEN = 128,
58573 + GR_SALT_LEN = 16,
58574 + GR_SHA_LEN = 32,
58575 +};
58576 +
58577 +enum {
58578 + GR_SPROLE_LEN = 64,
58579 +};
58580 +
58581 +enum {
58582 + GR_NO_GLOB = 0,
58583 + GR_REG_GLOB,
58584 + GR_CREATE_GLOB
58585 +};
58586 +
58587 +#define GR_NLIMITS 32
58588 +
58589 +/* Begin Data Structures */
58590 +
58591 +struct sprole_pw {
58592 + unsigned char *rolename;
58593 + unsigned char salt[GR_SALT_LEN];
58594 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58595 +};
58596 +
58597 +struct name_entry {
58598 + __u32 key;
58599 + ino_t inode;
58600 + dev_t device;
58601 + char *name;
58602 + __u16 len;
58603 + __u8 deleted;
58604 + struct name_entry *prev;
58605 + struct name_entry *next;
58606 +};
58607 +
58608 +struct inodev_entry {
58609 + struct name_entry *nentry;
58610 + struct inodev_entry *prev;
58611 + struct inodev_entry *next;
58612 +};
58613 +
58614 +struct acl_role_db {
58615 + struct acl_role_label **r_hash;
58616 + __u32 r_size;
58617 +};
58618 +
58619 +struct inodev_db {
58620 + struct inodev_entry **i_hash;
58621 + __u32 i_size;
58622 +};
58623 +
58624 +struct name_db {
58625 + struct name_entry **n_hash;
58626 + __u32 n_size;
58627 +};
58628 +
58629 +struct crash_uid {
58630 + uid_t uid;
58631 + unsigned long expires;
58632 +};
58633 +
58634 +struct gr_hash_struct {
58635 + void **table;
58636 + void **nametable;
58637 + void *first;
58638 + __u32 table_size;
58639 + __u32 used_size;
58640 + int type;
58641 +};
58642 +
58643 +/* Userspace Grsecurity ACL data structures */
58644 +
58645 +struct acl_subject_label {
58646 + char *filename;
58647 + ino_t inode;
58648 + dev_t device;
58649 + __u32 mode;
58650 + kernel_cap_t cap_mask;
58651 + kernel_cap_t cap_lower;
58652 + kernel_cap_t cap_invert_audit;
58653 +
58654 + struct rlimit res[GR_NLIMITS];
58655 + __u32 resmask;
58656 +
58657 + __u8 user_trans_type;
58658 + __u8 group_trans_type;
58659 + uid_t *user_transitions;
58660 + gid_t *group_transitions;
58661 + __u16 user_trans_num;
58662 + __u16 group_trans_num;
58663 +
58664 + __u32 sock_families[2];
58665 + __u32 ip_proto[8];
58666 + __u32 ip_type;
58667 + struct acl_ip_label **ips;
58668 + __u32 ip_num;
58669 + __u32 inaddr_any_override;
58670 +
58671 + __u32 crashes;
58672 + unsigned long expires;
58673 +
58674 + struct acl_subject_label *parent_subject;
58675 + struct gr_hash_struct *hash;
58676 + struct acl_subject_label *prev;
58677 + struct acl_subject_label *next;
58678 +
58679 + struct acl_object_label **obj_hash;
58680 + __u32 obj_hash_size;
58681 + __u16 pax_flags;
58682 +};
58683 +
58684 +struct role_allowed_ip {
58685 + __u32 addr;
58686 + __u32 netmask;
58687 +
58688 + struct role_allowed_ip *prev;
58689 + struct role_allowed_ip *next;
58690 +};
58691 +
58692 +struct role_transition {
58693 + char *rolename;
58694 +
58695 + struct role_transition *prev;
58696 + struct role_transition *next;
58697 +};
58698 +
58699 +struct acl_role_label {
58700 + char *rolename;
58701 + uid_t uidgid;
58702 + __u16 roletype;
58703 +
58704 + __u16 auth_attempts;
58705 + unsigned long expires;
58706 +
58707 + struct acl_subject_label *root_label;
58708 + struct gr_hash_struct *hash;
58709 +
58710 + struct acl_role_label *prev;
58711 + struct acl_role_label *next;
58712 +
58713 + struct role_transition *transitions;
58714 + struct role_allowed_ip *allowed_ips;
58715 + uid_t *domain_children;
58716 + __u16 domain_child_num;
58717 +
58718 + umode_t umask;
58719 +
58720 + struct acl_subject_label **subj_hash;
58721 + __u32 subj_hash_size;
58722 +};
58723 +
58724 +struct user_acl_role_db {
58725 + struct acl_role_label **r_table;
58726 + __u32 num_pointers; /* Number of allocations to track */
58727 + __u32 num_roles; /* Number of roles */
58728 + __u32 num_domain_children; /* Number of domain children */
58729 + __u32 num_subjects; /* Number of subjects */
58730 + __u32 num_objects; /* Number of objects */
58731 +};
58732 +
58733 +struct acl_object_label {
58734 + char *filename;
58735 + ino_t inode;
58736 + dev_t device;
58737 + __u32 mode;
58738 +
58739 + struct acl_subject_label *nested;
58740 + struct acl_object_label *globbed;
58741 +
58742 + /* next two structures not used */
58743 +
58744 + struct acl_object_label *prev;
58745 + struct acl_object_label *next;
58746 +};
58747 +
58748 +struct acl_ip_label {
58749 + char *iface;
58750 + __u32 addr;
58751 + __u32 netmask;
58752 + __u16 low, high;
58753 + __u8 mode;
58754 + __u32 type;
58755 + __u32 proto[8];
58756 +
58757 + /* next two structures not used */
58758 +
58759 + struct acl_ip_label *prev;
58760 + struct acl_ip_label *next;
58761 +};
58762 +
58763 +struct gr_arg {
58764 + struct user_acl_role_db role_db;
58765 + unsigned char pw[GR_PW_LEN];
58766 + unsigned char salt[GR_SALT_LEN];
58767 + unsigned char sum[GR_SHA_LEN];
58768 + unsigned char sp_role[GR_SPROLE_LEN];
58769 + struct sprole_pw *sprole_pws;
58770 + dev_t segv_device;
58771 + ino_t segv_inode;
58772 + uid_t segv_uid;
58773 + __u16 num_sprole_pws;
58774 + __u16 mode;
58775 +};
58776 +
58777 +struct gr_arg_wrapper {
58778 + struct gr_arg *arg;
58779 + __u32 version;
58780 + __u32 size;
58781 +};
58782 +
58783 +struct subject_map {
58784 + struct acl_subject_label *user;
58785 + struct acl_subject_label *kernel;
58786 + struct subject_map *prev;
58787 + struct subject_map *next;
58788 +};
58789 +
58790 +struct acl_subj_map_db {
58791 + struct subject_map **s_hash;
58792 + __u32 s_size;
58793 +};
58794 +
58795 +/* End Data Structures Section */
58796 +
58797 +/* Hash functions generated by empirical testing by Brad Spengler
58798 + Makes good use of the low bits of the inode. Generally 0-1 times
58799 + in loop for successful match. 0-3 for unsuccessful match.
58800 + Shift/add algorithm with modulus of table size and an XOR*/
58801 +
58802 +static __inline__ unsigned int
58803 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58804 +{
58805 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
58806 +}
58807 +
58808 + static __inline__ unsigned int
58809 +shash(const struct acl_subject_label *userp, const unsigned int sz)
58810 +{
58811 + return ((const unsigned long)userp % sz);
58812 +}
58813 +
58814 +static __inline__ unsigned int
58815 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58816 +{
58817 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58818 +}
58819 +
58820 +static __inline__ unsigned int
58821 +nhash(const char *name, const __u16 len, const unsigned int sz)
58822 +{
58823 + return full_name_hash((const unsigned char *)name, len) % sz;
58824 +}
58825 +
58826 +#define FOR_EACH_ROLE_START(role) \
58827 + role = role_list; \
58828 + while (role) {
58829 +
58830 +#define FOR_EACH_ROLE_END(role) \
58831 + role = role->prev; \
58832 + }
58833 +
58834 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58835 + subj = NULL; \
58836 + iter = 0; \
58837 + while (iter < role->subj_hash_size) { \
58838 + if (subj == NULL) \
58839 + subj = role->subj_hash[iter]; \
58840 + if (subj == NULL) { \
58841 + iter++; \
58842 + continue; \
58843 + }
58844 +
58845 +#define FOR_EACH_SUBJECT_END(subj,iter) \
58846 + subj = subj->next; \
58847 + if (subj == NULL) \
58848 + iter++; \
58849 + }
58850 +
58851 +
58852 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58853 + subj = role->hash->first; \
58854 + while (subj != NULL) {
58855 +
58856 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58857 + subj = subj->next; \
58858 + }
58859 +
58860 +#endif
58861 +
58862 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
58863 new file mode 100644
58864 index 0000000..323ecf2
58865 --- /dev/null
58866 +++ b/include/linux/gralloc.h
58867 @@ -0,0 +1,9 @@
58868 +#ifndef __GRALLOC_H
58869 +#define __GRALLOC_H
58870 +
58871 +void acl_free_all(void);
58872 +int acl_alloc_stack_init(unsigned long size);
58873 +void *acl_alloc(unsigned long len);
58874 +void *acl_alloc_num(unsigned long num, unsigned long len);
58875 +
58876 +#endif
58877 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
58878 new file mode 100644
58879 index 0000000..b30e9bc
58880 --- /dev/null
58881 +++ b/include/linux/grdefs.h
58882 @@ -0,0 +1,140 @@
58883 +#ifndef GRDEFS_H
58884 +#define GRDEFS_H
58885 +
58886 +/* Begin grsecurity status declarations */
58887 +
58888 +enum {
58889 + GR_READY = 0x01,
58890 + GR_STATUS_INIT = 0x00 // disabled state
58891 +};
58892 +
58893 +/* Begin ACL declarations */
58894 +
58895 +/* Role flags */
58896 +
58897 +enum {
58898 + GR_ROLE_USER = 0x0001,
58899 + GR_ROLE_GROUP = 0x0002,
58900 + GR_ROLE_DEFAULT = 0x0004,
58901 + GR_ROLE_SPECIAL = 0x0008,
58902 + GR_ROLE_AUTH = 0x0010,
58903 + GR_ROLE_NOPW = 0x0020,
58904 + GR_ROLE_GOD = 0x0040,
58905 + GR_ROLE_LEARN = 0x0080,
58906 + GR_ROLE_TPE = 0x0100,
58907 + GR_ROLE_DOMAIN = 0x0200,
58908 + GR_ROLE_PAM = 0x0400,
58909 + GR_ROLE_PERSIST = 0x0800
58910 +};
58911 +
58912 +/* ACL Subject and Object mode flags */
58913 +enum {
58914 + GR_DELETED = 0x80000000
58915 +};
58916 +
58917 +/* ACL Object-only mode flags */
58918 +enum {
58919 + GR_READ = 0x00000001,
58920 + GR_APPEND = 0x00000002,
58921 + GR_WRITE = 0x00000004,
58922 + GR_EXEC = 0x00000008,
58923 + GR_FIND = 0x00000010,
58924 + GR_INHERIT = 0x00000020,
58925 + GR_SETID = 0x00000040,
58926 + GR_CREATE = 0x00000080,
58927 + GR_DELETE = 0x00000100,
58928 + GR_LINK = 0x00000200,
58929 + GR_AUDIT_READ = 0x00000400,
58930 + GR_AUDIT_APPEND = 0x00000800,
58931 + GR_AUDIT_WRITE = 0x00001000,
58932 + GR_AUDIT_EXEC = 0x00002000,
58933 + GR_AUDIT_FIND = 0x00004000,
58934 + GR_AUDIT_INHERIT= 0x00008000,
58935 + GR_AUDIT_SETID = 0x00010000,
58936 + GR_AUDIT_CREATE = 0x00020000,
58937 + GR_AUDIT_DELETE = 0x00040000,
58938 + GR_AUDIT_LINK = 0x00080000,
58939 + GR_PTRACERD = 0x00100000,
58940 + GR_NOPTRACE = 0x00200000,
58941 + GR_SUPPRESS = 0x00400000,
58942 + GR_NOLEARN = 0x00800000,
58943 + GR_INIT_TRANSFER= 0x01000000
58944 +};
58945 +
58946 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58947 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58948 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58949 +
58950 +/* ACL subject-only mode flags */
58951 +enum {
58952 + GR_KILL = 0x00000001,
58953 + GR_VIEW = 0x00000002,
58954 + GR_PROTECTED = 0x00000004,
58955 + GR_LEARN = 0x00000008,
58956 + GR_OVERRIDE = 0x00000010,
58957 + /* just a placeholder, this mode is only used in userspace */
58958 + GR_DUMMY = 0x00000020,
58959 + GR_PROTSHM = 0x00000040,
58960 + GR_KILLPROC = 0x00000080,
58961 + GR_KILLIPPROC = 0x00000100,
58962 + /* just a placeholder, this mode is only used in userspace */
58963 + GR_NOTROJAN = 0x00000200,
58964 + GR_PROTPROCFD = 0x00000400,
58965 + GR_PROCACCT = 0x00000800,
58966 + GR_RELAXPTRACE = 0x00001000,
58967 + GR_NESTED = 0x00002000,
58968 + GR_INHERITLEARN = 0x00004000,
58969 + GR_PROCFIND = 0x00008000,
58970 + GR_POVERRIDE = 0x00010000,
58971 + GR_KERNELAUTH = 0x00020000,
58972 + GR_ATSECURE = 0x00040000,
58973 + GR_SHMEXEC = 0x00080000
58974 +};
58975 +
58976 +enum {
58977 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58978 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58979 + GR_PAX_ENABLE_MPROTECT = 0x0004,
58980 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
58981 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58982 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58983 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58984 + GR_PAX_DISABLE_MPROTECT = 0x0400,
58985 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
58986 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58987 +};
58988 +
58989 +enum {
58990 + GR_ID_USER = 0x01,
58991 + GR_ID_GROUP = 0x02,
58992 +};
58993 +
58994 +enum {
58995 + GR_ID_ALLOW = 0x01,
58996 + GR_ID_DENY = 0x02,
58997 +};
58998 +
58999 +#define GR_CRASH_RES 31
59000 +#define GR_UIDTABLE_MAX 500
59001 +
59002 +/* begin resource learning section */
59003 +enum {
59004 + GR_RLIM_CPU_BUMP = 60,
59005 + GR_RLIM_FSIZE_BUMP = 50000,
59006 + GR_RLIM_DATA_BUMP = 10000,
59007 + GR_RLIM_STACK_BUMP = 1000,
59008 + GR_RLIM_CORE_BUMP = 10000,
59009 + GR_RLIM_RSS_BUMP = 500000,
59010 + GR_RLIM_NPROC_BUMP = 1,
59011 + GR_RLIM_NOFILE_BUMP = 5,
59012 + GR_RLIM_MEMLOCK_BUMP = 50000,
59013 + GR_RLIM_AS_BUMP = 500000,
59014 + GR_RLIM_LOCKS_BUMP = 2,
59015 + GR_RLIM_SIGPENDING_BUMP = 5,
59016 + GR_RLIM_MSGQUEUE_BUMP = 10000,
59017 + GR_RLIM_NICE_BUMP = 1,
59018 + GR_RLIM_RTPRIO_BUMP = 1,
59019 + GR_RLIM_RTTIME_BUMP = 1000000
59020 +};
59021 +
59022 +#endif
59023 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
59024 new file mode 100644
59025 index 0000000..da390f1
59026 --- /dev/null
59027 +++ b/include/linux/grinternal.h
59028 @@ -0,0 +1,221 @@
59029 +#ifndef __GRINTERNAL_H
59030 +#define __GRINTERNAL_H
59031 +
59032 +#ifdef CONFIG_GRKERNSEC
59033 +
59034 +#include <linux/fs.h>
59035 +#include <linux/mnt_namespace.h>
59036 +#include <linux/nsproxy.h>
59037 +#include <linux/gracl.h>
59038 +#include <linux/grdefs.h>
59039 +#include <linux/grmsg.h>
59040 +
59041 +void gr_add_learn_entry(const char *fmt, ...)
59042 + __attribute__ ((format (printf, 1, 2)));
59043 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
59044 + const struct vfsmount *mnt);
59045 +__u32 gr_check_create(const struct dentry *new_dentry,
59046 + const struct dentry *parent,
59047 + const struct vfsmount *mnt, const __u32 mode);
59048 +int gr_check_protected_task(const struct task_struct *task);
59049 +__u32 to_gr_audit(const __u32 reqmode);
59050 +int gr_set_acls(const int type);
59051 +int gr_apply_subject_to_task(struct task_struct *task);
59052 +int gr_acl_is_enabled(void);
59053 +char gr_roletype_to_char(void);
59054 +
59055 +void gr_handle_alertkill(struct task_struct *task);
59056 +char *gr_to_filename(const struct dentry *dentry,
59057 + const struct vfsmount *mnt);
59058 +char *gr_to_filename1(const struct dentry *dentry,
59059 + const struct vfsmount *mnt);
59060 +char *gr_to_filename2(const struct dentry *dentry,
59061 + const struct vfsmount *mnt);
59062 +char *gr_to_filename3(const struct dentry *dentry,
59063 + const struct vfsmount *mnt);
59064 +
59065 +extern int grsec_enable_ptrace_readexec;
59066 +extern int grsec_enable_harden_ptrace;
59067 +extern int grsec_enable_link;
59068 +extern int grsec_enable_fifo;
59069 +extern int grsec_enable_execve;
59070 +extern int grsec_enable_shm;
59071 +extern int grsec_enable_execlog;
59072 +extern int grsec_enable_signal;
59073 +extern int grsec_enable_audit_ptrace;
59074 +extern int grsec_enable_forkfail;
59075 +extern int grsec_enable_time;
59076 +extern int grsec_enable_rofs;
59077 +extern int grsec_enable_chroot_shmat;
59078 +extern int grsec_enable_chroot_mount;
59079 +extern int grsec_enable_chroot_double;
59080 +extern int grsec_enable_chroot_pivot;
59081 +extern int grsec_enable_chroot_chdir;
59082 +extern int grsec_enable_chroot_chmod;
59083 +extern int grsec_enable_chroot_mknod;
59084 +extern int grsec_enable_chroot_fchdir;
59085 +extern int grsec_enable_chroot_nice;
59086 +extern int grsec_enable_chroot_execlog;
59087 +extern int grsec_enable_chroot_caps;
59088 +extern int grsec_enable_chroot_sysctl;
59089 +extern int grsec_enable_chroot_unix;
59090 +extern int grsec_enable_tpe;
59091 +extern int grsec_tpe_gid;
59092 +extern int grsec_enable_tpe_all;
59093 +extern int grsec_enable_tpe_invert;
59094 +extern int grsec_enable_socket_all;
59095 +extern int grsec_socket_all_gid;
59096 +extern int grsec_enable_socket_client;
59097 +extern int grsec_socket_client_gid;
59098 +extern int grsec_enable_socket_server;
59099 +extern int grsec_socket_server_gid;
59100 +extern int grsec_audit_gid;
59101 +extern int grsec_enable_group;
59102 +extern int grsec_enable_audit_textrel;
59103 +extern int grsec_enable_log_rwxmaps;
59104 +extern int grsec_enable_mount;
59105 +extern int grsec_enable_chdir;
59106 +extern int grsec_resource_logging;
59107 +extern int grsec_enable_blackhole;
59108 +extern int grsec_lastack_retries;
59109 +extern int grsec_enable_brute;
59110 +extern int grsec_lock;
59111 +
59112 +extern spinlock_t grsec_alert_lock;
59113 +extern unsigned long grsec_alert_wtime;
59114 +extern unsigned long grsec_alert_fyet;
59115 +
59116 +extern spinlock_t grsec_audit_lock;
59117 +
59118 +extern rwlock_t grsec_exec_file_lock;
59119 +
59120 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
59121 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
59122 + (tsk)->exec_file->f_vfsmnt) : "/")
59123 +
59124 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
59125 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
59126 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59127 +
59128 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
59129 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
59130 + (tsk)->exec_file->f_vfsmnt) : "/")
59131 +
59132 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
59133 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
59134 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59135 +
59136 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
59137 +
59138 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
59139 +
59140 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
59141 + (task)->pid, (cred)->uid, \
59142 + (cred)->euid, (cred)->gid, (cred)->egid, \
59143 + gr_parent_task_fullpath(task), \
59144 + (task)->real_parent->comm, (task)->real_parent->pid, \
59145 + (pcred)->uid, (pcred)->euid, \
59146 + (pcred)->gid, (pcred)->egid
59147 +
59148 +#define GR_CHROOT_CAPS {{ \
59149 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
59150 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
59151 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
59152 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
59153 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
59154 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
59155 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
59156 +
59157 +#define security_learn(normal_msg,args...) \
59158 +({ \
59159 + read_lock(&grsec_exec_file_lock); \
59160 + gr_add_learn_entry(normal_msg "\n", ## args); \
59161 + read_unlock(&grsec_exec_file_lock); \
59162 +})
59163 +
59164 +enum {
59165 + GR_DO_AUDIT,
59166 + GR_DONT_AUDIT,
59167 + /* used for non-audit messages that we shouldn't kill the task on */
59168 + GR_DONT_AUDIT_GOOD
59169 +};
59170 +
59171 +enum {
59172 + GR_TTYSNIFF,
59173 + GR_RBAC,
59174 + GR_RBAC_STR,
59175 + GR_STR_RBAC,
59176 + GR_RBAC_MODE2,
59177 + GR_RBAC_MODE3,
59178 + GR_FILENAME,
59179 + GR_SYSCTL_HIDDEN,
59180 + GR_NOARGS,
59181 + GR_ONE_INT,
59182 + GR_ONE_INT_TWO_STR,
59183 + GR_ONE_STR,
59184 + GR_STR_INT,
59185 + GR_TWO_STR_INT,
59186 + GR_TWO_INT,
59187 + GR_TWO_U64,
59188 + GR_THREE_INT,
59189 + GR_FIVE_INT_TWO_STR,
59190 + GR_TWO_STR,
59191 + GR_THREE_STR,
59192 + GR_FOUR_STR,
59193 + GR_STR_FILENAME,
59194 + GR_FILENAME_STR,
59195 + GR_FILENAME_TWO_INT,
59196 + GR_FILENAME_TWO_INT_STR,
59197 + GR_TEXTREL,
59198 + GR_PTRACE,
59199 + GR_RESOURCE,
59200 + GR_CAP,
59201 + GR_SIG,
59202 + GR_SIG2,
59203 + GR_CRASH1,
59204 + GR_CRASH2,
59205 + GR_PSACCT,
59206 + GR_RWXMAP
59207 +};
59208 +
59209 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
59210 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
59211 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
59212 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
59213 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
59214 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
59215 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
59216 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
59217 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
59218 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
59219 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
59220 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
59221 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
59222 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
59223 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
59224 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
59225 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
59226 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
59227 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
59228 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
59229 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
59230 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
59231 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
59232 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
59233 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
59234 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
59235 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
59236 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
59237 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
59238 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
59239 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
59240 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
59241 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
59242 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
59243 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
59244 +
59245 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
59246 +
59247 +#endif
59248 +
59249 +#endif
59250 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
59251 new file mode 100644
59252 index 0000000..ae576a1
59253 --- /dev/null
59254 +++ b/include/linux/grmsg.h
59255 @@ -0,0 +1,109 @@
59256 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
59257 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
59258 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
59259 +#define GR_STOPMOD_MSG "denied modification of module state by "
59260 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
59261 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
59262 +#define GR_IOPERM_MSG "denied use of ioperm() by "
59263 +#define GR_IOPL_MSG "denied use of iopl() by "
59264 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
59265 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
59266 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
59267 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
59268 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
59269 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
59270 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
59271 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
59272 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
59273 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
59274 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
59275 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
59276 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
59277 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
59278 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
59279 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
59280 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
59281 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
59282 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
59283 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
59284 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
59285 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
59286 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
59287 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
59288 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
59289 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
59290 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
59291 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
59292 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
59293 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
59294 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
59295 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
59296 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
59297 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
59298 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
59299 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
59300 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
59301 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
59302 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
59303 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
59304 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
59305 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
59306 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
59307 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
59308 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
59309 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
59310 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
59311 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
59312 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
59313 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
59314 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59315 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59316 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59317 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59318 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59319 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59320 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59321 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59322 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59323 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59324 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59325 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59326 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59327 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
59328 +#define GR_NICE_CHROOT_MSG "denied priority change by "
59329 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59330 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59331 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59332 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59333 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59334 +#define GR_TIME_MSG "time set by "
59335 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59336 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59337 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59338 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59339 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59340 +#define GR_BIND_MSG "denied bind() by "
59341 +#define GR_CONNECT_MSG "denied connect() by "
59342 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59343 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59344 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59345 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59346 +#define GR_CAP_ACL_MSG "use of %s denied for "
59347 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
59348 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59349 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59350 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59351 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59352 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59353 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59354 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59355 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59356 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59357 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59358 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59359 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59360 +#define GR_VM86_MSG "denied use of vm86 by "
59361 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59362 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
59363 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
59364 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
59365 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
59366 new file mode 100644
59367 index 0000000..2ccf677
59368 --- /dev/null
59369 +++ b/include/linux/grsecurity.h
59370 @@ -0,0 +1,229 @@
59371 +#ifndef GR_SECURITY_H
59372 +#define GR_SECURITY_H
59373 +#include <linux/fs.h>
59374 +#include <linux/fs_struct.h>
59375 +#include <linux/binfmts.h>
59376 +#include <linux/gracl.h>
59377 +
59378 +/* notify of brain-dead configs */
59379 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59380 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59381 +#endif
59382 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59383 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59384 +#endif
59385 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59386 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59387 +#endif
59388 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59389 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
59390 +#endif
59391 +
59392 +#include <linux/compat.h>
59393 +
59394 +struct user_arg_ptr {
59395 +#ifdef CONFIG_COMPAT
59396 + bool is_compat;
59397 +#endif
59398 + union {
59399 + const char __user *const __user *native;
59400 +#ifdef CONFIG_COMPAT
59401 + compat_uptr_t __user *compat;
59402 +#endif
59403 + } ptr;
59404 +};
59405 +
59406 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59407 +void gr_handle_brute_check(void);
59408 +void gr_handle_kernel_exploit(void);
59409 +int gr_process_user_ban(void);
59410 +
59411 +char gr_roletype_to_char(void);
59412 +
59413 +int gr_acl_enable_at_secure(void);
59414 +
59415 +int gr_check_user_change(int real, int effective, int fs);
59416 +int gr_check_group_change(int real, int effective, int fs);
59417 +
59418 +void gr_del_task_from_ip_table(struct task_struct *p);
59419 +
59420 +int gr_pid_is_chrooted(struct task_struct *p);
59421 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59422 +int gr_handle_chroot_nice(void);
59423 +int gr_handle_chroot_sysctl(const int op);
59424 +int gr_handle_chroot_setpriority(struct task_struct *p,
59425 + const int niceval);
59426 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59427 +int gr_handle_chroot_chroot(const struct dentry *dentry,
59428 + const struct vfsmount *mnt);
59429 +void gr_handle_chroot_chdir(struct path *path);
59430 +int gr_handle_chroot_chmod(const struct dentry *dentry,
59431 + const struct vfsmount *mnt, const int mode);
59432 +int gr_handle_chroot_mknod(const struct dentry *dentry,
59433 + const struct vfsmount *mnt, const int mode);
59434 +int gr_handle_chroot_mount(const struct dentry *dentry,
59435 + const struct vfsmount *mnt,
59436 + const char *dev_name);
59437 +int gr_handle_chroot_pivot(void);
59438 +int gr_handle_chroot_unix(const pid_t pid);
59439 +
59440 +int gr_handle_rawio(const struct inode *inode);
59441 +
59442 +void gr_handle_ioperm(void);
59443 +void gr_handle_iopl(void);
59444 +
59445 +umode_t gr_acl_umask(void);
59446 +
59447 +int gr_tpe_allow(const struct file *file);
59448 +
59449 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59450 +void gr_clear_chroot_entries(struct task_struct *task);
59451 +
59452 +void gr_log_forkfail(const int retval);
59453 +void gr_log_timechange(void);
59454 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59455 +void gr_log_chdir(const struct dentry *dentry,
59456 + const struct vfsmount *mnt);
59457 +void gr_log_chroot_exec(const struct dentry *dentry,
59458 + const struct vfsmount *mnt);
59459 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59460 +void gr_log_remount(const char *devname, const int retval);
59461 +void gr_log_unmount(const char *devname, const int retval);
59462 +void gr_log_mount(const char *from, const char *to, const int retval);
59463 +void gr_log_textrel(struct vm_area_struct *vma);
59464 +void gr_log_rwxmmap(struct file *file);
59465 +void gr_log_rwxmprotect(struct file *file);
59466 +
59467 +int gr_handle_follow_link(const struct inode *parent,
59468 + const struct inode *inode,
59469 + const struct dentry *dentry,
59470 + const struct vfsmount *mnt);
59471 +int gr_handle_fifo(const struct dentry *dentry,
59472 + const struct vfsmount *mnt,
59473 + const struct dentry *dir, const int flag,
59474 + const int acc_mode);
59475 +int gr_handle_hardlink(const struct dentry *dentry,
59476 + const struct vfsmount *mnt,
59477 + struct inode *inode,
59478 + const int mode, const char *to);
59479 +
59480 +int gr_is_capable(const int cap);
59481 +int gr_is_capable_nolog(const int cap);
59482 +void gr_learn_resource(const struct task_struct *task, const int limit,
59483 + const unsigned long wanted, const int gt);
59484 +void gr_copy_label(struct task_struct *tsk);
59485 +void gr_handle_crash(struct task_struct *task, const int sig);
59486 +int gr_handle_signal(const struct task_struct *p, const int sig);
59487 +int gr_check_crash_uid(const uid_t uid);
59488 +int gr_check_protected_task(const struct task_struct *task);
59489 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59490 +int gr_acl_handle_mmap(const struct file *file,
59491 + const unsigned long prot);
59492 +int gr_acl_handle_mprotect(const struct file *file,
59493 + const unsigned long prot);
59494 +int gr_check_hidden_task(const struct task_struct *tsk);
59495 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59496 + const struct vfsmount *mnt);
59497 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
59498 + const struct vfsmount *mnt);
59499 +__u32 gr_acl_handle_access(const struct dentry *dentry,
59500 + const struct vfsmount *mnt, const int fmode);
59501 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59502 + const struct vfsmount *mnt, umode_t *mode);
59503 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
59504 + const struct vfsmount *mnt);
59505 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59506 + const struct vfsmount *mnt);
59507 +int gr_handle_ptrace(struct task_struct *task, const long request);
59508 +int gr_handle_proc_ptrace(struct task_struct *task);
59509 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
59510 + const struct vfsmount *mnt);
59511 +int gr_check_crash_exec(const struct file *filp);
59512 +int gr_acl_is_enabled(void);
59513 +void gr_set_kernel_label(struct task_struct *task);
59514 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
59515 + const gid_t gid);
59516 +int gr_set_proc_label(const struct dentry *dentry,
59517 + const struct vfsmount *mnt,
59518 + const int unsafe_flags);
59519 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59520 + const struct vfsmount *mnt);
59521 +__u32 gr_acl_handle_open(const struct dentry *dentry,
59522 + const struct vfsmount *mnt, int acc_mode);
59523 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
59524 + const struct dentry *p_dentry,
59525 + const struct vfsmount *p_mnt,
59526 + int open_flags, int acc_mode, const int imode);
59527 +void gr_handle_create(const struct dentry *dentry,
59528 + const struct vfsmount *mnt);
59529 +void gr_handle_proc_create(const struct dentry *dentry,
59530 + const struct inode *inode);
59531 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59532 + const struct dentry *parent_dentry,
59533 + const struct vfsmount *parent_mnt,
59534 + const int mode);
59535 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59536 + const struct dentry *parent_dentry,
59537 + const struct vfsmount *parent_mnt);
59538 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59539 + const struct vfsmount *mnt);
59540 +void gr_handle_delete(const ino_t ino, const dev_t dev);
59541 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59542 + const struct vfsmount *mnt);
59543 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59544 + const struct dentry *parent_dentry,
59545 + const struct vfsmount *parent_mnt,
59546 + const char *from);
59547 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59548 + const struct dentry *parent_dentry,
59549 + const struct vfsmount *parent_mnt,
59550 + const struct dentry *old_dentry,
59551 + const struct vfsmount *old_mnt, const char *to);
59552 +int gr_acl_handle_rename(struct dentry *new_dentry,
59553 + struct dentry *parent_dentry,
59554 + const struct vfsmount *parent_mnt,
59555 + struct dentry *old_dentry,
59556 + struct inode *old_parent_inode,
59557 + struct vfsmount *old_mnt, const char *newname);
59558 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59559 + struct dentry *old_dentry,
59560 + struct dentry *new_dentry,
59561 + struct vfsmount *mnt, const __u8 replace);
59562 +__u32 gr_check_link(const struct dentry *new_dentry,
59563 + const struct dentry *parent_dentry,
59564 + const struct vfsmount *parent_mnt,
59565 + const struct dentry *old_dentry,
59566 + const struct vfsmount *old_mnt);
59567 +int gr_acl_handle_filldir(const struct file *file, const char *name,
59568 + const unsigned int namelen, const ino_t ino);
59569 +
59570 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
59571 + const struct vfsmount *mnt);
59572 +void gr_acl_handle_exit(void);
59573 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
59574 +int gr_acl_handle_procpidmem(const struct task_struct *task);
59575 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59576 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59577 +void gr_audit_ptrace(struct task_struct *task);
59578 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59579 +
59580 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
59581 +
59582 +#ifdef CONFIG_GRKERNSEC
59583 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59584 +void gr_handle_vm86(void);
59585 +void gr_handle_mem_readwrite(u64 from, u64 to);
59586 +
59587 +void gr_log_badprocpid(const char *entry);
59588 +
59589 +extern int grsec_enable_dmesg;
59590 +extern int grsec_disable_privio;
59591 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59592 +extern int grsec_enable_chroot_findtask;
59593 +#endif
59594 +#ifdef CONFIG_GRKERNSEC_SETXID
59595 +extern int grsec_enable_setxid;
59596 +#endif
59597 +#endif
59598 +
59599 +#endif
59600 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
59601 new file mode 100644
59602 index 0000000..e7ffaaf
59603 --- /dev/null
59604 +++ b/include/linux/grsock.h
59605 @@ -0,0 +1,19 @@
59606 +#ifndef __GRSOCK_H
59607 +#define __GRSOCK_H
59608 +
59609 +extern void gr_attach_curr_ip(const struct sock *sk);
59610 +extern int gr_handle_sock_all(const int family, const int type,
59611 + const int protocol);
59612 +extern int gr_handle_sock_server(const struct sockaddr *sck);
59613 +extern int gr_handle_sock_server_other(const struct sock *sck);
59614 +extern int gr_handle_sock_client(const struct sockaddr *sck);
59615 +extern int gr_search_connect(struct socket * sock,
59616 + struct sockaddr_in * addr);
59617 +extern int gr_search_bind(struct socket * sock,
59618 + struct sockaddr_in * addr);
59619 +extern int gr_search_listen(struct socket * sock);
59620 +extern int gr_search_accept(struct socket * sock);
59621 +extern int gr_search_socket(const int domain, const int type,
59622 + const int protocol);
59623 +
59624 +#endif
59625 diff --git a/include/linux/hid.h b/include/linux/hid.h
59626 index c235e4e..f0cf7a0 100644
59627 --- a/include/linux/hid.h
59628 +++ b/include/linux/hid.h
59629 @@ -679,7 +679,7 @@ struct hid_ll_driver {
59630 unsigned int code, int value);
59631
59632 int (*parse)(struct hid_device *hdev);
59633 -};
59634 +} __no_const;
59635
59636 #define PM_HINT_FULLON 1<<5
59637 #define PM_HINT_NORMAL 1<<1
59638 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
59639 index 3a93f73..b19d0b3 100644
59640 --- a/include/linux/highmem.h
59641 +++ b/include/linux/highmem.h
59642 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
59643 kunmap_atomic(kaddr, KM_USER0);
59644 }
59645
59646 +static inline void sanitize_highpage(struct page *page)
59647 +{
59648 + void *kaddr;
59649 + unsigned long flags;
59650 +
59651 + local_irq_save(flags);
59652 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
59653 + clear_page(kaddr);
59654 + kunmap_atomic(kaddr, KM_CLEARPAGE);
59655 + local_irq_restore(flags);
59656 +}
59657 +
59658 static inline void zero_user_segments(struct page *page,
59659 unsigned start1, unsigned end1,
59660 unsigned start2, unsigned end2)
59661 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
59662 index 07d103a..04ec65b 100644
59663 --- a/include/linux/i2c.h
59664 +++ b/include/linux/i2c.h
59665 @@ -364,6 +364,7 @@ struct i2c_algorithm {
59666 /* To determine what the adapter supports */
59667 u32 (*functionality) (struct i2c_adapter *);
59668 };
59669 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59670
59671 /*
59672 * i2c_adapter is the structure used to identify a physical i2c bus along
59673 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
59674 index a6deef4..c56a7f2 100644
59675 --- a/include/linux/i2o.h
59676 +++ b/include/linux/i2o.h
59677 @@ -564,7 +564,7 @@ struct i2o_controller {
59678 struct i2o_device *exec; /* Executive */
59679 #if BITS_PER_LONG == 64
59680 spinlock_t context_list_lock; /* lock for context_list */
59681 - atomic_t context_list_counter; /* needed for unique contexts */
59682 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59683 struct list_head context_list; /* list of context id's
59684 and pointers */
59685 #endif
59686 diff --git a/include/linux/init.h b/include/linux/init.h
59687 index 9146f39..885354d 100644
59688 --- a/include/linux/init.h
59689 +++ b/include/linux/init.h
59690 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
59691
59692 /* Each module must use one module_init(). */
59693 #define module_init(initfn) \
59694 - static inline initcall_t __inittest(void) \
59695 + static inline __used initcall_t __inittest(void) \
59696 { return initfn; } \
59697 int init_module(void) __attribute__((alias(#initfn)));
59698
59699 /* This is only required if you want to be unloadable. */
59700 #define module_exit(exitfn) \
59701 - static inline exitcall_t __exittest(void) \
59702 + static inline __used exitcall_t __exittest(void) \
59703 { return exitfn; } \
59704 void cleanup_module(void) __attribute__((alias(#exitfn)));
59705
59706 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
59707 index 32574ee..00d4ef1 100644
59708 --- a/include/linux/init_task.h
59709 +++ b/include/linux/init_task.h
59710 @@ -128,6 +128,12 @@ extern struct cred init_cred;
59711
59712 #define INIT_TASK_COMM "swapper"
59713
59714 +#ifdef CONFIG_X86
59715 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59716 +#else
59717 +#define INIT_TASK_THREAD_INFO
59718 +#endif
59719 +
59720 /*
59721 * INIT_TASK is used to set up the first task table, touch at
59722 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59723 @@ -166,6 +172,7 @@ extern struct cred init_cred;
59724 RCU_INIT_POINTER(.cred, &init_cred), \
59725 .comm = INIT_TASK_COMM, \
59726 .thread = INIT_THREAD, \
59727 + INIT_TASK_THREAD_INFO \
59728 .fs = &init_fs, \
59729 .files = &init_files, \
59730 .signal = &init_signals, \
59731 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
59732 index e6ca56d..8583707 100644
59733 --- a/include/linux/intel-iommu.h
59734 +++ b/include/linux/intel-iommu.h
59735 @@ -296,7 +296,7 @@ struct iommu_flush {
59736 u8 fm, u64 type);
59737 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59738 unsigned int size_order, u64 type);
59739 -};
59740 +} __no_const;
59741
59742 enum {
59743 SR_DMAR_FECTL_REG,
59744 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
59745 index a64b00e..464d8bc 100644
59746 --- a/include/linux/interrupt.h
59747 +++ b/include/linux/interrupt.h
59748 @@ -441,7 +441,7 @@ enum
59749 /* map softirq index to softirq name. update 'softirq_to_name' in
59750 * kernel/softirq.c when adding a new softirq.
59751 */
59752 -extern char *softirq_to_name[NR_SOFTIRQS];
59753 +extern const char * const softirq_to_name[NR_SOFTIRQS];
59754
59755 /* softirq mask and active fields moved to irq_cpustat_t in
59756 * asm/hardirq.h to get better cache usage. KAO
59757 @@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
59758
59759 struct softirq_action
59760 {
59761 - void (*action)(struct softirq_action *);
59762 + void (*action)(void);
59763 };
59764
59765 asmlinkage void do_softirq(void);
59766 asmlinkage void __do_softirq(void);
59767 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59768 +extern void open_softirq(int nr, void (*action)(void));
59769 extern void softirq_init(void);
59770 static inline void __raise_softirq_irqoff(unsigned int nr)
59771 {
59772 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
59773 index 3875719..4cd454c 100644
59774 --- a/include/linux/kallsyms.h
59775 +++ b/include/linux/kallsyms.h
59776 @@ -15,7 +15,8 @@
59777
59778 struct module;
59779
59780 -#ifdef CONFIG_KALLSYMS
59781 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59782 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59783 /* Lookup the address for a symbol. Returns 0 if not found. */
59784 unsigned long kallsyms_lookup_name(const char *name);
59785
59786 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
59787 /* Stupid that this does nothing, but I didn't create this mess. */
59788 #define __print_symbol(fmt, addr)
59789 #endif /*CONFIG_KALLSYMS*/
59790 +#else /* when included by kallsyms.c, vsnprintf.c, or
59791 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59792 +extern void __print_symbol(const char *fmt, unsigned long address);
59793 +extern int sprint_backtrace(char *buffer, unsigned long address);
59794 +extern int sprint_symbol(char *buffer, unsigned long address);
59795 +const char *kallsyms_lookup(unsigned long addr,
59796 + unsigned long *symbolsize,
59797 + unsigned long *offset,
59798 + char **modname, char *namebuf);
59799 +#endif
59800
59801 /* This macro allows us to keep printk typechecking */
59802 static __printf(1, 2)
59803 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
59804 index fa39183..40160be 100644
59805 --- a/include/linux/kgdb.h
59806 +++ b/include/linux/kgdb.h
59807 @@ -53,7 +53,7 @@ extern int kgdb_connected;
59808 extern int kgdb_io_module_registered;
59809
59810 extern atomic_t kgdb_setting_breakpoint;
59811 -extern atomic_t kgdb_cpu_doing_single_step;
59812 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59813
59814 extern struct task_struct *kgdb_usethread;
59815 extern struct task_struct *kgdb_contthread;
59816 @@ -251,7 +251,7 @@ struct kgdb_arch {
59817 void (*disable_hw_break)(struct pt_regs *regs);
59818 void (*remove_all_hw_break)(void);
59819 void (*correct_hw_break)(void);
59820 -};
59821 +} __do_const;
59822
59823 /**
59824 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59825 @@ -276,7 +276,7 @@ struct kgdb_io {
59826 void (*pre_exception) (void);
59827 void (*post_exception) (void);
59828 int is_console;
59829 -};
59830 +} __do_const;
59831
59832 extern struct kgdb_arch arch_kgdb_ops;
59833
59834 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
59835 index b16f653..eb908f4 100644
59836 --- a/include/linux/kmod.h
59837 +++ b/include/linux/kmod.h
59838 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
59839 * usually useless though. */
59840 extern __printf(2, 3)
59841 int __request_module(bool wait, const char *name, ...);
59842 +extern __printf(3, 4)
59843 +int ___request_module(bool wait, char *param_name, const char *name, ...);
59844 #define request_module(mod...) __request_module(true, mod)
59845 #define request_module_nowait(mod...) __request_module(false, mod)
59846 #define try_then_request_module(x, mod...) \
59847 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
59848 index d526231..086e89b 100644
59849 --- a/include/linux/kvm_host.h
59850 +++ b/include/linux/kvm_host.h
59851 @@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
59852 void vcpu_load(struct kvm_vcpu *vcpu);
59853 void vcpu_put(struct kvm_vcpu *vcpu);
59854
59855 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59856 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59857 struct module *module);
59858 void kvm_exit(void);
59859
59860 @@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
59861 struct kvm_guest_debug *dbg);
59862 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59863
59864 -int kvm_arch_init(void *opaque);
59865 +int kvm_arch_init(const void *opaque);
59866 void kvm_arch_exit(void);
59867
59868 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59869 diff --git a/include/linux/libata.h b/include/linux/libata.h
59870 index cafc09a..d7e7829 100644
59871 --- a/include/linux/libata.h
59872 +++ b/include/linux/libata.h
59873 @@ -909,7 +909,7 @@ struct ata_port_operations {
59874 * fields must be pointers.
59875 */
59876 const struct ata_port_operations *inherits;
59877 -};
59878 +} __do_const;
59879
59880 struct ata_port_info {
59881 unsigned long flags;
59882 diff --git a/include/linux/mca.h b/include/linux/mca.h
59883 index 3797270..7765ede 100644
59884 --- a/include/linux/mca.h
59885 +++ b/include/linux/mca.h
59886 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59887 int region);
59888 void * (*mca_transform_memory)(struct mca_device *,
59889 void *memory);
59890 -};
59891 +} __no_const;
59892
59893 struct mca_bus {
59894 u64 default_dma_mask;
59895 diff --git a/include/linux/memory.h b/include/linux/memory.h
59896 index 935699b..11042cc 100644
59897 --- a/include/linux/memory.h
59898 +++ b/include/linux/memory.h
59899 @@ -144,7 +144,7 @@ struct memory_accessor {
59900 size_t count);
59901 ssize_t (*write)(struct memory_accessor *, const char *buf,
59902 off_t offset, size_t count);
59903 -};
59904 +} __no_const;
59905
59906 /*
59907 * Kernel text modification mutex, used for code patching. Users of this lock
59908 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
59909 index 9970337..9444122 100644
59910 --- a/include/linux/mfd/abx500.h
59911 +++ b/include/linux/mfd/abx500.h
59912 @@ -188,6 +188,7 @@ struct abx500_ops {
59913 int (*event_registers_startup_state_get) (struct device *, u8 *);
59914 int (*startup_irq_enabled) (struct device *, unsigned int);
59915 };
59916 +typedef struct abx500_ops __no_const abx500_ops_no_const;
59917
59918 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59919 void abx500_remove_ops(struct device *dev);
59920 diff --git a/include/linux/mm.h b/include/linux/mm.h
59921 index 4baadd1..2e0b45e 100644
59922 --- a/include/linux/mm.h
59923 +++ b/include/linux/mm.h
59924 @@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
59925
59926 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59927 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59928 +
59929 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59930 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59931 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59932 +#else
59933 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59934 +#endif
59935 +
59936 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59937 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59938
59939 @@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
59940 int set_page_dirty_lock(struct page *page);
59941 int clear_page_dirty_for_io(struct page *page);
59942
59943 -/* Is the vma a continuation of the stack vma above it? */
59944 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59945 -{
59946 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59947 -}
59948 -
59949 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
59950 - unsigned long addr)
59951 -{
59952 - return (vma->vm_flags & VM_GROWSDOWN) &&
59953 - (vma->vm_start == addr) &&
59954 - !vma_growsdown(vma->vm_prev, addr);
59955 -}
59956 -
59957 -/* Is the vma a continuation of the stack vma below it? */
59958 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59959 -{
59960 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59961 -}
59962 -
59963 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
59964 - unsigned long addr)
59965 -{
59966 - return (vma->vm_flags & VM_GROWSUP) &&
59967 - (vma->vm_end == addr) &&
59968 - !vma_growsup(vma->vm_next, addr);
59969 -}
59970 -
59971 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59972 unsigned long old_addr, struct vm_area_struct *new_vma,
59973 unsigned long new_addr, unsigned long len);
59974 @@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
59975 }
59976 #endif
59977
59978 +#ifdef CONFIG_MMU
59979 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59980 +#else
59981 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59982 +{
59983 + return __pgprot(0);
59984 +}
59985 +#endif
59986 +
59987 int vma_wants_writenotify(struct vm_area_struct *vma);
59988
59989 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59990 @@ -1419,6 +1407,7 @@ out:
59991 }
59992
59993 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59994 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59995
59996 extern unsigned long do_brk(unsigned long, unsigned long);
59997
59998 @@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
59999 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
60000 struct vm_area_struct **pprev);
60001
60002 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
60003 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
60004 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
60005 +
60006 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
60007 NULL if none. Assume start_addr < end_addr. */
60008 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
60009 @@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
60010 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
60011 }
60012
60013 -#ifdef CONFIG_MMU
60014 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
60015 -#else
60016 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
60017 -{
60018 - return __pgprot(0);
60019 -}
60020 -#endif
60021 -
60022 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
60023 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
60024 unsigned long pfn, unsigned long size, pgprot_t);
60025 @@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
60026 extern int sysctl_memory_failure_early_kill;
60027 extern int sysctl_memory_failure_recovery;
60028 extern void shake_page(struct page *p, int access);
60029 -extern atomic_long_t mce_bad_pages;
60030 +extern atomic_long_unchecked_t mce_bad_pages;
60031 extern int soft_offline_page(struct page *page, int flags);
60032
60033 extern void dump_page(struct page *page);
60034 @@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
60035 unsigned int pages_per_huge_page);
60036 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
60037
60038 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
60039 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
60040 +#else
60041 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
60042 +#endif
60043 +
60044 #endif /* __KERNEL__ */
60045 #endif /* _LINUX_MM_H */
60046 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
60047 index 5b42f1b..759e4b4 100644
60048 --- a/include/linux/mm_types.h
60049 +++ b/include/linux/mm_types.h
60050 @@ -253,6 +253,8 @@ struct vm_area_struct {
60051 #ifdef CONFIG_NUMA
60052 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
60053 #endif
60054 +
60055 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
60056 };
60057
60058 struct core_thread {
60059 @@ -389,6 +391,24 @@ struct mm_struct {
60060 #ifdef CONFIG_CPUMASK_OFFSTACK
60061 struct cpumask cpumask_allocation;
60062 #endif
60063 +
60064 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60065 + unsigned long pax_flags;
60066 +#endif
60067 +
60068 +#ifdef CONFIG_PAX_DLRESOLVE
60069 + unsigned long call_dl_resolve;
60070 +#endif
60071 +
60072 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
60073 + unsigned long call_syscall;
60074 +#endif
60075 +
60076 +#ifdef CONFIG_PAX_ASLR
60077 + unsigned long delta_mmap; /* randomized offset */
60078 + unsigned long delta_stack; /* randomized offset */
60079 +#endif
60080 +
60081 };
60082
60083 static inline void mm_init_cpumask(struct mm_struct *mm)
60084 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
60085 index 1d1b1e1..2a13c78 100644
60086 --- a/include/linux/mmu_notifier.h
60087 +++ b/include/linux/mmu_notifier.h
60088 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
60089 */
60090 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
60091 ({ \
60092 - pte_t __pte; \
60093 + pte_t ___pte; \
60094 struct vm_area_struct *___vma = __vma; \
60095 unsigned long ___address = __address; \
60096 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
60097 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
60098 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
60099 - __pte; \
60100 + ___pte; \
60101 })
60102
60103 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
60104 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
60105 index 188cb2f..d78409b 100644
60106 --- a/include/linux/mmzone.h
60107 +++ b/include/linux/mmzone.h
60108 @@ -369,7 +369,7 @@ struct zone {
60109 unsigned long flags; /* zone flags, see below */
60110
60111 /* Zone statistics */
60112 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60113 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60114
60115 /*
60116 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
60117 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
60118 index 468819c..17b9db3 100644
60119 --- a/include/linux/mod_devicetable.h
60120 +++ b/include/linux/mod_devicetable.h
60121 @@ -12,7 +12,7 @@
60122 typedef unsigned long kernel_ulong_t;
60123 #endif
60124
60125 -#define PCI_ANY_ID (~0)
60126 +#define PCI_ANY_ID ((__u16)~0)
60127
60128 struct pci_device_id {
60129 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
60130 @@ -131,7 +131,7 @@ struct usb_device_id {
60131 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
60132 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
60133
60134 -#define HID_ANY_ID (~0)
60135 +#define HID_ANY_ID (~0U)
60136
60137 struct hid_device_id {
60138 __u16 bus;
60139 diff --git a/include/linux/module.h b/include/linux/module.h
60140 index 3cb7839..511cb87 100644
60141 --- a/include/linux/module.h
60142 +++ b/include/linux/module.h
60143 @@ -17,6 +17,7 @@
60144 #include <linux/moduleparam.h>
60145 #include <linux/tracepoint.h>
60146 #include <linux/export.h>
60147 +#include <linux/fs.h>
60148
60149 #include <linux/percpu.h>
60150 #include <asm/module.h>
60151 @@ -261,19 +262,16 @@ struct module
60152 int (*init)(void);
60153
60154 /* If this is non-NULL, vfree after init() returns */
60155 - void *module_init;
60156 + void *module_init_rx, *module_init_rw;
60157
60158 /* Here is the actual code + data, vfree'd on unload. */
60159 - void *module_core;
60160 + void *module_core_rx, *module_core_rw;
60161
60162 /* Here are the sizes of the init and core sections */
60163 - unsigned int init_size, core_size;
60164 + unsigned int init_size_rw, core_size_rw;
60165
60166 /* The size of the executable code in each section. */
60167 - unsigned int init_text_size, core_text_size;
60168 -
60169 - /* Size of RO sections of the module (text+rodata) */
60170 - unsigned int init_ro_size, core_ro_size;
60171 + unsigned int init_size_rx, core_size_rx;
60172
60173 /* Arch-specific module values */
60174 struct mod_arch_specific arch;
60175 @@ -329,6 +327,10 @@ struct module
60176 #ifdef CONFIG_EVENT_TRACING
60177 struct ftrace_event_call **trace_events;
60178 unsigned int num_trace_events;
60179 + struct file_operations trace_id;
60180 + struct file_operations trace_enable;
60181 + struct file_operations trace_format;
60182 + struct file_operations trace_filter;
60183 #endif
60184 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
60185 unsigned int num_ftrace_callsites;
60186 @@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
60187 bool is_module_percpu_address(unsigned long addr);
60188 bool is_module_text_address(unsigned long addr);
60189
60190 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
60191 +{
60192 +
60193 +#ifdef CONFIG_PAX_KERNEXEC
60194 + if (ktla_ktva(addr) >= (unsigned long)start &&
60195 + ktla_ktva(addr) < (unsigned long)start + size)
60196 + return 1;
60197 +#endif
60198 +
60199 + return ((void *)addr >= start && (void *)addr < start + size);
60200 +}
60201 +
60202 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
60203 +{
60204 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
60205 +}
60206 +
60207 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
60208 +{
60209 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
60210 +}
60211 +
60212 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
60213 +{
60214 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
60215 +}
60216 +
60217 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
60218 +{
60219 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
60220 +}
60221 +
60222 static inline int within_module_core(unsigned long addr, struct module *mod)
60223 {
60224 - return (unsigned long)mod->module_core <= addr &&
60225 - addr < (unsigned long)mod->module_core + mod->core_size;
60226 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
60227 }
60228
60229 static inline int within_module_init(unsigned long addr, struct module *mod)
60230 {
60231 - return (unsigned long)mod->module_init <= addr &&
60232 - addr < (unsigned long)mod->module_init + mod->init_size;
60233 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
60234 }
60235
60236 /* Search for module by name: must hold module_mutex. */
60237 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
60238 index b2be02e..6a9fdb1 100644
60239 --- a/include/linux/moduleloader.h
60240 +++ b/include/linux/moduleloader.h
60241 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
60242 sections. Returns NULL on failure. */
60243 void *module_alloc(unsigned long size);
60244
60245 +#ifdef CONFIG_PAX_KERNEXEC
60246 +void *module_alloc_exec(unsigned long size);
60247 +#else
60248 +#define module_alloc_exec(x) module_alloc(x)
60249 +#endif
60250 +
60251 /* Free memory returned from module_alloc. */
60252 void module_free(struct module *mod, void *module_region);
60253
60254 +#ifdef CONFIG_PAX_KERNEXEC
60255 +void module_free_exec(struct module *mod, void *module_region);
60256 +#else
60257 +#define module_free_exec(x, y) module_free((x), (y))
60258 +#endif
60259 +
60260 /* Apply the given relocation to the (simplified) ELF. Return -error
60261 or 0. */
60262 int apply_relocate(Elf_Shdr *sechdrs,
60263 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
60264 index 7939f63..ec6df57 100644
60265 --- a/include/linux/moduleparam.h
60266 +++ b/include/linux/moduleparam.h
60267 @@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
60268 * @len is usually just sizeof(string).
60269 */
60270 #define module_param_string(name, string, len, perm) \
60271 - static const struct kparam_string __param_string_##name \
60272 + static const struct kparam_string __param_string_##name __used \
60273 = { len, string }; \
60274 __module_param_call(MODULE_PARAM_PREFIX, name, \
60275 &param_ops_string, \
60276 @@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
60277 * module_param_named() for why this might be necessary.
60278 */
60279 #define module_param_array_named(name, array, type, nump, perm) \
60280 - static const struct kparam_array __param_arr_##name \
60281 + static const struct kparam_array __param_arr_##name __used \
60282 = { .max = ARRAY_SIZE(array), .num = nump, \
60283 .ops = &param_ops_##type, \
60284 .elemsize = sizeof(array[0]), .elem = array }; \
60285 diff --git a/include/linux/namei.h b/include/linux/namei.h
60286 index ffc0213..2c1f2cb 100644
60287 --- a/include/linux/namei.h
60288 +++ b/include/linux/namei.h
60289 @@ -24,7 +24,7 @@ struct nameidata {
60290 unsigned seq;
60291 int last_type;
60292 unsigned depth;
60293 - char *saved_names[MAX_NESTED_LINKS + 1];
60294 + const char *saved_names[MAX_NESTED_LINKS + 1];
60295
60296 /* Intent data */
60297 union {
60298 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
60299 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
60300 extern void unlock_rename(struct dentry *, struct dentry *);
60301
60302 -static inline void nd_set_link(struct nameidata *nd, char *path)
60303 +static inline void nd_set_link(struct nameidata *nd, const char *path)
60304 {
60305 nd->saved_names[nd->depth] = path;
60306 }
60307
60308 -static inline char *nd_get_link(struct nameidata *nd)
60309 +static inline const char *nd_get_link(const struct nameidata *nd)
60310 {
60311 return nd->saved_names[nd->depth];
60312 }
60313 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
60314 index a82ad4d..90d15b7 100644
60315 --- a/include/linux/netdevice.h
60316 +++ b/include/linux/netdevice.h
60317 @@ -949,6 +949,7 @@ struct net_device_ops {
60318 int (*ndo_set_features)(struct net_device *dev,
60319 u32 features);
60320 };
60321 +typedef struct net_device_ops __no_const net_device_ops_no_const;
60322
60323 /*
60324 * The DEVICE structure.
60325 @@ -1088,7 +1089,7 @@ struct net_device {
60326 int iflink;
60327
60328 struct net_device_stats stats;
60329 - atomic_long_t rx_dropped; /* dropped packets by core network
60330 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
60331 * Do not use this in drivers.
60332 */
60333
60334 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
60335 new file mode 100644
60336 index 0000000..33f4af8
60337 --- /dev/null
60338 +++ b/include/linux/netfilter/xt_gradm.h
60339 @@ -0,0 +1,9 @@
60340 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
60341 +#define _LINUX_NETFILTER_XT_GRADM_H 1
60342 +
60343 +struct xt_gradm_mtinfo {
60344 + __u16 flags;
60345 + __u16 invflags;
60346 +};
60347 +
60348 +#endif
60349 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
60350 index c65a18a..0c05f3a 100644
60351 --- a/include/linux/of_pdt.h
60352 +++ b/include/linux/of_pdt.h
60353 @@ -32,7 +32,7 @@ struct of_pdt_ops {
60354
60355 /* return 0 on success; fill in 'len' with number of bytes in path */
60356 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
60357 -};
60358 +} __no_const;
60359
60360 extern void *prom_early_alloc(unsigned long size);
60361
60362 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
60363 index a4c5624..79d6d88 100644
60364 --- a/include/linux/oprofile.h
60365 +++ b/include/linux/oprofile.h
60366 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
60367 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60368 char const * name, ulong * val);
60369
60370 -/** Create a file for read-only access to an atomic_t. */
60371 +/** Create a file for read-only access to an atomic_unchecked_t. */
60372 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60373 - char const * name, atomic_t * val);
60374 + char const * name, atomic_unchecked_t * val);
60375
60376 /** create a directory */
60377 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60378 diff --git a/include/linux/padata.h b/include/linux/padata.h
60379 index 4633b2f..988bc08 100644
60380 --- a/include/linux/padata.h
60381 +++ b/include/linux/padata.h
60382 @@ -129,7 +129,7 @@ struct parallel_data {
60383 struct padata_instance *pinst;
60384 struct padata_parallel_queue __percpu *pqueue;
60385 struct padata_serial_queue __percpu *squeue;
60386 - atomic_t seq_nr;
60387 + atomic_unchecked_t seq_nr;
60388 atomic_t reorder_objects;
60389 atomic_t refcnt;
60390 unsigned int max_seq_nr;
60391 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
60392 index b1f8912..c955bff 100644
60393 --- a/include/linux/perf_event.h
60394 +++ b/include/linux/perf_event.h
60395 @@ -748,8 +748,8 @@ struct perf_event {
60396
60397 enum perf_event_active_state state;
60398 unsigned int attach_state;
60399 - local64_t count;
60400 - atomic64_t child_count;
60401 + local64_t count; /* PaX: fix it one day */
60402 + atomic64_unchecked_t child_count;
60403
60404 /*
60405 * These are the total time in nanoseconds that the event
60406 @@ -800,8 +800,8 @@ struct perf_event {
60407 * These accumulate total time (in nanoseconds) that children
60408 * events have been enabled and running, respectively.
60409 */
60410 - atomic64_t child_total_time_enabled;
60411 - atomic64_t child_total_time_running;
60412 + atomic64_unchecked_t child_total_time_enabled;
60413 + atomic64_unchecked_t child_total_time_running;
60414
60415 /*
60416 * Protect attach/detach and child_list:
60417 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
60418 index 77257c9..51d473a 100644
60419 --- a/include/linux/pipe_fs_i.h
60420 +++ b/include/linux/pipe_fs_i.h
60421 @@ -46,9 +46,9 @@ struct pipe_buffer {
60422 struct pipe_inode_info {
60423 wait_queue_head_t wait;
60424 unsigned int nrbufs, curbuf, buffers;
60425 - unsigned int readers;
60426 - unsigned int writers;
60427 - unsigned int waiting_writers;
60428 + atomic_t readers;
60429 + atomic_t writers;
60430 + atomic_t waiting_writers;
60431 unsigned int r_counter;
60432 unsigned int w_counter;
60433 struct page *tmp_page;
60434 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
60435 index d3085e7..fd01052 100644
60436 --- a/include/linux/pm_runtime.h
60437 +++ b/include/linux/pm_runtime.h
60438 @@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
60439
60440 static inline void pm_runtime_mark_last_busy(struct device *dev)
60441 {
60442 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
60443 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60444 }
60445
60446 #else /* !CONFIG_PM_RUNTIME */
60447 diff --git a/include/linux/poison.h b/include/linux/poison.h
60448 index 79159de..f1233a9 100644
60449 --- a/include/linux/poison.h
60450 +++ b/include/linux/poison.h
60451 @@ -19,8 +19,8 @@
60452 * under normal circumstances, used to verify that nobody uses
60453 * non-initialized list entries.
60454 */
60455 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60456 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60457 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60458 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60459
60460 /********** include/linux/timer.h **********/
60461 /*
60462 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
60463 index 58969b2..ead129b 100644
60464 --- a/include/linux/preempt.h
60465 +++ b/include/linux/preempt.h
60466 @@ -123,7 +123,7 @@ struct preempt_ops {
60467 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60468 void (*sched_out)(struct preempt_notifier *notifier,
60469 struct task_struct *next);
60470 -};
60471 +} __no_const;
60472
60473 /**
60474 * preempt_notifier - key for installing preemption notifiers
60475 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
60476 index 643b96c..ef55a9c 100644
60477 --- a/include/linux/proc_fs.h
60478 +++ b/include/linux/proc_fs.h
60479 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
60480 return proc_create_data(name, mode, parent, proc_fops, NULL);
60481 }
60482
60483 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60484 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60485 +{
60486 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60487 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60488 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60489 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60490 +#else
60491 + return proc_create_data(name, mode, parent, proc_fops, NULL);
60492 +#endif
60493 +}
60494 +
60495 +
60496 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60497 mode_t mode, struct proc_dir_entry *base,
60498 read_proc_t *read_proc, void * data)
60499 @@ -258,7 +271,7 @@ union proc_op {
60500 int (*proc_show)(struct seq_file *m,
60501 struct pid_namespace *ns, struct pid *pid,
60502 struct task_struct *task);
60503 -};
60504 +} __no_const;
60505
60506 struct ctl_table_header;
60507 struct ctl_table;
60508 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
60509 index 800f113..e9ee2e3 100644
60510 --- a/include/linux/ptrace.h
60511 +++ b/include/linux/ptrace.h
60512 @@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
60513 extern void exit_ptrace(struct task_struct *tracer);
60514 #define PTRACE_MODE_READ 1
60515 #define PTRACE_MODE_ATTACH 2
60516 -/* Returns 0 on success, -errno on denial. */
60517 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60518 /* Returns true on success, false on denial. */
60519 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60520 +/* Returns true on success, false on denial. */
60521 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60522 +/* Returns true on success, false on denial. */
60523 +extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
60524
60525 static inline int ptrace_reparented(struct task_struct *child)
60526 {
60527 diff --git a/include/linux/random.h b/include/linux/random.h
60528 index 8f74538..02a1012 100644
60529 --- a/include/linux/random.h
60530 +++ b/include/linux/random.h
60531 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
60532
60533 u32 prandom32(struct rnd_state *);
60534
60535 +static inline unsigned long pax_get_random_long(void)
60536 +{
60537 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60538 +}
60539 +
60540 /*
60541 * Handle minimum values for seeds
60542 */
60543 static inline u32 __seed(u32 x, u32 m)
60544 {
60545 - return (x < m) ? x + m : x;
60546 + return (x <= m) ? x + m + 1 : x;
60547 }
60548
60549 /**
60550 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
60551 index e0879a7..a12f962 100644
60552 --- a/include/linux/reboot.h
60553 +++ b/include/linux/reboot.h
60554 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
60555 * Architecture-specific implementations of sys_reboot commands.
60556 */
60557
60558 -extern void machine_restart(char *cmd);
60559 -extern void machine_halt(void);
60560 -extern void machine_power_off(void);
60561 +extern void machine_restart(char *cmd) __noreturn;
60562 +extern void machine_halt(void) __noreturn;
60563 +extern void machine_power_off(void) __noreturn;
60564
60565 extern void machine_shutdown(void);
60566 struct pt_regs;
60567 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
60568 */
60569
60570 extern void kernel_restart_prepare(char *cmd);
60571 -extern void kernel_restart(char *cmd);
60572 -extern void kernel_halt(void);
60573 -extern void kernel_power_off(void);
60574 +extern void kernel_restart(char *cmd) __noreturn;
60575 +extern void kernel_halt(void) __noreturn;
60576 +extern void kernel_power_off(void) __noreturn;
60577
60578 extern int C_A_D; /* for sysctl */
60579 void ctrl_alt_del(void);
60580 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60581 * Emergency restart, callable from an interrupt handler.
60582 */
60583
60584 -extern void emergency_restart(void);
60585 +extern void emergency_restart(void) __noreturn;
60586 #include <asm/emergency-restart.h>
60587
60588 #endif
60589 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
60590 index 96d465f..b084e05 100644
60591 --- a/include/linux/reiserfs_fs.h
60592 +++ b/include/linux/reiserfs_fs.h
60593 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60594 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60595
60596 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60597 -#define get_generation(s) atomic_read (&fs_generation(s))
60598 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60599 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60600 #define __fs_changed(gen,s) (gen != get_generation (s))
60601 #define fs_changed(gen,s) \
60602 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
60603 index 52c83b6..18ed7eb 100644
60604 --- a/include/linux/reiserfs_fs_sb.h
60605 +++ b/include/linux/reiserfs_fs_sb.h
60606 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60607 /* Comment? -Hans */
60608 wait_queue_head_t s_wait;
60609 /* To be obsoleted soon by per buffer seals.. -Hans */
60610 - atomic_t s_generation_counter; // increased by one every time the
60611 + atomic_unchecked_t s_generation_counter; // increased by one every time the
60612 // tree gets re-balanced
60613 unsigned long s_properties; /* File system properties. Currently holds
60614 on-disk FS format */
60615 diff --git a/include/linux/relay.h b/include/linux/relay.h
60616 index 14a86bc..17d0700 100644
60617 --- a/include/linux/relay.h
60618 +++ b/include/linux/relay.h
60619 @@ -159,7 +159,7 @@ struct rchan_callbacks
60620 * The callback should return 0 if successful, negative if not.
60621 */
60622 int (*remove_buf_file)(struct dentry *dentry);
60623 -};
60624 +} __no_const;
60625
60626 /*
60627 * CONFIG_RELAY kernel API, kernel/relay.c
60628 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
60629 index c6c6084..5bf1212 100644
60630 --- a/include/linux/rfkill.h
60631 +++ b/include/linux/rfkill.h
60632 @@ -147,6 +147,7 @@ struct rfkill_ops {
60633 void (*query)(struct rfkill *rfkill, void *data);
60634 int (*set_block)(void *data, bool blocked);
60635 };
60636 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60637
60638 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60639 /**
60640 diff --git a/include/linux/rio.h b/include/linux/rio.h
60641 index 4d50611..c6858a2 100644
60642 --- a/include/linux/rio.h
60643 +++ b/include/linux/rio.h
60644 @@ -315,7 +315,7 @@ struct rio_ops {
60645 int mbox, void *buffer, size_t len);
60646 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
60647 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
60648 -};
60649 +} __no_const;
60650
60651 #define RIO_RESOURCE_MEM 0x00000100
60652 #define RIO_RESOURCE_DOORBELL 0x00000200
60653 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
60654 index 2148b12..519b820 100644
60655 --- a/include/linux/rmap.h
60656 +++ b/include/linux/rmap.h
60657 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
60658 void anon_vma_init(void); /* create anon_vma_cachep */
60659 int anon_vma_prepare(struct vm_area_struct *);
60660 void unlink_anon_vmas(struct vm_area_struct *);
60661 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60662 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60663 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60664 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60665 void __anon_vma_link(struct vm_area_struct *);
60666
60667 static inline void anon_vma_merge(struct vm_area_struct *vma,
60668 diff --git a/include/linux/sched.h b/include/linux/sched.h
60669 index 1c4f3e9..b4e4851 100644
60670 --- a/include/linux/sched.h
60671 +++ b/include/linux/sched.h
60672 @@ -101,6 +101,7 @@ struct bio_list;
60673 struct fs_struct;
60674 struct perf_event_context;
60675 struct blk_plug;
60676 +struct linux_binprm;
60677
60678 /*
60679 * List of flags we want to share for kernel threads,
60680 @@ -380,10 +381,13 @@ struct user_namespace;
60681 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60682
60683 extern int sysctl_max_map_count;
60684 +extern unsigned long sysctl_heap_stack_gap;
60685
60686 #include <linux/aio.h>
60687
60688 #ifdef CONFIG_MMU
60689 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60690 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60691 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60692 extern unsigned long
60693 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60694 @@ -629,6 +633,17 @@ struct signal_struct {
60695 #ifdef CONFIG_TASKSTATS
60696 struct taskstats *stats;
60697 #endif
60698 +
60699 +#ifdef CONFIG_GRKERNSEC
60700 + u32 curr_ip;
60701 + u32 saved_ip;
60702 + u32 gr_saddr;
60703 + u32 gr_daddr;
60704 + u16 gr_sport;
60705 + u16 gr_dport;
60706 + u8 used_accept:1;
60707 +#endif
60708 +
60709 #ifdef CONFIG_AUDIT
60710 unsigned audit_tty;
60711 struct tty_audit_buf *tty_audit_buf;
60712 @@ -710,6 +725,11 @@ struct user_struct {
60713 struct key *session_keyring; /* UID's default session keyring */
60714 #endif
60715
60716 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60717 + unsigned int banned;
60718 + unsigned long ban_expires;
60719 +#endif
60720 +
60721 /* Hash table maintenance information */
60722 struct hlist_node uidhash_node;
60723 uid_t uid;
60724 @@ -1337,8 +1357,8 @@ struct task_struct {
60725 struct list_head thread_group;
60726
60727 struct completion *vfork_done; /* for vfork() */
60728 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60729 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60730 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60731 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60732
60733 cputime_t utime, stime, utimescaled, stimescaled;
60734 cputime_t gtime;
60735 @@ -1354,13 +1374,6 @@ struct task_struct {
60736 struct task_cputime cputime_expires;
60737 struct list_head cpu_timers[3];
60738
60739 -/* process credentials */
60740 - const struct cred __rcu *real_cred; /* objective and real subjective task
60741 - * credentials (COW) */
60742 - const struct cred __rcu *cred; /* effective (overridable) subjective task
60743 - * credentials (COW) */
60744 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60745 -
60746 char comm[TASK_COMM_LEN]; /* executable name excluding path
60747 - access with [gs]et_task_comm (which lock
60748 it with task_lock())
60749 @@ -1377,8 +1390,16 @@ struct task_struct {
60750 #endif
60751 /* CPU-specific state of this task */
60752 struct thread_struct thread;
60753 +/* thread_info moved to task_struct */
60754 +#ifdef CONFIG_X86
60755 + struct thread_info tinfo;
60756 +#endif
60757 /* filesystem information */
60758 struct fs_struct *fs;
60759 +
60760 + const struct cred __rcu *cred; /* effective (overridable) subjective task
60761 + * credentials (COW) */
60762 +
60763 /* open file information */
60764 struct files_struct *files;
60765 /* namespaces */
60766 @@ -1425,6 +1446,11 @@ struct task_struct {
60767 struct rt_mutex_waiter *pi_blocked_on;
60768 #endif
60769
60770 +/* process credentials */
60771 + const struct cred __rcu *real_cred; /* objective and real subjective task
60772 + * credentials (COW) */
60773 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60774 +
60775 #ifdef CONFIG_DEBUG_MUTEXES
60776 /* mutex deadlock detection */
60777 struct mutex_waiter *blocked_on;
60778 @@ -1540,6 +1566,27 @@ struct task_struct {
60779 unsigned long default_timer_slack_ns;
60780
60781 struct list_head *scm_work_list;
60782 +
60783 +#ifdef CONFIG_GRKERNSEC
60784 + /* grsecurity */
60785 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60786 + u64 exec_id;
60787 +#endif
60788 +#ifdef CONFIG_GRKERNSEC_SETXID
60789 + const struct cred *delayed_cred;
60790 +#endif
60791 + struct dentry *gr_chroot_dentry;
60792 + struct acl_subject_label *acl;
60793 + struct acl_role_label *role;
60794 + struct file *exec_file;
60795 + u16 acl_role_id;
60796 + /* is this the task that authenticated to the special role */
60797 + u8 acl_sp_role;
60798 + u8 is_writable;
60799 + u8 brute;
60800 + u8 gr_is_chrooted;
60801 +#endif
60802 +
60803 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60804 /* Index of current stored address in ret_stack */
60805 int curr_ret_stack;
60806 @@ -1574,6 +1621,51 @@ struct task_struct {
60807 #endif
60808 };
60809
60810 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60811 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60812 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60813 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60814 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60815 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60816 +
60817 +#ifdef CONFIG_PAX_SOFTMODE
60818 +extern int pax_softmode;
60819 +#endif
60820 +
60821 +extern int pax_check_flags(unsigned long *);
60822 +
60823 +/* if tsk != current then task_lock must be held on it */
60824 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60825 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
60826 +{
60827 + if (likely(tsk->mm))
60828 + return tsk->mm->pax_flags;
60829 + else
60830 + return 0UL;
60831 +}
60832 +
60833 +/* if tsk != current then task_lock must be held on it */
60834 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60835 +{
60836 + if (likely(tsk->mm)) {
60837 + tsk->mm->pax_flags = flags;
60838 + return 0;
60839 + }
60840 + return -EINVAL;
60841 +}
60842 +#endif
60843 +
60844 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60845 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
60846 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60847 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60848 +#endif
60849 +
60850 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60851 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60852 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
60853 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60854 +
60855 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60856 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60857
60858 @@ -2081,7 +2173,9 @@ void yield(void);
60859 extern struct exec_domain default_exec_domain;
60860
60861 union thread_union {
60862 +#ifndef CONFIG_X86
60863 struct thread_info thread_info;
60864 +#endif
60865 unsigned long stack[THREAD_SIZE/sizeof(long)];
60866 };
60867
60868 @@ -2114,6 +2208,7 @@ extern struct pid_namespace init_pid_ns;
60869 */
60870
60871 extern struct task_struct *find_task_by_vpid(pid_t nr);
60872 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60873 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60874 struct pid_namespace *ns);
60875
60876 @@ -2235,6 +2330,12 @@ static inline void mmdrop(struct mm_struct * mm)
60877 extern void mmput(struct mm_struct *);
60878 /* Grab a reference to a task's mm, if it is not already going away */
60879 extern struct mm_struct *get_task_mm(struct task_struct *task);
60880 +/*
60881 + * Grab a reference to a task's mm, if it is not already going away
60882 + * and ptrace_may_access with the mode parameter passed to it
60883 + * succeeds.
60884 + */
60885 +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
60886 /* Remove the current tasks stale references to the old mm_struct */
60887 extern void mm_release(struct task_struct *, struct mm_struct *);
60888 /* Allocate a new mm structure and copy contents from tsk->mm */
60889 @@ -2251,7 +2352,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
60890 extern void exit_itimers(struct signal_struct *);
60891 extern void flush_itimer_signals(void);
60892
60893 -extern NORET_TYPE void do_group_exit(int);
60894 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60895
60896 extern void daemonize(const char *, ...);
60897 extern int allow_signal(int);
60898 @@ -2416,13 +2517,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
60899
60900 #endif
60901
60902 -static inline int object_is_on_stack(void *obj)
60903 +static inline int object_starts_on_stack(void *obj)
60904 {
60905 - void *stack = task_stack_page(current);
60906 + const void *stack = task_stack_page(current);
60907
60908 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60909 }
60910
60911 +#ifdef CONFIG_PAX_USERCOPY
60912 +extern int object_is_on_stack(const void *obj, unsigned long len);
60913 +#endif
60914 +
60915 extern void thread_info_cache_init(void);
60916
60917 #ifdef CONFIG_DEBUG_STACK_USAGE
60918 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
60919 index 899fbb4..1cb4138 100644
60920 --- a/include/linux/screen_info.h
60921 +++ b/include/linux/screen_info.h
60922 @@ -43,7 +43,8 @@ struct screen_info {
60923 __u16 pages; /* 0x32 */
60924 __u16 vesa_attributes; /* 0x34 */
60925 __u32 capabilities; /* 0x36 */
60926 - __u8 _reserved[6]; /* 0x3a */
60927 + __u16 vesapm_size; /* 0x3a */
60928 + __u8 _reserved[4]; /* 0x3c */
60929 } __attribute__((packed));
60930
60931 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60932 diff --git a/include/linux/security.h b/include/linux/security.h
60933 index e8c619d..e0cbd1c 100644
60934 --- a/include/linux/security.h
60935 +++ b/include/linux/security.h
60936 @@ -37,6 +37,7 @@
60937 #include <linux/xfrm.h>
60938 #include <linux/slab.h>
60939 #include <linux/xattr.h>
60940 +#include <linux/grsecurity.h>
60941 #include <net/flow.h>
60942
60943 /* Maximum number of letters for an LSM name string */
60944 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
60945 index 0b69a46..b2ffa4c 100644
60946 --- a/include/linux/seq_file.h
60947 +++ b/include/linux/seq_file.h
60948 @@ -24,6 +24,9 @@ struct seq_file {
60949 struct mutex lock;
60950 const struct seq_operations *op;
60951 int poll_event;
60952 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60953 + u64 exec_id;
60954 +#endif
60955 void *private;
60956 };
60957
60958 @@ -33,6 +36,7 @@ struct seq_operations {
60959 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60960 int (*show) (struct seq_file *m, void *v);
60961 };
60962 +typedef struct seq_operations __no_const seq_operations_no_const;
60963
60964 #define SEQ_SKIP 1
60965
60966 diff --git a/include/linux/shm.h b/include/linux/shm.h
60967 index 92808b8..c28cac4 100644
60968 --- a/include/linux/shm.h
60969 +++ b/include/linux/shm.h
60970 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
60971
60972 /* The task created the shm object. NULL if the task is dead. */
60973 struct task_struct *shm_creator;
60974 +#ifdef CONFIG_GRKERNSEC
60975 + time_t shm_createtime;
60976 + pid_t shm_lapid;
60977 +#endif
60978 };
60979
60980 /* shm_mode upper byte flags */
60981 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
60982 index fe86488..1563c1c 100644
60983 --- a/include/linux/skbuff.h
60984 +++ b/include/linux/skbuff.h
60985 @@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
60986 */
60987 static inline int skb_queue_empty(const struct sk_buff_head *list)
60988 {
60989 - return list->next == (struct sk_buff *)list;
60990 + return list->next == (const struct sk_buff *)list;
60991 }
60992
60993 /**
60994 @@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
60995 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60996 const struct sk_buff *skb)
60997 {
60998 - return skb->next == (struct sk_buff *)list;
60999 + return skb->next == (const struct sk_buff *)list;
61000 }
61001
61002 /**
61003 @@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
61004 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
61005 const struct sk_buff *skb)
61006 {
61007 - return skb->prev == (struct sk_buff *)list;
61008 + return skb->prev == (const struct sk_buff *)list;
61009 }
61010
61011 /**
61012 @@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
61013 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
61014 */
61015 #ifndef NET_SKB_PAD
61016 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
61017 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
61018 #endif
61019
61020 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
61021 diff --git a/include/linux/slab.h b/include/linux/slab.h
61022 index 573c809..e84c132 100644
61023 --- a/include/linux/slab.h
61024 +++ b/include/linux/slab.h
61025 @@ -11,12 +11,20 @@
61026
61027 #include <linux/gfp.h>
61028 #include <linux/types.h>
61029 +#include <linux/err.h>
61030
61031 /*
61032 * Flags to pass to kmem_cache_create().
61033 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
61034 */
61035 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
61036 +
61037 +#ifdef CONFIG_PAX_USERCOPY
61038 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
61039 +#else
61040 +#define SLAB_USERCOPY 0x00000000UL
61041 +#endif
61042 +
61043 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
61044 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
61045 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
61046 @@ -87,10 +95,13 @@
61047 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
61048 * Both make kfree a no-op.
61049 */
61050 -#define ZERO_SIZE_PTR ((void *)16)
61051 +#define ZERO_SIZE_PTR \
61052 +({ \
61053 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
61054 + (void *)(-MAX_ERRNO-1L); \
61055 +})
61056
61057 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
61058 - (unsigned long)ZERO_SIZE_PTR)
61059 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
61060
61061 /*
61062 * struct kmem_cache related prototypes
61063 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
61064 void kfree(const void *);
61065 void kzfree(const void *);
61066 size_t ksize(const void *);
61067 +void check_object_size(const void *ptr, unsigned long n, bool to);
61068
61069 /*
61070 * Allocator specific definitions. These are mainly used to establish optimized
61071 @@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
61072
61073 void __init kmem_cache_init_late(void);
61074
61075 +#define kmalloc(x, y) \
61076 +({ \
61077 + void *___retval; \
61078 + intoverflow_t ___x = (intoverflow_t)x; \
61079 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
61080 + ___retval = NULL; \
61081 + else \
61082 + ___retval = kmalloc((size_t)___x, (y)); \
61083 + ___retval; \
61084 +})
61085 +
61086 +#define kmalloc_node(x, y, z) \
61087 +({ \
61088 + void *___retval; \
61089 + intoverflow_t ___x = (intoverflow_t)x; \
61090 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
61091 + ___retval = NULL; \
61092 + else \
61093 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
61094 + ___retval; \
61095 +})
61096 +
61097 +#define kzalloc(x, y) \
61098 +({ \
61099 + void *___retval; \
61100 + intoverflow_t ___x = (intoverflow_t)x; \
61101 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
61102 + ___retval = NULL; \
61103 + else \
61104 + ___retval = kzalloc((size_t)___x, (y)); \
61105 + ___retval; \
61106 +})
61107 +
61108 +#define __krealloc(x, y, z) \
61109 +({ \
61110 + void *___retval; \
61111 + intoverflow_t ___y = (intoverflow_t)y; \
61112 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
61113 + ___retval = NULL; \
61114 + else \
61115 + ___retval = __krealloc((x), (size_t)___y, (z)); \
61116 + ___retval; \
61117 +})
61118 +
61119 +#define krealloc(x, y, z) \
61120 +({ \
61121 + void *___retval; \
61122 + intoverflow_t ___y = (intoverflow_t)y; \
61123 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
61124 + ___retval = NULL; \
61125 + else \
61126 + ___retval = krealloc((x), (size_t)___y, (z)); \
61127 + ___retval; \
61128 +})
61129 +
61130 #endif /* _LINUX_SLAB_H */
61131 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
61132 index d00e0ba..1b3bf7b 100644
61133 --- a/include/linux/slab_def.h
61134 +++ b/include/linux/slab_def.h
61135 @@ -68,10 +68,10 @@ struct kmem_cache {
61136 unsigned long node_allocs;
61137 unsigned long node_frees;
61138 unsigned long node_overflow;
61139 - atomic_t allochit;
61140 - atomic_t allocmiss;
61141 - atomic_t freehit;
61142 - atomic_t freemiss;
61143 + atomic_unchecked_t allochit;
61144 + atomic_unchecked_t allocmiss;
61145 + atomic_unchecked_t freehit;
61146 + atomic_unchecked_t freemiss;
61147
61148 /*
61149 * If debugging is enabled, then the allocator can add additional
61150 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
61151 index a32bcfd..53b71f4 100644
61152 --- a/include/linux/slub_def.h
61153 +++ b/include/linux/slub_def.h
61154 @@ -89,7 +89,7 @@ struct kmem_cache {
61155 struct kmem_cache_order_objects max;
61156 struct kmem_cache_order_objects min;
61157 gfp_t allocflags; /* gfp flags to use on each alloc */
61158 - int refcount; /* Refcount for slab cache destroy */
61159 + atomic_t refcount; /* Refcount for slab cache destroy */
61160 void (*ctor)(void *);
61161 int inuse; /* Offset to metadata */
61162 int align; /* Alignment */
61163 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
61164 }
61165
61166 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
61167 -void *__kmalloc(size_t size, gfp_t flags);
61168 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
61169
61170 static __always_inline void *
61171 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
61172 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
61173 index de8832d..0147b46 100644
61174 --- a/include/linux/sonet.h
61175 +++ b/include/linux/sonet.h
61176 @@ -61,7 +61,7 @@ struct sonet_stats {
61177 #include <linux/atomic.h>
61178
61179 struct k_sonet_stats {
61180 -#define __HANDLE_ITEM(i) atomic_t i
61181 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
61182 __SONET_ITEMS
61183 #undef __HANDLE_ITEM
61184 };
61185 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
61186 index 3d8f9c4..69f1c0a 100644
61187 --- a/include/linux/sunrpc/clnt.h
61188 +++ b/include/linux/sunrpc/clnt.h
61189 @@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
61190 {
61191 switch (sap->sa_family) {
61192 case AF_INET:
61193 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
61194 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
61195 case AF_INET6:
61196 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
61197 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
61198 }
61199 return 0;
61200 }
61201 @@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
61202 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
61203 const struct sockaddr *src)
61204 {
61205 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
61206 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
61207 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
61208
61209 dsin->sin_family = ssin->sin_family;
61210 @@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
61211 if (sa->sa_family != AF_INET6)
61212 return 0;
61213
61214 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
61215 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
61216 }
61217
61218 #endif /* __KERNEL__ */
61219 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
61220 index e775689..9e206d9 100644
61221 --- a/include/linux/sunrpc/sched.h
61222 +++ b/include/linux/sunrpc/sched.h
61223 @@ -105,6 +105,7 @@ struct rpc_call_ops {
61224 void (*rpc_call_done)(struct rpc_task *, void *);
61225 void (*rpc_release)(void *);
61226 };
61227 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
61228
61229 struct rpc_task_setup {
61230 struct rpc_task *task;
61231 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
61232 index c14fe86..393245e 100644
61233 --- a/include/linux/sunrpc/svc_rdma.h
61234 +++ b/include/linux/sunrpc/svc_rdma.h
61235 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
61236 extern unsigned int svcrdma_max_requests;
61237 extern unsigned int svcrdma_max_req_size;
61238
61239 -extern atomic_t rdma_stat_recv;
61240 -extern atomic_t rdma_stat_read;
61241 -extern atomic_t rdma_stat_write;
61242 -extern atomic_t rdma_stat_sq_starve;
61243 -extern atomic_t rdma_stat_rq_starve;
61244 -extern atomic_t rdma_stat_rq_poll;
61245 -extern atomic_t rdma_stat_rq_prod;
61246 -extern atomic_t rdma_stat_sq_poll;
61247 -extern atomic_t rdma_stat_sq_prod;
61248 +extern atomic_unchecked_t rdma_stat_recv;
61249 +extern atomic_unchecked_t rdma_stat_read;
61250 +extern atomic_unchecked_t rdma_stat_write;
61251 +extern atomic_unchecked_t rdma_stat_sq_starve;
61252 +extern atomic_unchecked_t rdma_stat_rq_starve;
61253 +extern atomic_unchecked_t rdma_stat_rq_poll;
61254 +extern atomic_unchecked_t rdma_stat_rq_prod;
61255 +extern atomic_unchecked_t rdma_stat_sq_poll;
61256 +extern atomic_unchecked_t rdma_stat_sq_prod;
61257
61258 #define RPCRDMA_VERSION 1
61259
61260 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
61261 index 703cfa33..0b8ca72ac 100644
61262 --- a/include/linux/sysctl.h
61263 +++ b/include/linux/sysctl.h
61264 @@ -155,7 +155,11 @@ enum
61265 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
61266 };
61267
61268 -
61269 +#ifdef CONFIG_PAX_SOFTMODE
61270 +enum {
61271 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
61272 +};
61273 +#endif
61274
61275 /* CTL_VM names: */
61276 enum
61277 @@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
61278
61279 extern int proc_dostring(struct ctl_table *, int,
61280 void __user *, size_t *, loff_t *);
61281 +extern int proc_dostring_modpriv(struct ctl_table *, int,
61282 + void __user *, size_t *, loff_t *);
61283 extern int proc_dointvec(struct ctl_table *, int,
61284 void __user *, size_t *, loff_t *);
61285 extern int proc_dointvec_minmax(struct ctl_table *, int,
61286 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
61287 index a71a292..51bd91d 100644
61288 --- a/include/linux/tracehook.h
61289 +++ b/include/linux/tracehook.h
61290 @@ -54,12 +54,12 @@ struct linux_binprm;
61291 /*
61292 * ptrace report for syscall entry and exit looks identical.
61293 */
61294 -static inline void ptrace_report_syscall(struct pt_regs *regs)
61295 +static inline int ptrace_report_syscall(struct pt_regs *regs)
61296 {
61297 int ptrace = current->ptrace;
61298
61299 if (!(ptrace & PT_PTRACED))
61300 - return;
61301 + return 0;
61302
61303 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
61304
61305 @@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
61306 send_sig(current->exit_code, current, 1);
61307 current->exit_code = 0;
61308 }
61309 +
61310 + return fatal_signal_pending(current);
61311 }
61312
61313 /**
61314 @@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
61315 static inline __must_check int tracehook_report_syscall_entry(
61316 struct pt_regs *regs)
61317 {
61318 - ptrace_report_syscall(regs);
61319 - return 0;
61320 + return ptrace_report_syscall(regs);
61321 }
61322
61323 /**
61324 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
61325 index ff7dc08..893e1bd 100644
61326 --- a/include/linux/tty_ldisc.h
61327 +++ b/include/linux/tty_ldisc.h
61328 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
61329
61330 struct module *owner;
61331
61332 - int refcount;
61333 + atomic_t refcount;
61334 };
61335
61336 struct tty_ldisc {
61337 diff --git a/include/linux/types.h b/include/linux/types.h
61338 index 57a9723..dbe234a 100644
61339 --- a/include/linux/types.h
61340 +++ b/include/linux/types.h
61341 @@ -213,10 +213,26 @@ typedef struct {
61342 int counter;
61343 } atomic_t;
61344
61345 +#ifdef CONFIG_PAX_REFCOUNT
61346 +typedef struct {
61347 + int counter;
61348 +} atomic_unchecked_t;
61349 +#else
61350 +typedef atomic_t atomic_unchecked_t;
61351 +#endif
61352 +
61353 #ifdef CONFIG_64BIT
61354 typedef struct {
61355 long counter;
61356 } atomic64_t;
61357 +
61358 +#ifdef CONFIG_PAX_REFCOUNT
61359 +typedef struct {
61360 + long counter;
61361 +} atomic64_unchecked_t;
61362 +#else
61363 +typedef atomic64_t atomic64_unchecked_t;
61364 +#endif
61365 #endif
61366
61367 struct list_head {
61368 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
61369 index 5ca0951..ab496a5 100644
61370 --- a/include/linux/uaccess.h
61371 +++ b/include/linux/uaccess.h
61372 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
61373 long ret; \
61374 mm_segment_t old_fs = get_fs(); \
61375 \
61376 - set_fs(KERNEL_DS); \
61377 pagefault_disable(); \
61378 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
61379 - pagefault_enable(); \
61380 + set_fs(KERNEL_DS); \
61381 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
61382 set_fs(old_fs); \
61383 + pagefault_enable(); \
61384 ret; \
61385 })
61386
61387 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
61388 index 99c1b4d..bb94261 100644
61389 --- a/include/linux/unaligned/access_ok.h
61390 +++ b/include/linux/unaligned/access_ok.h
61391 @@ -6,32 +6,32 @@
61392
61393 static inline u16 get_unaligned_le16(const void *p)
61394 {
61395 - return le16_to_cpup((__le16 *)p);
61396 + return le16_to_cpup((const __le16 *)p);
61397 }
61398
61399 static inline u32 get_unaligned_le32(const void *p)
61400 {
61401 - return le32_to_cpup((__le32 *)p);
61402 + return le32_to_cpup((const __le32 *)p);
61403 }
61404
61405 static inline u64 get_unaligned_le64(const void *p)
61406 {
61407 - return le64_to_cpup((__le64 *)p);
61408 + return le64_to_cpup((const __le64 *)p);
61409 }
61410
61411 static inline u16 get_unaligned_be16(const void *p)
61412 {
61413 - return be16_to_cpup((__be16 *)p);
61414 + return be16_to_cpup((const __be16 *)p);
61415 }
61416
61417 static inline u32 get_unaligned_be32(const void *p)
61418 {
61419 - return be32_to_cpup((__be32 *)p);
61420 + return be32_to_cpup((const __be32 *)p);
61421 }
61422
61423 static inline u64 get_unaligned_be64(const void *p)
61424 {
61425 - return be64_to_cpup((__be64 *)p);
61426 + return be64_to_cpup((const __be64 *)p);
61427 }
61428
61429 static inline void put_unaligned_le16(u16 val, void *p)
61430 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
61431 index e5a40c3..20ab0f6 100644
61432 --- a/include/linux/usb/renesas_usbhs.h
61433 +++ b/include/linux/usb/renesas_usbhs.h
61434 @@ -39,7 +39,7 @@ enum {
61435 */
61436 struct renesas_usbhs_driver_callback {
61437 int (*notify_hotplug)(struct platform_device *pdev);
61438 -};
61439 +} __no_const;
61440
61441 /*
61442 * callback functions for platform
61443 @@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
61444 * VBUS control is needed for Host
61445 */
61446 int (*set_vbus)(struct platform_device *pdev, int enable);
61447 -};
61448 +} __no_const;
61449
61450 /*
61451 * parameters for renesas usbhs
61452 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
61453 index 6f8fbcf..8259001 100644
61454 --- a/include/linux/vermagic.h
61455 +++ b/include/linux/vermagic.h
61456 @@ -25,9 +25,35 @@
61457 #define MODULE_ARCH_VERMAGIC ""
61458 #endif
61459
61460 +#ifdef CONFIG_PAX_REFCOUNT
61461 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
61462 +#else
61463 +#define MODULE_PAX_REFCOUNT ""
61464 +#endif
61465 +
61466 +#ifdef CONSTIFY_PLUGIN
61467 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
61468 +#else
61469 +#define MODULE_CONSTIFY_PLUGIN ""
61470 +#endif
61471 +
61472 +#ifdef STACKLEAK_PLUGIN
61473 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
61474 +#else
61475 +#define MODULE_STACKLEAK_PLUGIN ""
61476 +#endif
61477 +
61478 +#ifdef CONFIG_GRKERNSEC
61479 +#define MODULE_GRSEC "GRSEC "
61480 +#else
61481 +#define MODULE_GRSEC ""
61482 +#endif
61483 +
61484 #define VERMAGIC_STRING \
61485 UTS_RELEASE " " \
61486 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
61487 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
61488 - MODULE_ARCH_VERMAGIC
61489 + MODULE_ARCH_VERMAGIC \
61490 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
61491 + MODULE_GRSEC
61492
61493 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
61494 index 4bde182..aec92c1 100644
61495 --- a/include/linux/vmalloc.h
61496 +++ b/include/linux/vmalloc.h
61497 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
61498 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61499 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61500 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
61501 +
61502 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61503 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
61504 +#endif
61505 +
61506 /* bits [20..32] reserved for arch specific ioremap internals */
61507
61508 /*
61509 @@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
61510 # endif
61511 #endif
61512
61513 +#define vmalloc(x) \
61514 +({ \
61515 + void *___retval; \
61516 + intoverflow_t ___x = (intoverflow_t)x; \
61517 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61518 + ___retval = NULL; \
61519 + else \
61520 + ___retval = vmalloc((unsigned long)___x); \
61521 + ___retval; \
61522 +})
61523 +
61524 +#define vzalloc(x) \
61525 +({ \
61526 + void *___retval; \
61527 + intoverflow_t ___x = (intoverflow_t)x; \
61528 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
61529 + ___retval = NULL; \
61530 + else \
61531 + ___retval = vzalloc((unsigned long)___x); \
61532 + ___retval; \
61533 +})
61534 +
61535 +#define __vmalloc(x, y, z) \
61536 +({ \
61537 + void *___retval; \
61538 + intoverflow_t ___x = (intoverflow_t)x; \
61539 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61540 + ___retval = NULL; \
61541 + else \
61542 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61543 + ___retval; \
61544 +})
61545 +
61546 +#define vmalloc_user(x) \
61547 +({ \
61548 + void *___retval; \
61549 + intoverflow_t ___x = (intoverflow_t)x; \
61550 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61551 + ___retval = NULL; \
61552 + else \
61553 + ___retval = vmalloc_user((unsigned long)___x); \
61554 + ___retval; \
61555 +})
61556 +
61557 +#define vmalloc_exec(x) \
61558 +({ \
61559 + void *___retval; \
61560 + intoverflow_t ___x = (intoverflow_t)x; \
61561 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61562 + ___retval = NULL; \
61563 + else \
61564 + ___retval = vmalloc_exec((unsigned long)___x); \
61565 + ___retval; \
61566 +})
61567 +
61568 +#define vmalloc_node(x, y) \
61569 +({ \
61570 + void *___retval; \
61571 + intoverflow_t ___x = (intoverflow_t)x; \
61572 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61573 + ___retval = NULL; \
61574 + else \
61575 + ___retval = vmalloc_node((unsigned long)___x, (y));\
61576 + ___retval; \
61577 +})
61578 +
61579 +#define vzalloc_node(x, y) \
61580 +({ \
61581 + void *___retval; \
61582 + intoverflow_t ___x = (intoverflow_t)x; \
61583 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61584 + ___retval = NULL; \
61585 + else \
61586 + ___retval = vzalloc_node((unsigned long)___x, (y));\
61587 + ___retval; \
61588 +})
61589 +
61590 +#define vmalloc_32(x) \
61591 +({ \
61592 + void *___retval; \
61593 + intoverflow_t ___x = (intoverflow_t)x; \
61594 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61595 + ___retval = NULL; \
61596 + else \
61597 + ___retval = vmalloc_32((unsigned long)___x); \
61598 + ___retval; \
61599 +})
61600 +
61601 +#define vmalloc_32_user(x) \
61602 +({ \
61603 +void *___retval; \
61604 + intoverflow_t ___x = (intoverflow_t)x; \
61605 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61606 + ___retval = NULL; \
61607 + else \
61608 + ___retval = vmalloc_32_user((unsigned long)___x);\
61609 + ___retval; \
61610 +})
61611 +
61612 #endif /* _LINUX_VMALLOC_H */
61613 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
61614 index 65efb92..137adbb 100644
61615 --- a/include/linux/vmstat.h
61616 +++ b/include/linux/vmstat.h
61617 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
61618 /*
61619 * Zone based page accounting with per cpu differentials.
61620 */
61621 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61622 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61623
61624 static inline void zone_page_state_add(long x, struct zone *zone,
61625 enum zone_stat_item item)
61626 {
61627 - atomic_long_add(x, &zone->vm_stat[item]);
61628 - atomic_long_add(x, &vm_stat[item]);
61629 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61630 + atomic_long_add_unchecked(x, &vm_stat[item]);
61631 }
61632
61633 static inline unsigned long global_page_state(enum zone_stat_item item)
61634 {
61635 - long x = atomic_long_read(&vm_stat[item]);
61636 + long x = atomic_long_read_unchecked(&vm_stat[item]);
61637 #ifdef CONFIG_SMP
61638 if (x < 0)
61639 x = 0;
61640 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
61641 static inline unsigned long zone_page_state(struct zone *zone,
61642 enum zone_stat_item item)
61643 {
61644 - long x = atomic_long_read(&zone->vm_stat[item]);
61645 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61646 #ifdef CONFIG_SMP
61647 if (x < 0)
61648 x = 0;
61649 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
61650 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61651 enum zone_stat_item item)
61652 {
61653 - long x = atomic_long_read(&zone->vm_stat[item]);
61654 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61655
61656 #ifdef CONFIG_SMP
61657 int cpu;
61658 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
61659
61660 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61661 {
61662 - atomic_long_inc(&zone->vm_stat[item]);
61663 - atomic_long_inc(&vm_stat[item]);
61664 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
61665 + atomic_long_inc_unchecked(&vm_stat[item]);
61666 }
61667
61668 static inline void __inc_zone_page_state(struct page *page,
61669 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
61670
61671 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61672 {
61673 - atomic_long_dec(&zone->vm_stat[item]);
61674 - atomic_long_dec(&vm_stat[item]);
61675 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
61676 + atomic_long_dec_unchecked(&vm_stat[item]);
61677 }
61678
61679 static inline void __dec_zone_page_state(struct page *page,
61680 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
61681 index e5d1220..ef6e406 100644
61682 --- a/include/linux/xattr.h
61683 +++ b/include/linux/xattr.h
61684 @@ -57,6 +57,11 @@
61685 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
61686 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
61687
61688 +/* User namespace */
61689 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
61690 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
61691 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
61692 +
61693 #ifdef __KERNEL__
61694
61695 #include <linux/types.h>
61696 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
61697 index 4aeff96..b378cdc 100644
61698 --- a/include/media/saa7146_vv.h
61699 +++ b/include/media/saa7146_vv.h
61700 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
61701 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61702
61703 /* the extension can override this */
61704 - struct v4l2_ioctl_ops ops;
61705 + v4l2_ioctl_ops_no_const ops;
61706 /* pointer to the saa7146 core ops */
61707 const struct v4l2_ioctl_ops *core_ops;
61708
61709 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
61710 index c7c40f1..4f01585 100644
61711 --- a/include/media/v4l2-dev.h
61712 +++ b/include/media/v4l2-dev.h
61713 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
61714
61715
61716 struct v4l2_file_operations {
61717 - struct module *owner;
61718 + struct module * const owner;
61719 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61720 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61721 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61722 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
61723 int (*open) (struct file *);
61724 int (*release) (struct file *);
61725 };
61726 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61727
61728 /*
61729 * Newer version of video_device, handled by videodev2.c
61730 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
61731 index 4d1c74a..65e1221 100644
61732 --- a/include/media/v4l2-ioctl.h
61733 +++ b/include/media/v4l2-ioctl.h
61734 @@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
61735 long (*vidioc_default) (struct file *file, void *fh,
61736 bool valid_prio, int cmd, void *arg);
61737 };
61738 -
61739 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61740
61741 /* v4l debugging and diagnostics */
61742
61743 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
61744 index 8d55251..dfe5b0a 100644
61745 --- a/include/net/caif/caif_hsi.h
61746 +++ b/include/net/caif/caif_hsi.h
61747 @@ -98,7 +98,7 @@ struct cfhsi_drv {
61748 void (*rx_done_cb) (struct cfhsi_drv *drv);
61749 void (*wake_up_cb) (struct cfhsi_drv *drv);
61750 void (*wake_down_cb) (struct cfhsi_drv *drv);
61751 -};
61752 +} __no_const;
61753
61754 /* Structure implemented by HSI device. */
61755 struct cfhsi_dev {
61756 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
61757 index 9e5425b..8136ffc 100644
61758 --- a/include/net/caif/cfctrl.h
61759 +++ b/include/net/caif/cfctrl.h
61760 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
61761 void (*radioset_rsp)(void);
61762 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61763 struct cflayer *client_layer);
61764 -};
61765 +} __no_const;
61766
61767 /* Link Setup Parameters for CAIF-Links. */
61768 struct cfctrl_link_param {
61769 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
61770 struct cfctrl {
61771 struct cfsrvl serv;
61772 struct cfctrl_rsp res;
61773 - atomic_t req_seq_no;
61774 - atomic_t rsp_seq_no;
61775 + atomic_unchecked_t req_seq_no;
61776 + atomic_unchecked_t rsp_seq_no;
61777 struct list_head list;
61778 /* Protects from simultaneous access to first_req list */
61779 spinlock_t info_list_lock;
61780 diff --git a/include/net/flow.h b/include/net/flow.h
61781 index 2a7eefd..3250f3b 100644
61782 --- a/include/net/flow.h
61783 +++ b/include/net/flow.h
61784 @@ -218,6 +218,6 @@ extern struct flow_cache_object *flow_cache_lookup(
61785
61786 extern void flow_cache_flush(void);
61787 extern void flow_cache_flush_deferred(void);
61788 -extern atomic_t flow_cache_genid;
61789 +extern atomic_unchecked_t flow_cache_genid;
61790
61791 #endif
61792 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
61793 index e9ff3fc..9d3e5c7 100644
61794 --- a/include/net/inetpeer.h
61795 +++ b/include/net/inetpeer.h
61796 @@ -48,8 +48,8 @@ struct inet_peer {
61797 */
61798 union {
61799 struct {
61800 - atomic_t rid; /* Frag reception counter */
61801 - atomic_t ip_id_count; /* IP ID for the next packet */
61802 + atomic_unchecked_t rid; /* Frag reception counter */
61803 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61804 __u32 tcp_ts;
61805 __u32 tcp_ts_stamp;
61806 };
61807 @@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
61808 more++;
61809 inet_peer_refcheck(p);
61810 do {
61811 - old = atomic_read(&p->ip_id_count);
61812 + old = atomic_read_unchecked(&p->ip_id_count);
61813 new = old + more;
61814 if (!new)
61815 new = 1;
61816 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61817 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61818 return new;
61819 }
61820
61821 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
61822 index 10422ef..662570f 100644
61823 --- a/include/net/ip_fib.h
61824 +++ b/include/net/ip_fib.h
61825 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
61826
61827 #define FIB_RES_SADDR(net, res) \
61828 ((FIB_RES_NH(res).nh_saddr_genid == \
61829 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61830 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61831 FIB_RES_NH(res).nh_saddr : \
61832 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61833 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61834 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
61835 index e5a7b9a..f4fc44b 100644
61836 --- a/include/net/ip_vs.h
61837 +++ b/include/net/ip_vs.h
61838 @@ -509,7 +509,7 @@ struct ip_vs_conn {
61839 struct ip_vs_conn *control; /* Master control connection */
61840 atomic_t n_control; /* Number of controlled ones */
61841 struct ip_vs_dest *dest; /* real server */
61842 - atomic_t in_pkts; /* incoming packet counter */
61843 + atomic_unchecked_t in_pkts; /* incoming packet counter */
61844
61845 /* packet transmitter for different forwarding methods. If it
61846 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61847 @@ -647,7 +647,7 @@ struct ip_vs_dest {
61848 __be16 port; /* port number of the server */
61849 union nf_inet_addr addr; /* IP address of the server */
61850 volatile unsigned flags; /* dest status flags */
61851 - atomic_t conn_flags; /* flags to copy to conn */
61852 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
61853 atomic_t weight; /* server weight */
61854
61855 atomic_t refcnt; /* reference counter */
61856 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
61857 index 69b610a..fe3962c 100644
61858 --- a/include/net/irda/ircomm_core.h
61859 +++ b/include/net/irda/ircomm_core.h
61860 @@ -51,7 +51,7 @@ typedef struct {
61861 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61862 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61863 struct ircomm_info *);
61864 -} call_t;
61865 +} __no_const call_t;
61866
61867 struct ircomm_cb {
61868 irda_queue_t queue;
61869 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
61870 index 59ba38bc..d515662 100644
61871 --- a/include/net/irda/ircomm_tty.h
61872 +++ b/include/net/irda/ircomm_tty.h
61873 @@ -35,6 +35,7 @@
61874 #include <linux/termios.h>
61875 #include <linux/timer.h>
61876 #include <linux/tty.h> /* struct tty_struct */
61877 +#include <asm/local.h>
61878
61879 #include <net/irda/irias_object.h>
61880 #include <net/irda/ircomm_core.h>
61881 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61882 unsigned short close_delay;
61883 unsigned short closing_wait; /* time to wait before closing */
61884
61885 - int open_count;
61886 - int blocked_open; /* # of blocked opens */
61887 + local_t open_count;
61888 + local_t blocked_open; /* # of blocked opens */
61889
61890 /* Protect concurent access to :
61891 * o self->open_count
61892 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
61893 index f2419cf..473679f 100644
61894 --- a/include/net/iucv/af_iucv.h
61895 +++ b/include/net/iucv/af_iucv.h
61896 @@ -139,7 +139,7 @@ struct iucv_sock {
61897 struct iucv_sock_list {
61898 struct hlist_head head;
61899 rwlock_t lock;
61900 - atomic_t autobind_name;
61901 + atomic_unchecked_t autobind_name;
61902 };
61903
61904 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61905 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
61906 index 2720884..3aa5c25 100644
61907 --- a/include/net/neighbour.h
61908 +++ b/include/net/neighbour.h
61909 @@ -122,7 +122,7 @@ struct neigh_ops {
61910 void (*error_report)(struct neighbour *, struct sk_buff *);
61911 int (*output)(struct neighbour *, struct sk_buff *);
61912 int (*connected_output)(struct neighbour *, struct sk_buff *);
61913 -};
61914 +} __do_const;
61915
61916 struct pneigh_entry {
61917 struct pneigh_entry *next;
61918 diff --git a/include/net/netlink.h b/include/net/netlink.h
61919 index cb1f350..3279d2c 100644
61920 --- a/include/net/netlink.h
61921 +++ b/include/net/netlink.h
61922 @@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
61923 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61924 {
61925 if (mark)
61926 - skb_trim(skb, (unsigned char *) mark - skb->data);
61927 + skb_trim(skb, (const unsigned char *) mark - skb->data);
61928 }
61929
61930 /**
61931 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
61932 index d786b4f..4c3dd41 100644
61933 --- a/include/net/netns/ipv4.h
61934 +++ b/include/net/netns/ipv4.h
61935 @@ -56,8 +56,8 @@ struct netns_ipv4 {
61936
61937 unsigned int sysctl_ping_group_range[2];
61938
61939 - atomic_t rt_genid;
61940 - atomic_t dev_addr_genid;
61941 + atomic_unchecked_t rt_genid;
61942 + atomic_unchecked_t dev_addr_genid;
61943
61944 #ifdef CONFIG_IP_MROUTE
61945 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61946 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
61947 index 6a72a58..e6a127d 100644
61948 --- a/include/net/sctp/sctp.h
61949 +++ b/include/net/sctp/sctp.h
61950 @@ -318,9 +318,9 @@ do { \
61951
61952 #else /* SCTP_DEBUG */
61953
61954 -#define SCTP_DEBUG_PRINTK(whatever...)
61955 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61956 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61957 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61958 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61959 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61960 #define SCTP_ENABLE_DEBUG
61961 #define SCTP_DISABLE_DEBUG
61962 #define SCTP_ASSERT(expr, str, func)
61963 diff --git a/include/net/sock.h b/include/net/sock.h
61964 index 32e3937..87a1dbc 100644
61965 --- a/include/net/sock.h
61966 +++ b/include/net/sock.h
61967 @@ -277,7 +277,7 @@ struct sock {
61968 #ifdef CONFIG_RPS
61969 __u32 sk_rxhash;
61970 #endif
61971 - atomic_t sk_drops;
61972 + atomic_unchecked_t sk_drops;
61973 int sk_rcvbuf;
61974
61975 struct sk_filter __rcu *sk_filter;
61976 @@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
61977 }
61978
61979 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61980 - char __user *from, char *to,
61981 + char __user *from, unsigned char *to,
61982 int copy, int offset)
61983 {
61984 if (skb->ip_summed == CHECKSUM_NONE) {
61985 diff --git a/include/net/tcp.h b/include/net/tcp.h
61986 index bb18c4d..bb87972 100644
61987 --- a/include/net/tcp.h
61988 +++ b/include/net/tcp.h
61989 @@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
61990 char *name;
61991 sa_family_t family;
61992 const struct file_operations *seq_fops;
61993 - struct seq_operations seq_ops;
61994 + seq_operations_no_const seq_ops;
61995 };
61996
61997 struct tcp_iter_state {
61998 diff --git a/include/net/udp.h b/include/net/udp.h
61999 index 3b285f4..0219639 100644
62000 --- a/include/net/udp.h
62001 +++ b/include/net/udp.h
62002 @@ -237,7 +237,7 @@ struct udp_seq_afinfo {
62003 sa_family_t family;
62004 struct udp_table *udp_table;
62005 const struct file_operations *seq_fops;
62006 - struct seq_operations seq_ops;
62007 + seq_operations_no_const seq_ops;
62008 };
62009
62010 struct udp_iter_state {
62011 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
62012 index b203e14..1df3991 100644
62013 --- a/include/net/xfrm.h
62014 +++ b/include/net/xfrm.h
62015 @@ -505,7 +505,7 @@ struct xfrm_policy {
62016 struct timer_list timer;
62017
62018 struct flow_cache_object flo;
62019 - atomic_t genid;
62020 + atomic_unchecked_t genid;
62021 u32 priority;
62022 u32 index;
62023 struct xfrm_mark mark;
62024 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
62025 index 1a046b1..ee0bef0 100644
62026 --- a/include/rdma/iw_cm.h
62027 +++ b/include/rdma/iw_cm.h
62028 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
62029 int backlog);
62030
62031 int (*destroy_listen)(struct iw_cm_id *cm_id);
62032 -};
62033 +} __no_const;
62034
62035 /**
62036 * iw_create_cm_id - Create an IW CM identifier.
62037 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
62038 index 5d1a758..1dbf795 100644
62039 --- a/include/scsi/libfc.h
62040 +++ b/include/scsi/libfc.h
62041 @@ -748,6 +748,7 @@ struct libfc_function_template {
62042 */
62043 void (*disc_stop_final) (struct fc_lport *);
62044 };
62045 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
62046
62047 /**
62048 * struct fc_disc - Discovery context
62049 @@ -851,7 +852,7 @@ struct fc_lport {
62050 struct fc_vport *vport;
62051
62052 /* Operational Information */
62053 - struct libfc_function_template tt;
62054 + libfc_function_template_no_const tt;
62055 u8 link_up;
62056 u8 qfull;
62057 enum fc_lport_state state;
62058 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
62059 index 5591ed5..13eb457 100644
62060 --- a/include/scsi/scsi_device.h
62061 +++ b/include/scsi/scsi_device.h
62062 @@ -161,9 +161,9 @@ struct scsi_device {
62063 unsigned int max_device_blocked; /* what device_blocked counts down from */
62064 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
62065
62066 - atomic_t iorequest_cnt;
62067 - atomic_t iodone_cnt;
62068 - atomic_t ioerr_cnt;
62069 + atomic_unchecked_t iorequest_cnt;
62070 + atomic_unchecked_t iodone_cnt;
62071 + atomic_unchecked_t ioerr_cnt;
62072
62073 struct device sdev_gendev,
62074 sdev_dev;
62075 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
62076 index 2a65167..91e01f8 100644
62077 --- a/include/scsi/scsi_transport_fc.h
62078 +++ b/include/scsi/scsi_transport_fc.h
62079 @@ -711,7 +711,7 @@ struct fc_function_template {
62080 unsigned long show_host_system_hostname:1;
62081
62082 unsigned long disable_target_scan:1;
62083 -};
62084 +} __do_const;
62085
62086
62087 /**
62088 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
62089 index 030b87c..98a6954 100644
62090 --- a/include/sound/ak4xxx-adda.h
62091 +++ b/include/sound/ak4xxx-adda.h
62092 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
62093 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
62094 unsigned char val);
62095 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
62096 -};
62097 +} __no_const;
62098
62099 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
62100
62101 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
62102 index 8c05e47..2b5df97 100644
62103 --- a/include/sound/hwdep.h
62104 +++ b/include/sound/hwdep.h
62105 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
62106 struct snd_hwdep_dsp_status *status);
62107 int (*dsp_load)(struct snd_hwdep *hw,
62108 struct snd_hwdep_dsp_image *image);
62109 -};
62110 +} __no_const;
62111
62112 struct snd_hwdep {
62113 struct snd_card *card;
62114 diff --git a/include/sound/info.h b/include/sound/info.h
62115 index 5492cc4..1a65278 100644
62116 --- a/include/sound/info.h
62117 +++ b/include/sound/info.h
62118 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
62119 struct snd_info_buffer *buffer);
62120 void (*write)(struct snd_info_entry *entry,
62121 struct snd_info_buffer *buffer);
62122 -};
62123 +} __no_const;
62124
62125 struct snd_info_entry_ops {
62126 int (*open)(struct snd_info_entry *entry,
62127 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
62128 index 0cf91b2..b70cae4 100644
62129 --- a/include/sound/pcm.h
62130 +++ b/include/sound/pcm.h
62131 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
62132 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
62133 int (*ack)(struct snd_pcm_substream *substream);
62134 };
62135 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
62136
62137 /*
62138 *
62139 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
62140 index af1b49e..a5d55a5 100644
62141 --- a/include/sound/sb16_csp.h
62142 +++ b/include/sound/sb16_csp.h
62143 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
62144 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
62145 int (*csp_stop) (struct snd_sb_csp * p);
62146 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
62147 -};
62148 +} __no_const;
62149
62150 /*
62151 * CSP private data
62152 diff --git a/include/sound/soc.h b/include/sound/soc.h
62153 index 11cfb59..e3f93f4 100644
62154 --- a/include/sound/soc.h
62155 +++ b/include/sound/soc.h
62156 @@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
62157 /* platform IO - used for platform DAPM */
62158 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
62159 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
62160 -};
62161 +} __do_const;
62162
62163 struct snd_soc_platform {
62164 const char *name;
62165 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
62166 index 444cd6b..3327cc5 100644
62167 --- a/include/sound/ymfpci.h
62168 +++ b/include/sound/ymfpci.h
62169 @@ -358,7 +358,7 @@ struct snd_ymfpci {
62170 spinlock_t reg_lock;
62171 spinlock_t voice_lock;
62172 wait_queue_head_t interrupt_sleep;
62173 - atomic_t interrupt_sleep_count;
62174 + atomic_unchecked_t interrupt_sleep_count;
62175 struct snd_info_entry *proc_entry;
62176 const struct firmware *dsp_microcode;
62177 const struct firmware *controller_microcode;
62178 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
62179 index 94bbec3..3a8c6b0 100644
62180 --- a/include/target/target_core_base.h
62181 +++ b/include/target/target_core_base.h
62182 @@ -346,7 +346,7 @@ struct t10_reservation_ops {
62183 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
62184 int (*t10_pr_register)(struct se_cmd *);
62185 int (*t10_pr_clear)(struct se_cmd *);
62186 -};
62187 +} __no_const;
62188
62189 struct t10_reservation {
62190 /* Reservation effects all target ports */
62191 @@ -465,8 +465,8 @@ struct se_cmd {
62192 atomic_t t_se_count;
62193 atomic_t t_task_cdbs_left;
62194 atomic_t t_task_cdbs_ex_left;
62195 - atomic_t t_task_cdbs_sent;
62196 - atomic_t t_transport_aborted;
62197 + atomic_unchecked_t t_task_cdbs_sent;
62198 + atomic_unchecked_t t_transport_aborted;
62199 atomic_t t_transport_active;
62200 atomic_t t_transport_complete;
62201 atomic_t t_transport_queue_active;
62202 @@ -705,7 +705,7 @@ struct se_device {
62203 /* Active commands on this virtual SE device */
62204 atomic_t simple_cmds;
62205 atomic_t depth_left;
62206 - atomic_t dev_ordered_id;
62207 + atomic_unchecked_t dev_ordered_id;
62208 atomic_t execute_tasks;
62209 atomic_t dev_ordered_sync;
62210 atomic_t dev_qf_count;
62211 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
62212 index 1c09820..7f5ec79 100644
62213 --- a/include/trace/events/irq.h
62214 +++ b/include/trace/events/irq.h
62215 @@ -36,7 +36,7 @@ struct softirq_action;
62216 */
62217 TRACE_EVENT(irq_handler_entry,
62218
62219 - TP_PROTO(int irq, struct irqaction *action),
62220 + TP_PROTO(int irq, const struct irqaction *action),
62221
62222 TP_ARGS(irq, action),
62223
62224 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
62225 */
62226 TRACE_EVENT(irq_handler_exit,
62227
62228 - TP_PROTO(int irq, struct irqaction *action, int ret),
62229 + TP_PROTO(int irq, const struct irqaction *action, int ret),
62230
62231 TP_ARGS(irq, action, ret),
62232
62233 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
62234 index c41f308..6918de3 100644
62235 --- a/include/video/udlfb.h
62236 +++ b/include/video/udlfb.h
62237 @@ -52,10 +52,10 @@ struct dlfb_data {
62238 u32 pseudo_palette[256];
62239 int blank_mode; /*one of FB_BLANK_ */
62240 /* blit-only rendering path metrics, exposed through sysfs */
62241 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
62242 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
62243 - atomic_t bytes_sent; /* to usb, after compression including overhead */
62244 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
62245 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
62246 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
62247 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
62248 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
62249 };
62250
62251 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
62252 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
62253 index 0993a22..32ba2fe 100644
62254 --- a/include/video/uvesafb.h
62255 +++ b/include/video/uvesafb.h
62256 @@ -177,6 +177,7 @@ struct uvesafb_par {
62257 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
62258 u8 pmi_setpal; /* PMI for palette changes */
62259 u16 *pmi_base; /* protected mode interface location */
62260 + u8 *pmi_code; /* protected mode code location */
62261 void *pmi_start;
62262 void *pmi_pal;
62263 u8 *vbe_state_orig; /*
62264 diff --git a/init/Kconfig b/init/Kconfig
62265 index 43298f9..2f56c12 100644
62266 --- a/init/Kconfig
62267 +++ b/init/Kconfig
62268 @@ -1214,7 +1214,7 @@ config SLUB_DEBUG
62269
62270 config COMPAT_BRK
62271 bool "Disable heap randomization"
62272 - default y
62273 + default n
62274 help
62275 Randomizing heap placement makes heap exploits harder, but it
62276 also breaks ancient binaries (including anything libc5 based).
62277 diff --git a/init/do_mounts.c b/init/do_mounts.c
62278 index db6e5ee..7677ff7 100644
62279 --- a/init/do_mounts.c
62280 +++ b/init/do_mounts.c
62281 @@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
62282
62283 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
62284 {
62285 - int err = sys_mount(name, "/root", fs, flags, data);
62286 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
62287 if (err)
62288 return err;
62289
62290 - sys_chdir((const char __user __force *)"/root");
62291 + sys_chdir((const char __force_user*)"/root");
62292 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
62293 printk(KERN_INFO
62294 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
62295 @@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
62296 va_start(args, fmt);
62297 vsprintf(buf, fmt, args);
62298 va_end(args);
62299 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
62300 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
62301 if (fd >= 0) {
62302 sys_ioctl(fd, FDEJECT, 0);
62303 sys_close(fd);
62304 }
62305 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
62306 - fd = sys_open("/dev/console", O_RDWR, 0);
62307 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
62308 if (fd >= 0) {
62309 sys_ioctl(fd, TCGETS, (long)&termios);
62310 termios.c_lflag &= ~ICANON;
62311 sys_ioctl(fd, TCSETSF, (long)&termios);
62312 - sys_read(fd, &c, 1);
62313 + sys_read(fd, (char __user *)&c, 1);
62314 termios.c_lflag |= ICANON;
62315 sys_ioctl(fd, TCSETSF, (long)&termios);
62316 sys_close(fd);
62317 @@ -553,6 +553,6 @@ void __init prepare_namespace(void)
62318 mount_root();
62319 out:
62320 devtmpfs_mount("dev");
62321 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
62322 - sys_chroot((const char __user __force *)".");
62323 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62324 + sys_chroot((const char __force_user *)".");
62325 }
62326 diff --git a/init/do_mounts.h b/init/do_mounts.h
62327 index f5b978a..69dbfe8 100644
62328 --- a/init/do_mounts.h
62329 +++ b/init/do_mounts.h
62330 @@ -15,15 +15,15 @@ extern int root_mountflags;
62331
62332 static inline int create_dev(char *name, dev_t dev)
62333 {
62334 - sys_unlink(name);
62335 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
62336 + sys_unlink((char __force_user *)name);
62337 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
62338 }
62339
62340 #if BITS_PER_LONG == 32
62341 static inline u32 bstat(char *name)
62342 {
62343 struct stat64 stat;
62344 - if (sys_stat64(name, &stat) != 0)
62345 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
62346 return 0;
62347 if (!S_ISBLK(stat.st_mode))
62348 return 0;
62349 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
62350 static inline u32 bstat(char *name)
62351 {
62352 struct stat stat;
62353 - if (sys_newstat(name, &stat) != 0)
62354 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
62355 return 0;
62356 if (!S_ISBLK(stat.st_mode))
62357 return 0;
62358 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
62359 index 3098a38..253064e 100644
62360 --- a/init/do_mounts_initrd.c
62361 +++ b/init/do_mounts_initrd.c
62362 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
62363 create_dev("/dev/root.old", Root_RAM0);
62364 /* mount initrd on rootfs' /root */
62365 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
62366 - sys_mkdir("/old", 0700);
62367 - root_fd = sys_open("/", 0, 0);
62368 - old_fd = sys_open("/old", 0, 0);
62369 + sys_mkdir((const char __force_user *)"/old", 0700);
62370 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
62371 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
62372 /* move initrd over / and chdir/chroot in initrd root */
62373 - sys_chdir("/root");
62374 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
62375 - sys_chroot(".");
62376 + sys_chdir((const char __force_user *)"/root");
62377 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62378 + sys_chroot((const char __force_user *)".");
62379
62380 /*
62381 * In case that a resume from disk is carried out by linuxrc or one of
62382 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
62383
62384 /* move initrd to rootfs' /old */
62385 sys_fchdir(old_fd);
62386 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
62387 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
62388 /* switch root and cwd back to / of rootfs */
62389 sys_fchdir(root_fd);
62390 - sys_chroot(".");
62391 + sys_chroot((const char __force_user *)".");
62392 sys_close(old_fd);
62393 sys_close(root_fd);
62394
62395 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62396 - sys_chdir("/old");
62397 + sys_chdir((const char __force_user *)"/old");
62398 return;
62399 }
62400
62401 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
62402 mount_root();
62403
62404 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62405 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62406 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
62407 if (!error)
62408 printk("okay\n");
62409 else {
62410 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
62411 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
62412 if (error == -ENOENT)
62413 printk("/initrd does not exist. Ignored.\n");
62414 else
62415 printk("failed\n");
62416 printk(KERN_NOTICE "Unmounting old root\n");
62417 - sys_umount("/old", MNT_DETACH);
62418 + sys_umount((char __force_user *)"/old", MNT_DETACH);
62419 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62420 if (fd < 0) {
62421 error = fd;
62422 @@ -116,11 +116,11 @@ int __init initrd_load(void)
62423 * mounted in the normal path.
62424 */
62425 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62426 - sys_unlink("/initrd.image");
62427 + sys_unlink((const char __force_user *)"/initrd.image");
62428 handle_initrd();
62429 return 1;
62430 }
62431 }
62432 - sys_unlink("/initrd.image");
62433 + sys_unlink((const char __force_user *)"/initrd.image");
62434 return 0;
62435 }
62436 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
62437 index 32c4799..c27ee74 100644
62438 --- a/init/do_mounts_md.c
62439 +++ b/init/do_mounts_md.c
62440 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62441 partitioned ? "_d" : "", minor,
62442 md_setup_args[ent].device_names);
62443
62444 - fd = sys_open(name, 0, 0);
62445 + fd = sys_open((char __force_user *)name, 0, 0);
62446 if (fd < 0) {
62447 printk(KERN_ERR "md: open failed - cannot start "
62448 "array %s\n", name);
62449 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62450 * array without it
62451 */
62452 sys_close(fd);
62453 - fd = sys_open(name, 0, 0);
62454 + fd = sys_open((char __force_user *)name, 0, 0);
62455 sys_ioctl(fd, BLKRRPART, 0);
62456 }
62457 sys_close(fd);
62458 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62459
62460 wait_for_device_probe();
62461
62462 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
62463 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
62464 if (fd >= 0) {
62465 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62466 sys_close(fd);
62467 diff --git a/init/initramfs.c b/init/initramfs.c
62468 index 2531811..040d4d4 100644
62469 --- a/init/initramfs.c
62470 +++ b/init/initramfs.c
62471 @@ -74,7 +74,7 @@ static void __init free_hash(void)
62472 }
62473 }
62474
62475 -static long __init do_utime(char __user *filename, time_t mtime)
62476 +static long __init do_utime(__force char __user *filename, time_t mtime)
62477 {
62478 struct timespec t[2];
62479
62480 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
62481 struct dir_entry *de, *tmp;
62482 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62483 list_del(&de->list);
62484 - do_utime(de->name, de->mtime);
62485 + do_utime((char __force_user *)de->name, de->mtime);
62486 kfree(de->name);
62487 kfree(de);
62488 }
62489 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
62490 if (nlink >= 2) {
62491 char *old = find_link(major, minor, ino, mode, collected);
62492 if (old)
62493 - return (sys_link(old, collected) < 0) ? -1 : 1;
62494 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
62495 }
62496 return 0;
62497 }
62498 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
62499 {
62500 struct stat st;
62501
62502 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62503 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
62504 if (S_ISDIR(st.st_mode))
62505 - sys_rmdir(path);
62506 + sys_rmdir((char __force_user *)path);
62507 else
62508 - sys_unlink(path);
62509 + sys_unlink((char __force_user *)path);
62510 }
62511 }
62512
62513 @@ -305,7 +305,7 @@ static int __init do_name(void)
62514 int openflags = O_WRONLY|O_CREAT;
62515 if (ml != 1)
62516 openflags |= O_TRUNC;
62517 - wfd = sys_open(collected, openflags, mode);
62518 + wfd = sys_open((char __force_user *)collected, openflags, mode);
62519
62520 if (wfd >= 0) {
62521 sys_fchown(wfd, uid, gid);
62522 @@ -317,17 +317,17 @@ static int __init do_name(void)
62523 }
62524 }
62525 } else if (S_ISDIR(mode)) {
62526 - sys_mkdir(collected, mode);
62527 - sys_chown(collected, uid, gid);
62528 - sys_chmod(collected, mode);
62529 + sys_mkdir((char __force_user *)collected, mode);
62530 + sys_chown((char __force_user *)collected, uid, gid);
62531 + sys_chmod((char __force_user *)collected, mode);
62532 dir_add(collected, mtime);
62533 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62534 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62535 if (maybe_link() == 0) {
62536 - sys_mknod(collected, mode, rdev);
62537 - sys_chown(collected, uid, gid);
62538 - sys_chmod(collected, mode);
62539 - do_utime(collected, mtime);
62540 + sys_mknod((char __force_user *)collected, mode, rdev);
62541 + sys_chown((char __force_user *)collected, uid, gid);
62542 + sys_chmod((char __force_user *)collected, mode);
62543 + do_utime((char __force_user *)collected, mtime);
62544 }
62545 }
62546 return 0;
62547 @@ -336,15 +336,15 @@ static int __init do_name(void)
62548 static int __init do_copy(void)
62549 {
62550 if (count >= body_len) {
62551 - sys_write(wfd, victim, body_len);
62552 + sys_write(wfd, (char __force_user *)victim, body_len);
62553 sys_close(wfd);
62554 - do_utime(vcollected, mtime);
62555 + do_utime((char __force_user *)vcollected, mtime);
62556 kfree(vcollected);
62557 eat(body_len);
62558 state = SkipIt;
62559 return 0;
62560 } else {
62561 - sys_write(wfd, victim, count);
62562 + sys_write(wfd, (char __force_user *)victim, count);
62563 body_len -= count;
62564 eat(count);
62565 return 1;
62566 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
62567 {
62568 collected[N_ALIGN(name_len) + body_len] = '\0';
62569 clean_path(collected, 0);
62570 - sys_symlink(collected + N_ALIGN(name_len), collected);
62571 - sys_lchown(collected, uid, gid);
62572 - do_utime(collected, mtime);
62573 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
62574 + sys_lchown((char __force_user *)collected, uid, gid);
62575 + do_utime((char __force_user *)collected, mtime);
62576 state = SkipIt;
62577 next_state = Reset;
62578 return 0;
62579 diff --git a/init/main.c b/init/main.c
62580 index 217ed23..ec5406f 100644
62581 --- a/init/main.c
62582 +++ b/init/main.c
62583 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
62584 extern void tc_init(void);
62585 #endif
62586
62587 +extern void grsecurity_init(void);
62588 +
62589 /*
62590 * Debug helper: via this flag we know that we are in 'early bootup code'
62591 * where only the boot processor is running with IRQ disabled. This means
62592 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
62593
62594 __setup("reset_devices", set_reset_devices);
62595
62596 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62597 +extern char pax_enter_kernel_user[];
62598 +extern char pax_exit_kernel_user[];
62599 +extern pgdval_t clone_pgd_mask;
62600 +#endif
62601 +
62602 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62603 +static int __init setup_pax_nouderef(char *str)
62604 +{
62605 +#ifdef CONFIG_X86_32
62606 + unsigned int cpu;
62607 + struct desc_struct *gdt;
62608 +
62609 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
62610 + gdt = get_cpu_gdt_table(cpu);
62611 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62612 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62613 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62614 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62615 + }
62616 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62617 +#else
62618 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62619 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62620 + clone_pgd_mask = ~(pgdval_t)0UL;
62621 +#endif
62622 +
62623 + return 0;
62624 +}
62625 +early_param("pax_nouderef", setup_pax_nouderef);
62626 +#endif
62627 +
62628 +#ifdef CONFIG_PAX_SOFTMODE
62629 +int pax_softmode;
62630 +
62631 +static int __init setup_pax_softmode(char *str)
62632 +{
62633 + get_option(&str, &pax_softmode);
62634 + return 1;
62635 +}
62636 +__setup("pax_softmode=", setup_pax_softmode);
62637 +#endif
62638 +
62639 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62640 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62641 static const char *panic_later, *panic_param;
62642 @@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
62643 {
62644 int count = preempt_count();
62645 int ret;
62646 + const char *msg1 = "", *msg2 = "";
62647
62648 if (initcall_debug)
62649 ret = do_one_initcall_debug(fn);
62650 @@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
62651 sprintf(msgbuf, "error code %d ", ret);
62652
62653 if (preempt_count() != count) {
62654 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62655 + msg1 = " preemption imbalance";
62656 preempt_count() = count;
62657 }
62658 if (irqs_disabled()) {
62659 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62660 + msg2 = " disabled interrupts";
62661 local_irq_enable();
62662 }
62663 - if (msgbuf[0]) {
62664 - printk("initcall %pF returned with %s\n", fn, msgbuf);
62665 + if (msgbuf[0] || *msg1 || *msg2) {
62666 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62667 }
62668
62669 return ret;
62670 @@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
62671 do_basic_setup();
62672
62673 /* Open the /dev/console on the rootfs, this should never fail */
62674 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62675 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62676 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62677
62678 (void) sys_dup(0);
62679 @@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
62680 if (!ramdisk_execute_command)
62681 ramdisk_execute_command = "/init";
62682
62683 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62684 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62685 ramdisk_execute_command = NULL;
62686 prepare_namespace();
62687 }
62688
62689 + grsecurity_init();
62690 +
62691 /*
62692 * Ok, we have completed the initial bootup, and
62693 * we're essentially up and running. Get rid of the
62694 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
62695 index 5b4293d..f179875 100644
62696 --- a/ipc/mqueue.c
62697 +++ b/ipc/mqueue.c
62698 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
62699 mq_bytes = (mq_msg_tblsz +
62700 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62701
62702 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62703 spin_lock(&mq_lock);
62704 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62705 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62706 diff --git a/ipc/msg.c b/ipc/msg.c
62707 index 7385de2..a8180e08 100644
62708 --- a/ipc/msg.c
62709 +++ b/ipc/msg.c
62710 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
62711 return security_msg_queue_associate(msq, msgflg);
62712 }
62713
62714 +static struct ipc_ops msg_ops = {
62715 + .getnew = newque,
62716 + .associate = msg_security,
62717 + .more_checks = NULL
62718 +};
62719 +
62720 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62721 {
62722 struct ipc_namespace *ns;
62723 - struct ipc_ops msg_ops;
62724 struct ipc_params msg_params;
62725
62726 ns = current->nsproxy->ipc_ns;
62727
62728 - msg_ops.getnew = newque;
62729 - msg_ops.associate = msg_security;
62730 - msg_ops.more_checks = NULL;
62731 -
62732 msg_params.key = key;
62733 msg_params.flg = msgflg;
62734
62735 diff --git a/ipc/sem.c b/ipc/sem.c
62736 index 5215a81..cfc0cac 100644
62737 --- a/ipc/sem.c
62738 +++ b/ipc/sem.c
62739 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
62740 return 0;
62741 }
62742
62743 +static struct ipc_ops sem_ops = {
62744 + .getnew = newary,
62745 + .associate = sem_security,
62746 + .more_checks = sem_more_checks
62747 +};
62748 +
62749 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62750 {
62751 struct ipc_namespace *ns;
62752 - struct ipc_ops sem_ops;
62753 struct ipc_params sem_params;
62754
62755 ns = current->nsproxy->ipc_ns;
62756 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62757 if (nsems < 0 || nsems > ns->sc_semmsl)
62758 return -EINVAL;
62759
62760 - sem_ops.getnew = newary;
62761 - sem_ops.associate = sem_security;
62762 - sem_ops.more_checks = sem_more_checks;
62763 -
62764 sem_params.key = key;
62765 sem_params.flg = semflg;
62766 sem_params.u.nsems = nsems;
62767 diff --git a/ipc/shm.c b/ipc/shm.c
62768 index b76be5b..859e750 100644
62769 --- a/ipc/shm.c
62770 +++ b/ipc/shm.c
62771 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
62772 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62773 #endif
62774
62775 +#ifdef CONFIG_GRKERNSEC
62776 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62777 + const time_t shm_createtime, const uid_t cuid,
62778 + const int shmid);
62779 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62780 + const time_t shm_createtime);
62781 +#endif
62782 +
62783 void shm_init_ns(struct ipc_namespace *ns)
62784 {
62785 ns->shm_ctlmax = SHMMAX;
62786 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
62787 shp->shm_lprid = 0;
62788 shp->shm_atim = shp->shm_dtim = 0;
62789 shp->shm_ctim = get_seconds();
62790 +#ifdef CONFIG_GRKERNSEC
62791 + {
62792 + struct timespec timeval;
62793 + do_posix_clock_monotonic_gettime(&timeval);
62794 +
62795 + shp->shm_createtime = timeval.tv_sec;
62796 + }
62797 +#endif
62798 shp->shm_segsz = size;
62799 shp->shm_nattch = 0;
62800 shp->shm_file = file;
62801 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
62802 return 0;
62803 }
62804
62805 +static struct ipc_ops shm_ops = {
62806 + .getnew = newseg,
62807 + .associate = shm_security,
62808 + .more_checks = shm_more_checks
62809 +};
62810 +
62811 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62812 {
62813 struct ipc_namespace *ns;
62814 - struct ipc_ops shm_ops;
62815 struct ipc_params shm_params;
62816
62817 ns = current->nsproxy->ipc_ns;
62818
62819 - shm_ops.getnew = newseg;
62820 - shm_ops.associate = shm_security;
62821 - shm_ops.more_checks = shm_more_checks;
62822 -
62823 shm_params.key = key;
62824 shm_params.flg = shmflg;
62825 shm_params.u.size = size;
62826 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62827 f_mode = FMODE_READ | FMODE_WRITE;
62828 }
62829 if (shmflg & SHM_EXEC) {
62830 +
62831 +#ifdef CONFIG_PAX_MPROTECT
62832 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
62833 + goto out;
62834 +#endif
62835 +
62836 prot |= PROT_EXEC;
62837 acc_mode |= S_IXUGO;
62838 }
62839 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62840 if (err)
62841 goto out_unlock;
62842
62843 +#ifdef CONFIG_GRKERNSEC
62844 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62845 + shp->shm_perm.cuid, shmid) ||
62846 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62847 + err = -EACCES;
62848 + goto out_unlock;
62849 + }
62850 +#endif
62851 +
62852 path = shp->shm_file->f_path;
62853 path_get(&path);
62854 shp->shm_nattch++;
62855 +#ifdef CONFIG_GRKERNSEC
62856 + shp->shm_lapid = current->pid;
62857 +#endif
62858 size = i_size_read(path.dentry->d_inode);
62859 shm_unlock(shp);
62860
62861 diff --git a/kernel/acct.c b/kernel/acct.c
62862 index fa7eb3d..7faf116 100644
62863 --- a/kernel/acct.c
62864 +++ b/kernel/acct.c
62865 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
62866 */
62867 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62868 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62869 - file->f_op->write(file, (char *)&ac,
62870 + file->f_op->write(file, (char __force_user *)&ac,
62871 sizeof(acct_t), &file->f_pos);
62872 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62873 set_fs(fs);
62874 diff --git a/kernel/audit.c b/kernel/audit.c
62875 index 09fae26..ed71d5b 100644
62876 --- a/kernel/audit.c
62877 +++ b/kernel/audit.c
62878 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62879 3) suppressed due to audit_rate_limit
62880 4) suppressed due to audit_backlog_limit
62881 */
62882 -static atomic_t audit_lost = ATOMIC_INIT(0);
62883 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62884
62885 /* The netlink socket. */
62886 static struct sock *audit_sock;
62887 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62888 unsigned long now;
62889 int print;
62890
62891 - atomic_inc(&audit_lost);
62892 + atomic_inc_unchecked(&audit_lost);
62893
62894 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62895
62896 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62897 printk(KERN_WARNING
62898 "audit: audit_lost=%d audit_rate_limit=%d "
62899 "audit_backlog_limit=%d\n",
62900 - atomic_read(&audit_lost),
62901 + atomic_read_unchecked(&audit_lost),
62902 audit_rate_limit,
62903 audit_backlog_limit);
62904 audit_panic(message);
62905 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
62906 status_set.pid = audit_pid;
62907 status_set.rate_limit = audit_rate_limit;
62908 status_set.backlog_limit = audit_backlog_limit;
62909 - status_set.lost = atomic_read(&audit_lost);
62910 + status_set.lost = atomic_read_unchecked(&audit_lost);
62911 status_set.backlog = skb_queue_len(&audit_skb_queue);
62912 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62913 &status_set, sizeof(status_set));
62914 @@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
62915 avail = audit_expand(ab,
62916 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
62917 if (!avail)
62918 - goto out;
62919 + goto out_va_end;
62920 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
62921 }
62922 - va_end(args2);
62923 if (len > 0)
62924 skb_put(skb, len);
62925 +out_va_end:
62926 + va_end(args2);
62927 out:
62928 return;
62929 }
62930 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
62931 index 47b7fc1..c003c33 100644
62932 --- a/kernel/auditsc.c
62933 +++ b/kernel/auditsc.c
62934 @@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
62935 struct audit_buffer **ab,
62936 struct audit_aux_data_execve *axi)
62937 {
62938 - int i;
62939 - size_t len, len_sent = 0;
62940 + int i, len;
62941 + size_t len_sent = 0;
62942 const char __user *p;
62943 char *buf;
62944
62945 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
62946 }
62947
62948 /* global counter which is incremented every time something logs in */
62949 -static atomic_t session_id = ATOMIC_INIT(0);
62950 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62951
62952 /**
62953 * audit_set_loginuid - set a task's audit_context loginuid
62954 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
62955 */
62956 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62957 {
62958 - unsigned int sessionid = atomic_inc_return(&session_id);
62959 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62960 struct audit_context *context = task->audit_context;
62961
62962 if (context && context->in_syscall) {
62963 diff --git a/kernel/capability.c b/kernel/capability.c
62964 index b463871..fa3ea1f 100644
62965 --- a/kernel/capability.c
62966 +++ b/kernel/capability.c
62967 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
62968 * before modification is attempted and the application
62969 * fails.
62970 */
62971 + if (tocopy > ARRAY_SIZE(kdata))
62972 + return -EFAULT;
62973 +
62974 if (copy_to_user(dataptr, kdata, tocopy
62975 * sizeof(struct __user_cap_data_struct))) {
62976 return -EFAULT;
62977 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
62978 BUG();
62979 }
62980
62981 - if (security_capable(ns, current_cred(), cap) == 0) {
62982 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62983 current->flags |= PF_SUPERPRIV;
62984 return true;
62985 }
62986 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
62987 }
62988 EXPORT_SYMBOL(ns_capable);
62989
62990 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
62991 +{
62992 + if (unlikely(!cap_valid(cap))) {
62993 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62994 + BUG();
62995 + }
62996 +
62997 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62998 + current->flags |= PF_SUPERPRIV;
62999 + return true;
63000 + }
63001 + return false;
63002 +}
63003 +EXPORT_SYMBOL(ns_capable_nolog);
63004 +
63005 +bool capable_nolog(int cap)
63006 +{
63007 + return ns_capable_nolog(&init_user_ns, cap);
63008 +}
63009 +EXPORT_SYMBOL(capable_nolog);
63010 +
63011 /**
63012 * task_ns_capable - Determine whether current task has a superior
63013 * capability targeted at a specific task's user namespace.
63014 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
63015 }
63016 EXPORT_SYMBOL(task_ns_capable);
63017
63018 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
63019 +{
63020 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
63021 +}
63022 +EXPORT_SYMBOL(task_ns_capable_nolog);
63023 +
63024 /**
63025 * nsown_capable - Check superior capability to one's own user_ns
63026 * @cap: The capability in question
63027 diff --git a/kernel/compat.c b/kernel/compat.c
63028 index f346ced..aa2b1f4 100644
63029 --- a/kernel/compat.c
63030 +++ b/kernel/compat.c
63031 @@ -13,6 +13,7 @@
63032
63033 #include <linux/linkage.h>
63034 #include <linux/compat.h>
63035 +#include <linux/module.h>
63036 #include <linux/errno.h>
63037 #include <linux/time.h>
63038 #include <linux/signal.h>
63039 @@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
63040 mm_segment_t oldfs;
63041 long ret;
63042
63043 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
63044 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
63045 oldfs = get_fs();
63046 set_fs(KERNEL_DS);
63047 ret = hrtimer_nanosleep_restart(restart);
63048 @@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
63049 oldfs = get_fs();
63050 set_fs(KERNEL_DS);
63051 ret = hrtimer_nanosleep(&tu,
63052 - rmtp ? (struct timespec __user *)&rmt : NULL,
63053 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
63054 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
63055 set_fs(oldfs);
63056
63057 @@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
63058 mm_segment_t old_fs = get_fs();
63059
63060 set_fs(KERNEL_DS);
63061 - ret = sys_sigpending((old_sigset_t __user *) &s);
63062 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
63063 set_fs(old_fs);
63064 if (ret == 0)
63065 ret = put_user(s, set);
63066 @@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
63067 old_fs = get_fs();
63068 set_fs(KERNEL_DS);
63069 ret = sys_sigprocmask(how,
63070 - set ? (old_sigset_t __user *) &s : NULL,
63071 - oset ? (old_sigset_t __user *) &s : NULL);
63072 + set ? (old_sigset_t __force_user *) &s : NULL,
63073 + oset ? (old_sigset_t __force_user *) &s : NULL);
63074 set_fs(old_fs);
63075 if (ret == 0)
63076 if (oset)
63077 @@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
63078 mm_segment_t old_fs = get_fs();
63079
63080 set_fs(KERNEL_DS);
63081 - ret = sys_old_getrlimit(resource, &r);
63082 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
63083 set_fs(old_fs);
63084
63085 if (!ret) {
63086 @@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
63087 mm_segment_t old_fs = get_fs();
63088
63089 set_fs(KERNEL_DS);
63090 - ret = sys_getrusage(who, (struct rusage __user *) &r);
63091 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
63092 set_fs(old_fs);
63093
63094 if (ret)
63095 @@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
63096 set_fs (KERNEL_DS);
63097 ret = sys_wait4(pid,
63098 (stat_addr ?
63099 - (unsigned int __user *) &status : NULL),
63100 - options, (struct rusage __user *) &r);
63101 + (unsigned int __force_user *) &status : NULL),
63102 + options, (struct rusage __force_user *) &r);
63103 set_fs (old_fs);
63104
63105 if (ret > 0) {
63106 @@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
63107 memset(&info, 0, sizeof(info));
63108
63109 set_fs(KERNEL_DS);
63110 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
63111 - uru ? (struct rusage __user *)&ru : NULL);
63112 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
63113 + uru ? (struct rusage __force_user *)&ru : NULL);
63114 set_fs(old_fs);
63115
63116 if ((ret < 0) || (info.si_signo == 0))
63117 @@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
63118 oldfs = get_fs();
63119 set_fs(KERNEL_DS);
63120 err = sys_timer_settime(timer_id, flags,
63121 - (struct itimerspec __user *) &newts,
63122 - (struct itimerspec __user *) &oldts);
63123 + (struct itimerspec __force_user *) &newts,
63124 + (struct itimerspec __force_user *) &oldts);
63125 set_fs(oldfs);
63126 if (!err && old && put_compat_itimerspec(old, &oldts))
63127 return -EFAULT;
63128 @@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
63129 oldfs = get_fs();
63130 set_fs(KERNEL_DS);
63131 err = sys_timer_gettime(timer_id,
63132 - (struct itimerspec __user *) &ts);
63133 + (struct itimerspec __force_user *) &ts);
63134 set_fs(oldfs);
63135 if (!err && put_compat_itimerspec(setting, &ts))
63136 return -EFAULT;
63137 @@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
63138 oldfs = get_fs();
63139 set_fs(KERNEL_DS);
63140 err = sys_clock_settime(which_clock,
63141 - (struct timespec __user *) &ts);
63142 + (struct timespec __force_user *) &ts);
63143 set_fs(oldfs);
63144 return err;
63145 }
63146 @@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
63147 oldfs = get_fs();
63148 set_fs(KERNEL_DS);
63149 err = sys_clock_gettime(which_clock,
63150 - (struct timespec __user *) &ts);
63151 + (struct timespec __force_user *) &ts);
63152 set_fs(oldfs);
63153 if (!err && put_compat_timespec(&ts, tp))
63154 return -EFAULT;
63155 @@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
63156
63157 oldfs = get_fs();
63158 set_fs(KERNEL_DS);
63159 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
63160 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
63161 set_fs(oldfs);
63162
63163 err = compat_put_timex(utp, &txc);
63164 @@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
63165 oldfs = get_fs();
63166 set_fs(KERNEL_DS);
63167 err = sys_clock_getres(which_clock,
63168 - (struct timespec __user *) &ts);
63169 + (struct timespec __force_user *) &ts);
63170 set_fs(oldfs);
63171 if (!err && tp && put_compat_timespec(&ts, tp))
63172 return -EFAULT;
63173 @@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
63174 long err;
63175 mm_segment_t oldfs;
63176 struct timespec tu;
63177 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
63178 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
63179
63180 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
63181 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
63182 oldfs = get_fs();
63183 set_fs(KERNEL_DS);
63184 err = clock_nanosleep_restart(restart);
63185 @@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
63186 oldfs = get_fs();
63187 set_fs(KERNEL_DS);
63188 err = sys_clock_nanosleep(which_clock, flags,
63189 - (struct timespec __user *) &in,
63190 - (struct timespec __user *) &out);
63191 + (struct timespec __force_user *) &in,
63192 + (struct timespec __force_user *) &out);
63193 set_fs(oldfs);
63194
63195 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
63196 diff --git a/kernel/configs.c b/kernel/configs.c
63197 index 42e8fa0..9e7406b 100644
63198 --- a/kernel/configs.c
63199 +++ b/kernel/configs.c
63200 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
63201 struct proc_dir_entry *entry;
63202
63203 /* create the current config file */
63204 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
63205 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
63206 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
63207 + &ikconfig_file_ops);
63208 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63209 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
63210 + &ikconfig_file_ops);
63211 +#endif
63212 +#else
63213 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
63214 &ikconfig_file_ops);
63215 +#endif
63216 +
63217 if (!entry)
63218 return -ENOMEM;
63219
63220 diff --git a/kernel/cred.c b/kernel/cred.c
63221 index 5791612..a3c04dc 100644
63222 --- a/kernel/cred.c
63223 +++ b/kernel/cred.c
63224 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
63225 validate_creds(cred);
63226 put_cred(cred);
63227 }
63228 +
63229 +#ifdef CONFIG_GRKERNSEC_SETXID
63230 + cred = (struct cred *) tsk->delayed_cred;
63231 + if (cred) {
63232 + tsk->delayed_cred = NULL;
63233 + validate_creds(cred);
63234 + put_cred(cred);
63235 + }
63236 +#endif
63237 }
63238
63239 /**
63240 @@ -470,7 +479,7 @@ error_put:
63241 * Always returns 0 thus allowing this function to be tail-called at the end
63242 * of, say, sys_setgid().
63243 */
63244 -int commit_creds(struct cred *new)
63245 +static int __commit_creds(struct cred *new)
63246 {
63247 struct task_struct *task = current;
63248 const struct cred *old = task->real_cred;
63249 @@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
63250
63251 get_cred(new); /* we will require a ref for the subj creds too */
63252
63253 + gr_set_role_label(task, new->uid, new->gid);
63254 +
63255 /* dumpability changes */
63256 if (old->euid != new->euid ||
63257 old->egid != new->egid ||
63258 @@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
63259 put_cred(old);
63260 return 0;
63261 }
63262 +#ifdef CONFIG_GRKERNSEC_SETXID
63263 +extern int set_user(struct cred *new);
63264 +
63265 +void gr_delayed_cred_worker(void)
63266 +{
63267 + const struct cred *new = current->delayed_cred;
63268 + struct cred *ncred;
63269 +
63270 + current->delayed_cred = NULL;
63271 +
63272 + if (current_uid() && new != NULL) {
63273 + // from doing get_cred on it when queueing this
63274 + put_cred(new);
63275 + return;
63276 + } else if (new == NULL)
63277 + return;
63278 +
63279 + ncred = prepare_creds();
63280 + if (!ncred)
63281 + goto die;
63282 + // uids
63283 + ncred->uid = new->uid;
63284 + ncred->euid = new->euid;
63285 + ncred->suid = new->suid;
63286 + ncred->fsuid = new->fsuid;
63287 + // gids
63288 + ncred->gid = new->gid;
63289 + ncred->egid = new->egid;
63290 + ncred->sgid = new->sgid;
63291 + ncred->fsgid = new->fsgid;
63292 + // groups
63293 + if (set_groups(ncred, new->group_info) < 0) {
63294 + abort_creds(ncred);
63295 + goto die;
63296 + }
63297 + // caps
63298 + ncred->securebits = new->securebits;
63299 + ncred->cap_inheritable = new->cap_inheritable;
63300 + ncred->cap_permitted = new->cap_permitted;
63301 + ncred->cap_effective = new->cap_effective;
63302 + ncred->cap_bset = new->cap_bset;
63303 +
63304 + if (set_user(ncred)) {
63305 + abort_creds(ncred);
63306 + goto die;
63307 + }
63308 +
63309 + // from doing get_cred on it when queueing this
63310 + put_cred(new);
63311 +
63312 + __commit_creds(ncred);
63313 + return;
63314 +die:
63315 + // from doing get_cred on it when queueing this
63316 + put_cred(new);
63317 + do_group_exit(SIGKILL);
63318 +}
63319 +#endif
63320 +
63321 +int commit_creds(struct cred *new)
63322 +{
63323 +#ifdef CONFIG_GRKERNSEC_SETXID
63324 + struct task_struct *t;
63325 +
63326 + /* we won't get called with tasklist_lock held for writing
63327 + and interrupts disabled as the cred struct in that case is
63328 + init_cred
63329 + */
63330 + if (grsec_enable_setxid && !current_is_single_threaded() &&
63331 + !current_uid() && new->uid) {
63332 + rcu_read_lock();
63333 + read_lock(&tasklist_lock);
63334 + for (t = next_thread(current); t != current;
63335 + t = next_thread(t)) {
63336 + if (t->delayed_cred == NULL) {
63337 + t->delayed_cred = get_cred(new);
63338 + set_tsk_need_resched(t);
63339 + }
63340 + }
63341 + read_unlock(&tasklist_lock);
63342 + rcu_read_unlock();
63343 + }
63344 +#endif
63345 + return __commit_creds(new);
63346 +}
63347 +
63348 EXPORT_SYMBOL(commit_creds);
63349
63350 /**
63351 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
63352 index 0d7c087..01b8cef 100644
63353 --- a/kernel/debug/debug_core.c
63354 +++ b/kernel/debug/debug_core.c
63355 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
63356 */
63357 static atomic_t masters_in_kgdb;
63358 static atomic_t slaves_in_kgdb;
63359 -static atomic_t kgdb_break_tasklet_var;
63360 +static atomic_unchecked_t kgdb_break_tasklet_var;
63361 atomic_t kgdb_setting_breakpoint;
63362
63363 struct task_struct *kgdb_usethread;
63364 @@ -129,7 +129,7 @@ int kgdb_single_step;
63365 static pid_t kgdb_sstep_pid;
63366
63367 /* to keep track of the CPU which is doing the single stepping*/
63368 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63369 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63370
63371 /*
63372 * If you are debugging a problem where roundup (the collection of
63373 @@ -542,7 +542,7 @@ return_normal:
63374 * kernel will only try for the value of sstep_tries before
63375 * giving up and continuing on.
63376 */
63377 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63378 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63379 (kgdb_info[cpu].task &&
63380 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
63381 atomic_set(&kgdb_active, -1);
63382 @@ -636,8 +636,8 @@ cpu_master_loop:
63383 }
63384
63385 kgdb_restore:
63386 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
63387 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
63388 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
63389 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
63390 if (kgdb_info[sstep_cpu].task)
63391 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
63392 else
63393 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
63394 static void kgdb_tasklet_bpt(unsigned long ing)
63395 {
63396 kgdb_breakpoint();
63397 - atomic_set(&kgdb_break_tasklet_var, 0);
63398 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
63399 }
63400
63401 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
63402
63403 void kgdb_schedule_breakpoint(void)
63404 {
63405 - if (atomic_read(&kgdb_break_tasklet_var) ||
63406 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
63407 atomic_read(&kgdb_active) != -1 ||
63408 atomic_read(&kgdb_setting_breakpoint))
63409 return;
63410 - atomic_inc(&kgdb_break_tasklet_var);
63411 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
63412 tasklet_schedule(&kgdb_tasklet_breakpoint);
63413 }
63414 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
63415 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
63416 index 63786e7..0780cac 100644
63417 --- a/kernel/debug/kdb/kdb_main.c
63418 +++ b/kernel/debug/kdb/kdb_main.c
63419 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
63420 list_for_each_entry(mod, kdb_modules, list) {
63421
63422 kdb_printf("%-20s%8u 0x%p ", mod->name,
63423 - mod->core_size, (void *)mod);
63424 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
63425 #ifdef CONFIG_MODULE_UNLOAD
63426 kdb_printf("%4d ", module_refcount(mod));
63427 #endif
63428 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
63429 kdb_printf(" (Loading)");
63430 else
63431 kdb_printf(" (Live)");
63432 - kdb_printf(" 0x%p", mod->module_core);
63433 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63434
63435 #ifdef CONFIG_MODULE_UNLOAD
63436 {
63437 diff --git a/kernel/events/core.c b/kernel/events/core.c
63438 index 58690af..d903d75 100644
63439 --- a/kernel/events/core.c
63440 +++ b/kernel/events/core.c
63441 @@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
63442 return 0;
63443 }
63444
63445 -static atomic64_t perf_event_id;
63446 +static atomic64_unchecked_t perf_event_id;
63447
63448 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
63449 enum event_type_t event_type);
63450 @@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
63451
63452 static inline u64 perf_event_count(struct perf_event *event)
63453 {
63454 - return local64_read(&event->count) + atomic64_read(&event->child_count);
63455 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
63456 }
63457
63458 static u64 perf_event_read(struct perf_event *event)
63459 @@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
63460 mutex_lock(&event->child_mutex);
63461 total += perf_event_read(event);
63462 *enabled += event->total_time_enabled +
63463 - atomic64_read(&event->child_total_time_enabled);
63464 + atomic64_read_unchecked(&event->child_total_time_enabled);
63465 *running += event->total_time_running +
63466 - atomic64_read(&event->child_total_time_running);
63467 + atomic64_read_unchecked(&event->child_total_time_running);
63468
63469 list_for_each_entry(child, &event->child_list, child_list) {
63470 total += perf_event_read(child);
63471 @@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
63472 userpg->offset -= local64_read(&event->hw.prev_count);
63473
63474 userpg->time_enabled = enabled +
63475 - atomic64_read(&event->child_total_time_enabled);
63476 + atomic64_read_unchecked(&event->child_total_time_enabled);
63477
63478 userpg->time_running = running +
63479 - atomic64_read(&event->child_total_time_running);
63480 + atomic64_read_unchecked(&event->child_total_time_running);
63481
63482 barrier();
63483 ++userpg->lock;
63484 @@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
63485 values[n++] = perf_event_count(event);
63486 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
63487 values[n++] = enabled +
63488 - atomic64_read(&event->child_total_time_enabled);
63489 + atomic64_read_unchecked(&event->child_total_time_enabled);
63490 }
63491 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
63492 values[n++] = running +
63493 - atomic64_read(&event->child_total_time_running);
63494 + atomic64_read_unchecked(&event->child_total_time_running);
63495 }
63496 if (read_format & PERF_FORMAT_ID)
63497 values[n++] = primary_event_id(event);
63498 @@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
63499 * need to add enough zero bytes after the string to handle
63500 * the 64bit alignment we do later.
63501 */
63502 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
63503 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
63504 if (!buf) {
63505 name = strncpy(tmp, "//enomem", sizeof(tmp));
63506 goto got_name;
63507 }
63508 - name = d_path(&file->f_path, buf, PATH_MAX);
63509 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
63510 if (IS_ERR(name)) {
63511 name = strncpy(tmp, "//toolong", sizeof(tmp));
63512 goto got_name;
63513 @@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
63514 event->parent = parent_event;
63515
63516 event->ns = get_pid_ns(current->nsproxy->pid_ns);
63517 - event->id = atomic64_inc_return(&perf_event_id);
63518 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
63519
63520 event->state = PERF_EVENT_STATE_INACTIVE;
63521
63522 @@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
63523 /*
63524 * Add back the child's count to the parent's count:
63525 */
63526 - atomic64_add(child_val, &parent_event->child_count);
63527 - atomic64_add(child_event->total_time_enabled,
63528 + atomic64_add_unchecked(child_val, &parent_event->child_count);
63529 + atomic64_add_unchecked(child_event->total_time_enabled,
63530 &parent_event->child_total_time_enabled);
63531 - atomic64_add(child_event->total_time_running,
63532 + atomic64_add_unchecked(child_event->total_time_running,
63533 &parent_event->child_total_time_running);
63534
63535 /*
63536 diff --git a/kernel/exit.c b/kernel/exit.c
63537 index e6e01b9..619f837 100644
63538 --- a/kernel/exit.c
63539 +++ b/kernel/exit.c
63540 @@ -57,6 +57,10 @@
63541 #include <asm/pgtable.h>
63542 #include <asm/mmu_context.h>
63543
63544 +#ifdef CONFIG_GRKERNSEC
63545 +extern rwlock_t grsec_exec_file_lock;
63546 +#endif
63547 +
63548 static void exit_mm(struct task_struct * tsk);
63549
63550 static void __unhash_process(struct task_struct *p, bool group_dead)
63551 @@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
63552 struct task_struct *leader;
63553 int zap_leader;
63554 repeat:
63555 +#ifdef CONFIG_NET
63556 + gr_del_task_from_ip_table(p);
63557 +#endif
63558 +
63559 /* don't need to get the RCU readlock here - the process is dead and
63560 * can't be modifying its own credentials. But shut RCU-lockdep up */
63561 rcu_read_lock();
63562 @@ -380,7 +388,7 @@ int allow_signal(int sig)
63563 * know it'll be handled, so that they don't get converted to
63564 * SIGKILL or just silently dropped.
63565 */
63566 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63567 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63568 recalc_sigpending();
63569 spin_unlock_irq(&current->sighand->siglock);
63570 return 0;
63571 @@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
63572 vsnprintf(current->comm, sizeof(current->comm), name, args);
63573 va_end(args);
63574
63575 +#ifdef CONFIG_GRKERNSEC
63576 + write_lock(&grsec_exec_file_lock);
63577 + if (current->exec_file) {
63578 + fput(current->exec_file);
63579 + current->exec_file = NULL;
63580 + }
63581 + write_unlock(&grsec_exec_file_lock);
63582 +#endif
63583 +
63584 + gr_set_kernel_label(current);
63585 +
63586 /*
63587 * If we were started as result of loading a module, close all of the
63588 * user space pages. We don't need them, and if we didn't close them
63589 @@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
63590 struct task_struct *tsk = current;
63591 int group_dead;
63592
63593 + set_fs(USER_DS);
63594 +
63595 profile_task_exit(tsk);
63596
63597 WARN_ON(blk_needs_flush_plug(tsk));
63598 @@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
63599 * mm_release()->clear_child_tid() from writing to a user-controlled
63600 * kernel address.
63601 */
63602 - set_fs(USER_DS);
63603
63604 ptrace_event(PTRACE_EVENT_EXIT, code);
63605
63606 @@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
63607 tsk->exit_code = code;
63608 taskstats_exit(tsk, group_dead);
63609
63610 + gr_acl_handle_psacct(tsk, code);
63611 + gr_acl_handle_exit();
63612 +
63613 exit_mm(tsk);
63614
63615 if (group_dead)
63616 diff --git a/kernel/fork.c b/kernel/fork.c
63617 index 0acf42c0..9e40e2e 100644
63618 --- a/kernel/fork.c
63619 +++ b/kernel/fork.c
63620 @@ -281,7 +281,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
63621 *stackend = STACK_END_MAGIC; /* for overflow detection */
63622
63623 #ifdef CONFIG_CC_STACKPROTECTOR
63624 - tsk->stack_canary = get_random_int();
63625 + tsk->stack_canary = pax_get_random_long();
63626 #endif
63627
63628 /*
63629 @@ -305,13 +305,77 @@ out:
63630 }
63631
63632 #ifdef CONFIG_MMU
63633 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63634 +{
63635 + struct vm_area_struct *tmp;
63636 + unsigned long charge;
63637 + struct mempolicy *pol;
63638 + struct file *file;
63639 +
63640 + charge = 0;
63641 + if (mpnt->vm_flags & VM_ACCOUNT) {
63642 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63643 + if (security_vm_enough_memory(len))
63644 + goto fail_nomem;
63645 + charge = len;
63646 + }
63647 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63648 + if (!tmp)
63649 + goto fail_nomem;
63650 + *tmp = *mpnt;
63651 + tmp->vm_mm = mm;
63652 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
63653 + pol = mpol_dup(vma_policy(mpnt));
63654 + if (IS_ERR(pol))
63655 + goto fail_nomem_policy;
63656 + vma_set_policy(tmp, pol);
63657 + if (anon_vma_fork(tmp, mpnt))
63658 + goto fail_nomem_anon_vma_fork;
63659 + tmp->vm_flags &= ~VM_LOCKED;
63660 + tmp->vm_next = tmp->vm_prev = NULL;
63661 + tmp->vm_mirror = NULL;
63662 + file = tmp->vm_file;
63663 + if (file) {
63664 + struct inode *inode = file->f_path.dentry->d_inode;
63665 + struct address_space *mapping = file->f_mapping;
63666 +
63667 + get_file(file);
63668 + if (tmp->vm_flags & VM_DENYWRITE)
63669 + atomic_dec(&inode->i_writecount);
63670 + mutex_lock(&mapping->i_mmap_mutex);
63671 + if (tmp->vm_flags & VM_SHARED)
63672 + mapping->i_mmap_writable++;
63673 + flush_dcache_mmap_lock(mapping);
63674 + /* insert tmp into the share list, just after mpnt */
63675 + vma_prio_tree_add(tmp, mpnt);
63676 + flush_dcache_mmap_unlock(mapping);
63677 + mutex_unlock(&mapping->i_mmap_mutex);
63678 + }
63679 +
63680 + /*
63681 + * Clear hugetlb-related page reserves for children. This only
63682 + * affects MAP_PRIVATE mappings. Faults generated by the child
63683 + * are not guaranteed to succeed, even if read-only
63684 + */
63685 + if (is_vm_hugetlb_page(tmp))
63686 + reset_vma_resv_huge_pages(tmp);
63687 +
63688 + return tmp;
63689 +
63690 +fail_nomem_anon_vma_fork:
63691 + mpol_put(pol);
63692 +fail_nomem_policy:
63693 + kmem_cache_free(vm_area_cachep, tmp);
63694 +fail_nomem:
63695 + vm_unacct_memory(charge);
63696 + return NULL;
63697 +}
63698 +
63699 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63700 {
63701 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63702 struct rb_node **rb_link, *rb_parent;
63703 int retval;
63704 - unsigned long charge;
63705 - struct mempolicy *pol;
63706
63707 down_write(&oldmm->mmap_sem);
63708 flush_cache_dup_mm(oldmm);
63709 @@ -323,8 +387,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63710 mm->locked_vm = 0;
63711 mm->mmap = NULL;
63712 mm->mmap_cache = NULL;
63713 - mm->free_area_cache = oldmm->mmap_base;
63714 - mm->cached_hole_size = ~0UL;
63715 + mm->free_area_cache = oldmm->free_area_cache;
63716 + mm->cached_hole_size = oldmm->cached_hole_size;
63717 mm->map_count = 0;
63718 cpumask_clear(mm_cpumask(mm));
63719 mm->mm_rb = RB_ROOT;
63720 @@ -340,8 +404,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63721
63722 prev = NULL;
63723 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63724 - struct file *file;
63725 -
63726 if (mpnt->vm_flags & VM_DONTCOPY) {
63727 long pages = vma_pages(mpnt);
63728 mm->total_vm -= pages;
63729 @@ -349,53 +411,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63730 -pages);
63731 continue;
63732 }
63733 - charge = 0;
63734 - if (mpnt->vm_flags & VM_ACCOUNT) {
63735 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63736 - if (security_vm_enough_memory(len))
63737 - goto fail_nomem;
63738 - charge = len;
63739 + tmp = dup_vma(mm, mpnt);
63740 + if (!tmp) {
63741 + retval = -ENOMEM;
63742 + goto out;
63743 }
63744 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63745 - if (!tmp)
63746 - goto fail_nomem;
63747 - *tmp = *mpnt;
63748 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
63749 - pol = mpol_dup(vma_policy(mpnt));
63750 - retval = PTR_ERR(pol);
63751 - if (IS_ERR(pol))
63752 - goto fail_nomem_policy;
63753 - vma_set_policy(tmp, pol);
63754 - tmp->vm_mm = mm;
63755 - if (anon_vma_fork(tmp, mpnt))
63756 - goto fail_nomem_anon_vma_fork;
63757 - tmp->vm_flags &= ~VM_LOCKED;
63758 - tmp->vm_next = tmp->vm_prev = NULL;
63759 - file = tmp->vm_file;
63760 - if (file) {
63761 - struct inode *inode = file->f_path.dentry->d_inode;
63762 - struct address_space *mapping = file->f_mapping;
63763 -
63764 - get_file(file);
63765 - if (tmp->vm_flags & VM_DENYWRITE)
63766 - atomic_dec(&inode->i_writecount);
63767 - mutex_lock(&mapping->i_mmap_mutex);
63768 - if (tmp->vm_flags & VM_SHARED)
63769 - mapping->i_mmap_writable++;
63770 - flush_dcache_mmap_lock(mapping);
63771 - /* insert tmp into the share list, just after mpnt */
63772 - vma_prio_tree_add(tmp, mpnt);
63773 - flush_dcache_mmap_unlock(mapping);
63774 - mutex_unlock(&mapping->i_mmap_mutex);
63775 - }
63776 -
63777 - /*
63778 - * Clear hugetlb-related page reserves for children. This only
63779 - * affects MAP_PRIVATE mappings. Faults generated by the child
63780 - * are not guaranteed to succeed, even if read-only
63781 - */
63782 - if (is_vm_hugetlb_page(tmp))
63783 - reset_vma_resv_huge_pages(tmp);
63784
63785 /*
63786 * Link in the new vma and copy the page table entries.
63787 @@ -418,6 +438,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63788 if (retval)
63789 goto out;
63790 }
63791 +
63792 +#ifdef CONFIG_PAX_SEGMEXEC
63793 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63794 + struct vm_area_struct *mpnt_m;
63795 +
63796 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63797 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63798 +
63799 + if (!mpnt->vm_mirror)
63800 + continue;
63801 +
63802 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63803 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63804 + mpnt->vm_mirror = mpnt_m;
63805 + } else {
63806 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63807 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63808 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63809 + mpnt->vm_mirror->vm_mirror = mpnt;
63810 + }
63811 + }
63812 + BUG_ON(mpnt_m);
63813 + }
63814 +#endif
63815 +
63816 /* a new mm has just been created */
63817 arch_dup_mmap(oldmm, mm);
63818 retval = 0;
63819 @@ -426,14 +471,6 @@ out:
63820 flush_tlb_mm(oldmm);
63821 up_write(&oldmm->mmap_sem);
63822 return retval;
63823 -fail_nomem_anon_vma_fork:
63824 - mpol_put(pol);
63825 -fail_nomem_policy:
63826 - kmem_cache_free(vm_area_cachep, tmp);
63827 -fail_nomem:
63828 - retval = -ENOMEM;
63829 - vm_unacct_memory(charge);
63830 - goto out;
63831 }
63832
63833 static inline int mm_alloc_pgd(struct mm_struct *mm)
63834 @@ -645,6 +682,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
63835 }
63836 EXPORT_SYMBOL_GPL(get_task_mm);
63837
63838 +struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
63839 +{
63840 + struct mm_struct *mm;
63841 + int err;
63842 +
63843 + err = mutex_lock_killable(&task->signal->cred_guard_mutex);
63844 + if (err)
63845 + return ERR_PTR(err);
63846 +
63847 + mm = get_task_mm(task);
63848 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
63849 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
63850 + mmput(mm);
63851 + mm = ERR_PTR(-EACCES);
63852 + }
63853 + mutex_unlock(&task->signal->cred_guard_mutex);
63854 +
63855 + return mm;
63856 +}
63857 +
63858 /* Please note the differences between mmput and mm_release.
63859 * mmput is called whenever we stop holding onto a mm_struct,
63860 * error success whatever.
63861 @@ -830,13 +887,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
63862 spin_unlock(&fs->lock);
63863 return -EAGAIN;
63864 }
63865 - fs->users++;
63866 + atomic_inc(&fs->users);
63867 spin_unlock(&fs->lock);
63868 return 0;
63869 }
63870 tsk->fs = copy_fs_struct(fs);
63871 if (!tsk->fs)
63872 return -ENOMEM;
63873 + gr_set_chroot_entries(tsk, &tsk->fs->root);
63874 return 0;
63875 }
63876
63877 @@ -1100,6 +1158,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63878 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63879 #endif
63880 retval = -EAGAIN;
63881 +
63882 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63883 +
63884 if (atomic_read(&p->real_cred->user->processes) >=
63885 task_rlimit(p, RLIMIT_NPROC)) {
63886 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63887 @@ -1259,6 +1320,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63888 if (clone_flags & CLONE_THREAD)
63889 p->tgid = current->tgid;
63890
63891 + gr_copy_label(p);
63892 +
63893 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63894 /*
63895 * Clear TID on mm_release()?
63896 @@ -1421,6 +1484,8 @@ bad_fork_cleanup_count:
63897 bad_fork_free:
63898 free_task(p);
63899 fork_out:
63900 + gr_log_forkfail(retval);
63901 +
63902 return ERR_PTR(retval);
63903 }
63904
63905 @@ -1521,6 +1586,8 @@ long do_fork(unsigned long clone_flags,
63906 if (clone_flags & CLONE_PARENT_SETTID)
63907 put_user(nr, parent_tidptr);
63908
63909 + gr_handle_brute_check();
63910 +
63911 if (clone_flags & CLONE_VFORK) {
63912 p->vfork_done = &vfork;
63913 init_completion(&vfork);
63914 @@ -1630,7 +1697,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
63915 return 0;
63916
63917 /* don't need lock here; in the worst case we'll do useless copy */
63918 - if (fs->users == 1)
63919 + if (atomic_read(&fs->users) == 1)
63920 return 0;
63921
63922 *new_fsp = copy_fs_struct(fs);
63923 @@ -1719,7 +1786,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
63924 fs = current->fs;
63925 spin_lock(&fs->lock);
63926 current->fs = new_fs;
63927 - if (--fs->users)
63928 + gr_set_chroot_entries(current, &current->fs->root);
63929 + if (atomic_dec_return(&fs->users))
63930 new_fs = NULL;
63931 else
63932 new_fs = fs;
63933 diff --git a/kernel/futex.c b/kernel/futex.c
63934 index 1614be2..37abc7e 100644
63935 --- a/kernel/futex.c
63936 +++ b/kernel/futex.c
63937 @@ -54,6 +54,7 @@
63938 #include <linux/mount.h>
63939 #include <linux/pagemap.h>
63940 #include <linux/syscalls.h>
63941 +#include <linux/ptrace.h>
63942 #include <linux/signal.h>
63943 #include <linux/export.h>
63944 #include <linux/magic.h>
63945 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
63946 struct page *page, *page_head;
63947 int err, ro = 0;
63948
63949 +#ifdef CONFIG_PAX_SEGMEXEC
63950 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63951 + return -EFAULT;
63952 +#endif
63953 +
63954 /*
63955 * The futex address must be "naturally" aligned.
63956 */
63957 @@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
63958 if (!p)
63959 goto err_unlock;
63960 ret = -EPERM;
63961 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63962 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63963 + goto err_unlock;
63964 +#endif
63965 pcred = __task_cred(p);
63966 /* If victim is in different user_ns, then uids are not
63967 comparable, so we must have CAP_SYS_PTRACE */
63968 @@ -2724,6 +2734,7 @@ static int __init futex_init(void)
63969 {
63970 u32 curval;
63971 int i;
63972 + mm_segment_t oldfs;
63973
63974 /*
63975 * This will fail and we want it. Some arch implementations do
63976 @@ -2735,8 +2746,11 @@ static int __init futex_init(void)
63977 * implementation, the non-functional ones will return
63978 * -ENOSYS.
63979 */
63980 + oldfs = get_fs();
63981 + set_fs(USER_DS);
63982 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63983 futex_cmpxchg_enabled = 1;
63984 + set_fs(oldfs);
63985
63986 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63987 plist_head_init(&futex_queues[i].chain);
63988 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
63989 index 5f9e689..582d46d 100644
63990 --- a/kernel/futex_compat.c
63991 +++ b/kernel/futex_compat.c
63992 @@ -10,6 +10,7 @@
63993 #include <linux/compat.h>
63994 #include <linux/nsproxy.h>
63995 #include <linux/futex.h>
63996 +#include <linux/ptrace.h>
63997
63998 #include <asm/uaccess.h>
63999
64000 @@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
64001 {
64002 struct compat_robust_list_head __user *head;
64003 unsigned long ret;
64004 - const struct cred *cred = current_cred(), *pcred;
64005 + const struct cred *cred = current_cred();
64006 + const struct cred *pcred;
64007
64008 if (!futex_cmpxchg_enabled)
64009 return -ENOSYS;
64010 @@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
64011 if (!p)
64012 goto err_unlock;
64013 ret = -EPERM;
64014 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64015 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
64016 + goto err_unlock;
64017 +#endif
64018 pcred = __task_cred(p);
64019 /* If victim is in different user_ns, then uids are not
64020 comparable, so we must have CAP_SYS_PTRACE */
64021 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
64022 index 9b22d03..6295b62 100644
64023 --- a/kernel/gcov/base.c
64024 +++ b/kernel/gcov/base.c
64025 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
64026 }
64027
64028 #ifdef CONFIG_MODULES
64029 -static inline int within(void *addr, void *start, unsigned long size)
64030 -{
64031 - return ((addr >= start) && (addr < start + size));
64032 -}
64033 -
64034 /* Update list and generate events when modules are unloaded. */
64035 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
64036 void *data)
64037 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
64038 prev = NULL;
64039 /* Remove entries located in module from linked list. */
64040 for (info = gcov_info_head; info; info = info->next) {
64041 - if (within(info, mod->module_core, mod->core_size)) {
64042 + if (within_module_core_rw((unsigned long)info, mod)) {
64043 if (prev)
64044 prev->next = info->next;
64045 else
64046 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
64047 index ae34bf5..4e2f3d0 100644
64048 --- a/kernel/hrtimer.c
64049 +++ b/kernel/hrtimer.c
64050 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
64051 local_irq_restore(flags);
64052 }
64053
64054 -static void run_hrtimer_softirq(struct softirq_action *h)
64055 +static void run_hrtimer_softirq(void)
64056 {
64057 hrtimer_peek_ahead_timers();
64058 }
64059 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
64060 index 66ff710..05a5128 100644
64061 --- a/kernel/jump_label.c
64062 +++ b/kernel/jump_label.c
64063 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
64064
64065 size = (((unsigned long)stop - (unsigned long)start)
64066 / sizeof(struct jump_entry));
64067 + pax_open_kernel();
64068 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
64069 + pax_close_kernel();
64070 }
64071
64072 static void jump_label_update(struct jump_label_key *key, int enable);
64073 @@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
64074 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
64075 struct jump_entry *iter;
64076
64077 + pax_open_kernel();
64078 for (iter = iter_start; iter < iter_stop; iter++) {
64079 if (within_module_init(iter->code, mod))
64080 iter->code = 0;
64081 }
64082 + pax_close_kernel();
64083 }
64084
64085 static int
64086 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
64087 index 079f1d3..a407562 100644
64088 --- a/kernel/kallsyms.c
64089 +++ b/kernel/kallsyms.c
64090 @@ -11,6 +11,9 @@
64091 * Changed the compression method from stem compression to "table lookup"
64092 * compression (see scripts/kallsyms.c for a more complete description)
64093 */
64094 +#ifdef CONFIG_GRKERNSEC_HIDESYM
64095 +#define __INCLUDED_BY_HIDESYM 1
64096 +#endif
64097 #include <linux/kallsyms.h>
64098 #include <linux/module.h>
64099 #include <linux/init.h>
64100 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
64101
64102 static inline int is_kernel_inittext(unsigned long addr)
64103 {
64104 + if (system_state != SYSTEM_BOOTING)
64105 + return 0;
64106 +
64107 if (addr >= (unsigned long)_sinittext
64108 && addr <= (unsigned long)_einittext)
64109 return 1;
64110 return 0;
64111 }
64112
64113 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64114 +#ifdef CONFIG_MODULES
64115 +static inline int is_module_text(unsigned long addr)
64116 +{
64117 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
64118 + return 1;
64119 +
64120 + addr = ktla_ktva(addr);
64121 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
64122 +}
64123 +#else
64124 +static inline int is_module_text(unsigned long addr)
64125 +{
64126 + return 0;
64127 +}
64128 +#endif
64129 +#endif
64130 +
64131 static inline int is_kernel_text(unsigned long addr)
64132 {
64133 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
64134 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
64135
64136 static inline int is_kernel(unsigned long addr)
64137 {
64138 +
64139 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64140 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
64141 + return 1;
64142 +
64143 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
64144 +#else
64145 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
64146 +#endif
64147 +
64148 return 1;
64149 return in_gate_area_no_mm(addr);
64150 }
64151
64152 static int is_ksym_addr(unsigned long addr)
64153 {
64154 +
64155 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64156 + if (is_module_text(addr))
64157 + return 0;
64158 +#endif
64159 +
64160 if (all_var)
64161 return is_kernel(addr);
64162
64163 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
64164
64165 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
64166 {
64167 - iter->name[0] = '\0';
64168 iter->nameoff = get_symbol_offset(new_pos);
64169 iter->pos = new_pos;
64170 }
64171 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
64172 {
64173 struct kallsym_iter *iter = m->private;
64174
64175 +#ifdef CONFIG_GRKERNSEC_HIDESYM
64176 + if (current_uid())
64177 + return 0;
64178 +#endif
64179 +
64180 /* Some debugging symbols have no name. Ignore them. */
64181 if (!iter->name[0])
64182 return 0;
64183 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
64184 struct kallsym_iter *iter;
64185 int ret;
64186
64187 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
64188 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
64189 if (!iter)
64190 return -ENOMEM;
64191 reset_iter(iter, 0);
64192 diff --git a/kernel/kexec.c b/kernel/kexec.c
64193 index dc7bc08..4601964 100644
64194 --- a/kernel/kexec.c
64195 +++ b/kernel/kexec.c
64196 @@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
64197 unsigned long flags)
64198 {
64199 struct compat_kexec_segment in;
64200 - struct kexec_segment out, __user *ksegments;
64201 + struct kexec_segment out;
64202 + struct kexec_segment __user *ksegments;
64203 unsigned long i, result;
64204
64205 /* Don't allow clients that don't understand the native
64206 diff --git a/kernel/kmod.c b/kernel/kmod.c
64207 index a4bea97..7a1ae9a 100644
64208 --- a/kernel/kmod.c
64209 +++ b/kernel/kmod.c
64210 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
64211 * If module auto-loading support is disabled then this function
64212 * becomes a no-operation.
64213 */
64214 -int __request_module(bool wait, const char *fmt, ...)
64215 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
64216 {
64217 - va_list args;
64218 char module_name[MODULE_NAME_LEN];
64219 unsigned int max_modprobes;
64220 int ret;
64221 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
64222 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
64223 static char *envp[] = { "HOME=/",
64224 "TERM=linux",
64225 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
64226 @@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
64227 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
64228 static int kmod_loop_msg;
64229
64230 - va_start(args, fmt);
64231 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
64232 - va_end(args);
64233 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
64234 if (ret >= MODULE_NAME_LEN)
64235 return -ENAMETOOLONG;
64236
64237 @@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
64238 if (ret)
64239 return ret;
64240
64241 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64242 + if (!current_uid()) {
64243 + /* hack to workaround consolekit/udisks stupidity */
64244 + read_lock(&tasklist_lock);
64245 + if (!strcmp(current->comm, "mount") &&
64246 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
64247 + read_unlock(&tasklist_lock);
64248 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
64249 + return -EPERM;
64250 + }
64251 + read_unlock(&tasklist_lock);
64252 + }
64253 +#endif
64254 +
64255 /* If modprobe needs a service that is in a module, we get a recursive
64256 * loop. Limit the number of running kmod threads to max_threads/2 or
64257 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
64258 @@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
64259 atomic_dec(&kmod_concurrent);
64260 return ret;
64261 }
64262 +
64263 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
64264 +{
64265 + va_list args;
64266 + int ret;
64267 +
64268 + va_start(args, fmt);
64269 + ret = ____request_module(wait, module_param, fmt, args);
64270 + va_end(args);
64271 +
64272 + return ret;
64273 +}
64274 +
64275 +int __request_module(bool wait, const char *fmt, ...)
64276 +{
64277 + va_list args;
64278 + int ret;
64279 +
64280 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64281 + if (current_uid()) {
64282 + char module_param[MODULE_NAME_LEN];
64283 +
64284 + memset(module_param, 0, sizeof(module_param));
64285 +
64286 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
64287 +
64288 + va_start(args, fmt);
64289 + ret = ____request_module(wait, module_param, fmt, args);
64290 + va_end(args);
64291 +
64292 + return ret;
64293 + }
64294 +#endif
64295 +
64296 + va_start(args, fmt);
64297 + ret = ____request_module(wait, NULL, fmt, args);
64298 + va_end(args);
64299 +
64300 + return ret;
64301 +}
64302 +
64303 EXPORT_SYMBOL(__request_module);
64304 #endif /* CONFIG_MODULES */
64305
64306 @@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
64307 *
64308 * Thus the __user pointer cast is valid here.
64309 */
64310 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
64311 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
64312
64313 /*
64314 * If ret is 0, either ____call_usermodehelper failed and the
64315 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
64316 index faa39d1..d7ad37e 100644
64317 --- a/kernel/kprobes.c
64318 +++ b/kernel/kprobes.c
64319 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
64320 * kernel image and loaded module images reside. This is required
64321 * so x86_64 can correctly handle the %rip-relative fixups.
64322 */
64323 - kip->insns = module_alloc(PAGE_SIZE);
64324 + kip->insns = module_alloc_exec(PAGE_SIZE);
64325 if (!kip->insns) {
64326 kfree(kip);
64327 return NULL;
64328 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
64329 */
64330 if (!list_is_singular(&kip->list)) {
64331 list_del(&kip->list);
64332 - module_free(NULL, kip->insns);
64333 + module_free_exec(NULL, kip->insns);
64334 kfree(kip);
64335 }
64336 return 1;
64337 @@ -1953,7 +1953,7 @@ static int __init init_kprobes(void)
64338 {
64339 int i, err = 0;
64340 unsigned long offset = 0, size = 0;
64341 - char *modname, namebuf[128];
64342 + char *modname, namebuf[KSYM_NAME_LEN];
64343 const char *symbol_name;
64344 void *addr;
64345 struct kprobe_blackpoint *kb;
64346 @@ -2079,7 +2079,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
64347 const char *sym = NULL;
64348 unsigned int i = *(loff_t *) v;
64349 unsigned long offset = 0;
64350 - char *modname, namebuf[128];
64351 + char *modname, namebuf[KSYM_NAME_LEN];
64352
64353 head = &kprobe_table[i];
64354 preempt_disable();
64355 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
64356 index b2e08c9..01d8049 100644
64357 --- a/kernel/lockdep.c
64358 +++ b/kernel/lockdep.c
64359 @@ -592,6 +592,10 @@ static int static_obj(void *obj)
64360 end = (unsigned long) &_end,
64361 addr = (unsigned long) obj;
64362
64363 +#ifdef CONFIG_PAX_KERNEXEC
64364 + start = ktla_ktva(start);
64365 +#endif
64366 +
64367 /*
64368 * static variable?
64369 */
64370 @@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
64371 if (!static_obj(lock->key)) {
64372 debug_locks_off();
64373 printk("INFO: trying to register non-static key.\n");
64374 + printk("lock:%pS key:%pS.\n", lock, lock->key);
64375 printk("the code is fine but needs lockdep annotation.\n");
64376 printk("turning off the locking correctness validator.\n");
64377 dump_stack();
64378 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
64379 if (!class)
64380 return 0;
64381 }
64382 - atomic_inc((atomic_t *)&class->ops);
64383 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
64384 if (very_verbose(class)) {
64385 printk("\nacquire class [%p] %s", class->key, class->name);
64386 if (class->name_version > 1)
64387 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
64388 index 91c32a0..b2c71c5 100644
64389 --- a/kernel/lockdep_proc.c
64390 +++ b/kernel/lockdep_proc.c
64391 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
64392
64393 static void print_name(struct seq_file *m, struct lock_class *class)
64394 {
64395 - char str[128];
64396 + char str[KSYM_NAME_LEN];
64397 const char *name = class->name;
64398
64399 if (!name) {
64400 diff --git a/kernel/module.c b/kernel/module.c
64401 index 178333c..04e3408 100644
64402 --- a/kernel/module.c
64403 +++ b/kernel/module.c
64404 @@ -58,6 +58,7 @@
64405 #include <linux/jump_label.h>
64406 #include <linux/pfn.h>
64407 #include <linux/bsearch.h>
64408 +#include <linux/grsecurity.h>
64409
64410 #define CREATE_TRACE_POINTS
64411 #include <trace/events/module.h>
64412 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
64413
64414 /* Bounds of module allocation, for speeding __module_address.
64415 * Protected by module_mutex. */
64416 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
64417 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
64418 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
64419
64420 int register_module_notifier(struct notifier_block * nb)
64421 {
64422 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64423 return true;
64424
64425 list_for_each_entry_rcu(mod, &modules, list) {
64426 - struct symsearch arr[] = {
64427 + struct symsearch modarr[] = {
64428 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64429 NOT_GPL_ONLY, false },
64430 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64431 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64432 #endif
64433 };
64434
64435 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64436 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64437 return true;
64438 }
64439 return false;
64440 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
64441 static int percpu_modalloc(struct module *mod,
64442 unsigned long size, unsigned long align)
64443 {
64444 - if (align > PAGE_SIZE) {
64445 + if (align-1 >= PAGE_SIZE) {
64446 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64447 mod->name, align, PAGE_SIZE);
64448 align = PAGE_SIZE;
64449 @@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
64450 */
64451 #ifdef CONFIG_SYSFS
64452
64453 -#ifdef CONFIG_KALLSYMS
64454 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64455 static inline bool sect_empty(const Elf_Shdr *sect)
64456 {
64457 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
64458 @@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
64459
64460 static void unset_module_core_ro_nx(struct module *mod)
64461 {
64462 - set_page_attributes(mod->module_core + mod->core_text_size,
64463 - mod->module_core + mod->core_size,
64464 + set_page_attributes(mod->module_core_rw,
64465 + mod->module_core_rw + mod->core_size_rw,
64466 set_memory_x);
64467 - set_page_attributes(mod->module_core,
64468 - mod->module_core + mod->core_ro_size,
64469 + set_page_attributes(mod->module_core_rx,
64470 + mod->module_core_rx + mod->core_size_rx,
64471 set_memory_rw);
64472 }
64473
64474 static void unset_module_init_ro_nx(struct module *mod)
64475 {
64476 - set_page_attributes(mod->module_init + mod->init_text_size,
64477 - mod->module_init + mod->init_size,
64478 + set_page_attributes(mod->module_init_rw,
64479 + mod->module_init_rw + mod->init_size_rw,
64480 set_memory_x);
64481 - set_page_attributes(mod->module_init,
64482 - mod->module_init + mod->init_ro_size,
64483 + set_page_attributes(mod->module_init_rx,
64484 + mod->module_init_rx + mod->init_size_rx,
64485 set_memory_rw);
64486 }
64487
64488 @@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
64489
64490 mutex_lock(&module_mutex);
64491 list_for_each_entry_rcu(mod, &modules, list) {
64492 - if ((mod->module_core) && (mod->core_text_size)) {
64493 - set_page_attributes(mod->module_core,
64494 - mod->module_core + mod->core_text_size,
64495 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64496 + set_page_attributes(mod->module_core_rx,
64497 + mod->module_core_rx + mod->core_size_rx,
64498 set_memory_rw);
64499 }
64500 - if ((mod->module_init) && (mod->init_text_size)) {
64501 - set_page_attributes(mod->module_init,
64502 - mod->module_init + mod->init_text_size,
64503 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64504 + set_page_attributes(mod->module_init_rx,
64505 + mod->module_init_rx + mod->init_size_rx,
64506 set_memory_rw);
64507 }
64508 }
64509 @@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
64510
64511 mutex_lock(&module_mutex);
64512 list_for_each_entry_rcu(mod, &modules, list) {
64513 - if ((mod->module_core) && (mod->core_text_size)) {
64514 - set_page_attributes(mod->module_core,
64515 - mod->module_core + mod->core_text_size,
64516 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64517 + set_page_attributes(mod->module_core_rx,
64518 + mod->module_core_rx + mod->core_size_rx,
64519 set_memory_ro);
64520 }
64521 - if ((mod->module_init) && (mod->init_text_size)) {
64522 - set_page_attributes(mod->module_init,
64523 - mod->module_init + mod->init_text_size,
64524 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64525 + set_page_attributes(mod->module_init_rx,
64526 + mod->module_init_rx + mod->init_size_rx,
64527 set_memory_ro);
64528 }
64529 }
64530 @@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
64531
64532 /* This may be NULL, but that's OK */
64533 unset_module_init_ro_nx(mod);
64534 - module_free(mod, mod->module_init);
64535 + module_free(mod, mod->module_init_rw);
64536 + module_free_exec(mod, mod->module_init_rx);
64537 kfree(mod->args);
64538 percpu_modfree(mod);
64539
64540 /* Free lock-classes: */
64541 - lockdep_free_key_range(mod->module_core, mod->core_size);
64542 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64543 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64544
64545 /* Finally, free the core (containing the module structure) */
64546 unset_module_core_ro_nx(mod);
64547 - module_free(mod, mod->module_core);
64548 + module_free_exec(mod, mod->module_core_rx);
64549 + module_free(mod, mod->module_core_rw);
64550
64551 #ifdef CONFIG_MPU
64552 update_protections(current->mm);
64553 @@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64554 unsigned int i;
64555 int ret = 0;
64556 const struct kernel_symbol *ksym;
64557 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64558 + int is_fs_load = 0;
64559 + int register_filesystem_found = 0;
64560 + char *p;
64561 +
64562 + p = strstr(mod->args, "grsec_modharden_fs");
64563 + if (p) {
64564 + char *endptr = p + strlen("grsec_modharden_fs");
64565 + /* copy \0 as well */
64566 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64567 + is_fs_load = 1;
64568 + }
64569 +#endif
64570
64571 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
64572 const char *name = info->strtab + sym[i].st_name;
64573
64574 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64575 + /* it's a real shame this will never get ripped and copied
64576 + upstream! ;(
64577 + */
64578 + if (is_fs_load && !strcmp(name, "register_filesystem"))
64579 + register_filesystem_found = 1;
64580 +#endif
64581 +
64582 switch (sym[i].st_shndx) {
64583 case SHN_COMMON:
64584 /* We compiled with -fno-common. These are not
64585 @@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64586 ksym = resolve_symbol_wait(mod, info, name);
64587 /* Ok if resolved. */
64588 if (ksym && !IS_ERR(ksym)) {
64589 + pax_open_kernel();
64590 sym[i].st_value = ksym->value;
64591 + pax_close_kernel();
64592 break;
64593 }
64594
64595 @@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64596 secbase = (unsigned long)mod_percpu(mod);
64597 else
64598 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64599 + pax_open_kernel();
64600 sym[i].st_value += secbase;
64601 + pax_close_kernel();
64602 break;
64603 }
64604 }
64605
64606 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64607 + if (is_fs_load && !register_filesystem_found) {
64608 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64609 + ret = -EPERM;
64610 + }
64611 +#endif
64612 +
64613 return ret;
64614 }
64615
64616 @@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
64617 || s->sh_entsize != ~0UL
64618 || strstarts(sname, ".init"))
64619 continue;
64620 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64621 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64622 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64623 + else
64624 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64625 DEBUGP("\t%s\n", name);
64626 }
64627 - switch (m) {
64628 - case 0: /* executable */
64629 - mod->core_size = debug_align(mod->core_size);
64630 - mod->core_text_size = mod->core_size;
64631 - break;
64632 - case 1: /* RO: text and ro-data */
64633 - mod->core_size = debug_align(mod->core_size);
64634 - mod->core_ro_size = mod->core_size;
64635 - break;
64636 - case 3: /* whole core */
64637 - mod->core_size = debug_align(mod->core_size);
64638 - break;
64639 - }
64640 }
64641
64642 DEBUGP("Init section allocation order:\n");
64643 @@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
64644 || s->sh_entsize != ~0UL
64645 || !strstarts(sname, ".init"))
64646 continue;
64647 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64648 - | INIT_OFFSET_MASK);
64649 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64650 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64651 + else
64652 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64653 + s->sh_entsize |= INIT_OFFSET_MASK;
64654 DEBUGP("\t%s\n", sname);
64655 }
64656 - switch (m) {
64657 - case 0: /* executable */
64658 - mod->init_size = debug_align(mod->init_size);
64659 - mod->init_text_size = mod->init_size;
64660 - break;
64661 - case 1: /* RO: text and ro-data */
64662 - mod->init_size = debug_align(mod->init_size);
64663 - mod->init_ro_size = mod->init_size;
64664 - break;
64665 - case 3: /* whole init */
64666 - mod->init_size = debug_align(mod->init_size);
64667 - break;
64668 - }
64669 }
64670 }
64671
64672 @@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64673
64674 /* Put symbol section at end of init part of module. */
64675 symsect->sh_flags |= SHF_ALLOC;
64676 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64677 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64678 info->index.sym) | INIT_OFFSET_MASK;
64679 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64680
64681 @@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64682 }
64683
64684 /* Append room for core symbols at end of core part. */
64685 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64686 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64687 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64688 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64689
64690 /* Put string table section at end of init part of module. */
64691 strsect->sh_flags |= SHF_ALLOC;
64692 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64693 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64694 info->index.str) | INIT_OFFSET_MASK;
64695 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64696
64697 /* Append room for core symbols' strings at end of core part. */
64698 - info->stroffs = mod->core_size;
64699 + info->stroffs = mod->core_size_rx;
64700 __set_bit(0, info->strmap);
64701 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64702 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64703 }
64704
64705 static void add_kallsyms(struct module *mod, const struct load_info *info)
64706 @@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64707 /* Make sure we get permanent strtab: don't use info->strtab. */
64708 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64709
64710 + pax_open_kernel();
64711 +
64712 /* Set types up while we still have access to sections. */
64713 for (i = 0; i < mod->num_symtab; i++)
64714 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64715
64716 - mod->core_symtab = dst = mod->module_core + info->symoffs;
64717 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64718 src = mod->symtab;
64719 *dst = *src;
64720 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64721 @@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64722 }
64723 mod->core_num_syms = ndst;
64724
64725 - mod->core_strtab = s = mod->module_core + info->stroffs;
64726 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64727 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64728 if (test_bit(i, info->strmap))
64729 *++s = mod->strtab[i];
64730 +
64731 + pax_close_kernel();
64732 }
64733 #else
64734 static inline void layout_symtab(struct module *mod, struct load_info *info)
64735 @@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
64736 return size == 0 ? NULL : vmalloc_exec(size);
64737 }
64738
64739 -static void *module_alloc_update_bounds(unsigned long size)
64740 +static void *module_alloc_update_bounds_rw(unsigned long size)
64741 {
64742 void *ret = module_alloc(size);
64743
64744 if (ret) {
64745 mutex_lock(&module_mutex);
64746 /* Update module bounds. */
64747 - if ((unsigned long)ret < module_addr_min)
64748 - module_addr_min = (unsigned long)ret;
64749 - if ((unsigned long)ret + size > module_addr_max)
64750 - module_addr_max = (unsigned long)ret + size;
64751 + if ((unsigned long)ret < module_addr_min_rw)
64752 + module_addr_min_rw = (unsigned long)ret;
64753 + if ((unsigned long)ret + size > module_addr_max_rw)
64754 + module_addr_max_rw = (unsigned long)ret + size;
64755 + mutex_unlock(&module_mutex);
64756 + }
64757 + return ret;
64758 +}
64759 +
64760 +static void *module_alloc_update_bounds_rx(unsigned long size)
64761 +{
64762 + void *ret = module_alloc_exec(size);
64763 +
64764 + if (ret) {
64765 + mutex_lock(&module_mutex);
64766 + /* Update module bounds. */
64767 + if ((unsigned long)ret < module_addr_min_rx)
64768 + module_addr_min_rx = (unsigned long)ret;
64769 + if ((unsigned long)ret + size > module_addr_max_rx)
64770 + module_addr_max_rx = (unsigned long)ret + size;
64771 mutex_unlock(&module_mutex);
64772 }
64773 return ret;
64774 @@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
64775 static int check_modinfo(struct module *mod, struct load_info *info)
64776 {
64777 const char *modmagic = get_modinfo(info, "vermagic");
64778 + const char *license = get_modinfo(info, "license");
64779 int err;
64780
64781 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
64782 + if (!license || !license_is_gpl_compatible(license))
64783 + return -ENOEXEC;
64784 +#endif
64785 +
64786 /* This is allowed: modprobe --force will invalidate it. */
64787 if (!modmagic) {
64788 err = try_to_force_load(mod, "bad vermagic");
64789 @@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
64790 }
64791
64792 /* Set up license info based on the info section */
64793 - set_license(mod, get_modinfo(info, "license"));
64794 + set_license(mod, license);
64795
64796 return 0;
64797 }
64798 @@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
64799 void *ptr;
64800
64801 /* Do the allocs. */
64802 - ptr = module_alloc_update_bounds(mod->core_size);
64803 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64804 /*
64805 * The pointer to this block is stored in the module structure
64806 * which is inside the block. Just mark it as not being a
64807 @@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
64808 if (!ptr)
64809 return -ENOMEM;
64810
64811 - memset(ptr, 0, mod->core_size);
64812 - mod->module_core = ptr;
64813 + memset(ptr, 0, mod->core_size_rw);
64814 + mod->module_core_rw = ptr;
64815
64816 - ptr = module_alloc_update_bounds(mod->init_size);
64817 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64818 /*
64819 * The pointer to this block is stored in the module structure
64820 * which is inside the block. This block doesn't need to be
64821 * scanned as it contains data and code that will be freed
64822 * after the module is initialized.
64823 */
64824 - kmemleak_ignore(ptr);
64825 - if (!ptr && mod->init_size) {
64826 - module_free(mod, mod->module_core);
64827 + kmemleak_not_leak(ptr);
64828 + if (!ptr && mod->init_size_rw) {
64829 + module_free(mod, mod->module_core_rw);
64830 return -ENOMEM;
64831 }
64832 - memset(ptr, 0, mod->init_size);
64833 - mod->module_init = ptr;
64834 + memset(ptr, 0, mod->init_size_rw);
64835 + mod->module_init_rw = ptr;
64836 +
64837 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64838 + kmemleak_not_leak(ptr);
64839 + if (!ptr) {
64840 + module_free(mod, mod->module_init_rw);
64841 + module_free(mod, mod->module_core_rw);
64842 + return -ENOMEM;
64843 + }
64844 +
64845 + pax_open_kernel();
64846 + memset(ptr, 0, mod->core_size_rx);
64847 + pax_close_kernel();
64848 + mod->module_core_rx = ptr;
64849 +
64850 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64851 + kmemleak_not_leak(ptr);
64852 + if (!ptr && mod->init_size_rx) {
64853 + module_free_exec(mod, mod->module_core_rx);
64854 + module_free(mod, mod->module_init_rw);
64855 + module_free(mod, mod->module_core_rw);
64856 + return -ENOMEM;
64857 + }
64858 +
64859 + pax_open_kernel();
64860 + memset(ptr, 0, mod->init_size_rx);
64861 + pax_close_kernel();
64862 + mod->module_init_rx = ptr;
64863
64864 /* Transfer each section which specifies SHF_ALLOC */
64865 DEBUGP("final section addresses:\n");
64866 @@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
64867 if (!(shdr->sh_flags & SHF_ALLOC))
64868 continue;
64869
64870 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
64871 - dest = mod->module_init
64872 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64873 - else
64874 - dest = mod->module_core + shdr->sh_entsize;
64875 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64876 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64877 + dest = mod->module_init_rw
64878 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64879 + else
64880 + dest = mod->module_init_rx
64881 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64882 + } else {
64883 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64884 + dest = mod->module_core_rw + shdr->sh_entsize;
64885 + else
64886 + dest = mod->module_core_rx + shdr->sh_entsize;
64887 + }
64888 +
64889 + if (shdr->sh_type != SHT_NOBITS) {
64890 +
64891 +#ifdef CONFIG_PAX_KERNEXEC
64892 +#ifdef CONFIG_X86_64
64893 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64894 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64895 +#endif
64896 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64897 + pax_open_kernel();
64898 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64899 + pax_close_kernel();
64900 + } else
64901 +#endif
64902
64903 - if (shdr->sh_type != SHT_NOBITS)
64904 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64905 + }
64906 /* Update sh_addr to point to copy in image. */
64907 - shdr->sh_addr = (unsigned long)dest;
64908 +
64909 +#ifdef CONFIG_PAX_KERNEXEC
64910 + if (shdr->sh_flags & SHF_EXECINSTR)
64911 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
64912 + else
64913 +#endif
64914 +
64915 + shdr->sh_addr = (unsigned long)dest;
64916 DEBUGP("\t0x%lx %s\n",
64917 shdr->sh_addr, info->secstrings + shdr->sh_name);
64918 }
64919 @@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
64920 * Do it before processing of module parameters, so the module
64921 * can provide parameter accessor functions of its own.
64922 */
64923 - if (mod->module_init)
64924 - flush_icache_range((unsigned long)mod->module_init,
64925 - (unsigned long)mod->module_init
64926 - + mod->init_size);
64927 - flush_icache_range((unsigned long)mod->module_core,
64928 - (unsigned long)mod->module_core + mod->core_size);
64929 + if (mod->module_init_rx)
64930 + flush_icache_range((unsigned long)mod->module_init_rx,
64931 + (unsigned long)mod->module_init_rx
64932 + + mod->init_size_rx);
64933 + flush_icache_range((unsigned long)mod->module_core_rx,
64934 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
64935
64936 set_fs(old_fs);
64937 }
64938 @@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
64939 {
64940 kfree(info->strmap);
64941 percpu_modfree(mod);
64942 - module_free(mod, mod->module_init);
64943 - module_free(mod, mod->module_core);
64944 + module_free_exec(mod, mod->module_init_rx);
64945 + module_free_exec(mod, mod->module_core_rx);
64946 + module_free(mod, mod->module_init_rw);
64947 + module_free(mod, mod->module_core_rw);
64948 }
64949
64950 int __weak module_finalize(const Elf_Ehdr *hdr,
64951 @@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
64952 if (err)
64953 goto free_unload;
64954
64955 + /* Now copy in args */
64956 + mod->args = strndup_user(uargs, ~0UL >> 1);
64957 + if (IS_ERR(mod->args)) {
64958 + err = PTR_ERR(mod->args);
64959 + goto free_unload;
64960 + }
64961 +
64962 /* Set up MODINFO_ATTR fields */
64963 setup_modinfo(mod, &info);
64964
64965 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64966 + {
64967 + char *p, *p2;
64968 +
64969 + if (strstr(mod->args, "grsec_modharden_netdev")) {
64970 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64971 + err = -EPERM;
64972 + goto free_modinfo;
64973 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64974 + p += strlen("grsec_modharden_normal");
64975 + p2 = strstr(p, "_");
64976 + if (p2) {
64977 + *p2 = '\0';
64978 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64979 + *p2 = '_';
64980 + }
64981 + err = -EPERM;
64982 + goto free_modinfo;
64983 + }
64984 + }
64985 +#endif
64986 +
64987 /* Fix up syms, so that st_value is a pointer to location. */
64988 err = simplify_symbols(mod, &info);
64989 if (err < 0)
64990 @@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
64991
64992 flush_module_icache(mod);
64993
64994 - /* Now copy in args */
64995 - mod->args = strndup_user(uargs, ~0UL >> 1);
64996 - if (IS_ERR(mod->args)) {
64997 - err = PTR_ERR(mod->args);
64998 - goto free_arch_cleanup;
64999 - }
65000 -
65001 /* Mark state as coming so strong_try_module_get() ignores us. */
65002 mod->state = MODULE_STATE_COMING;
65003
65004 @@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
65005 unlock:
65006 mutex_unlock(&module_mutex);
65007 synchronize_sched();
65008 - kfree(mod->args);
65009 - free_arch_cleanup:
65010 module_arch_cleanup(mod);
65011 free_modinfo:
65012 free_modinfo(mod);
65013 + kfree(mod->args);
65014 free_unload:
65015 module_unload_free(mod);
65016 free_module:
65017 @@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
65018 MODULE_STATE_COMING, mod);
65019
65020 /* Set RO and NX regions for core */
65021 - set_section_ro_nx(mod->module_core,
65022 - mod->core_text_size,
65023 - mod->core_ro_size,
65024 - mod->core_size);
65025 + set_section_ro_nx(mod->module_core_rx,
65026 + mod->core_size_rx,
65027 + mod->core_size_rx,
65028 + mod->core_size_rx);
65029
65030 /* Set RO and NX regions for init */
65031 - set_section_ro_nx(mod->module_init,
65032 - mod->init_text_size,
65033 - mod->init_ro_size,
65034 - mod->init_size);
65035 + set_section_ro_nx(mod->module_init_rx,
65036 + mod->init_size_rx,
65037 + mod->init_size_rx,
65038 + mod->init_size_rx);
65039
65040 do_mod_ctors(mod);
65041 /* Start the module */
65042 @@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
65043 mod->strtab = mod->core_strtab;
65044 #endif
65045 unset_module_init_ro_nx(mod);
65046 - module_free(mod, mod->module_init);
65047 - mod->module_init = NULL;
65048 - mod->init_size = 0;
65049 - mod->init_ro_size = 0;
65050 - mod->init_text_size = 0;
65051 + module_free(mod, mod->module_init_rw);
65052 + module_free_exec(mod, mod->module_init_rx);
65053 + mod->module_init_rw = NULL;
65054 + mod->module_init_rx = NULL;
65055 + mod->init_size_rw = 0;
65056 + mod->init_size_rx = 0;
65057 mutex_unlock(&module_mutex);
65058
65059 return 0;
65060 @@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
65061 unsigned long nextval;
65062
65063 /* At worse, next value is at end of module */
65064 - if (within_module_init(addr, mod))
65065 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
65066 + if (within_module_init_rx(addr, mod))
65067 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
65068 + else if (within_module_init_rw(addr, mod))
65069 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
65070 + else if (within_module_core_rx(addr, mod))
65071 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
65072 + else if (within_module_core_rw(addr, mod))
65073 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
65074 else
65075 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
65076 + return NULL;
65077
65078 /* Scan for closest preceding symbol, and next symbol. (ELF
65079 starts real symbols at 1). */
65080 @@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
65081 char buf[8];
65082
65083 seq_printf(m, "%s %u",
65084 - mod->name, mod->init_size + mod->core_size);
65085 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
65086 print_unload_info(m, mod);
65087
65088 /* Informative for users. */
65089 @@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
65090 mod->state == MODULE_STATE_COMING ? "Loading":
65091 "Live");
65092 /* Used by oprofile and other similar tools. */
65093 - seq_printf(m, " 0x%pK", mod->module_core);
65094 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
65095
65096 /* Taints info */
65097 if (mod->taints)
65098 @@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
65099
65100 static int __init proc_modules_init(void)
65101 {
65102 +#ifndef CONFIG_GRKERNSEC_HIDESYM
65103 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65104 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
65105 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65106 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
65107 +#else
65108 proc_create("modules", 0, NULL, &proc_modules_operations);
65109 +#endif
65110 +#else
65111 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
65112 +#endif
65113 return 0;
65114 }
65115 module_init(proc_modules_init);
65116 @@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
65117 {
65118 struct module *mod;
65119
65120 - if (addr < module_addr_min || addr > module_addr_max)
65121 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
65122 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
65123 return NULL;
65124
65125 list_for_each_entry_rcu(mod, &modules, list)
65126 - if (within_module_core(addr, mod)
65127 - || within_module_init(addr, mod))
65128 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
65129 return mod;
65130 return NULL;
65131 }
65132 @@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
65133 */
65134 struct module *__module_text_address(unsigned long addr)
65135 {
65136 - struct module *mod = __module_address(addr);
65137 + struct module *mod;
65138 +
65139 +#ifdef CONFIG_X86_32
65140 + addr = ktla_ktva(addr);
65141 +#endif
65142 +
65143 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
65144 + return NULL;
65145 +
65146 + mod = __module_address(addr);
65147 +
65148 if (mod) {
65149 /* Make sure it's within the text section. */
65150 - if (!within(addr, mod->module_init, mod->init_text_size)
65151 - && !within(addr, mod->module_core, mod->core_text_size))
65152 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
65153 mod = NULL;
65154 }
65155 return mod;
65156 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
65157 index 7e3443f..b2a1e6b 100644
65158 --- a/kernel/mutex-debug.c
65159 +++ b/kernel/mutex-debug.c
65160 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
65161 }
65162
65163 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65164 - struct thread_info *ti)
65165 + struct task_struct *task)
65166 {
65167 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
65168
65169 /* Mark the current thread as blocked on the lock: */
65170 - ti->task->blocked_on = waiter;
65171 + task->blocked_on = waiter;
65172 }
65173
65174 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65175 - struct thread_info *ti)
65176 + struct task_struct *task)
65177 {
65178 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
65179 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
65180 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
65181 - ti->task->blocked_on = NULL;
65182 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
65183 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
65184 + task->blocked_on = NULL;
65185
65186 list_del_init(&waiter->list);
65187 waiter->task = NULL;
65188 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
65189 index 0799fd3..d06ae3b 100644
65190 --- a/kernel/mutex-debug.h
65191 +++ b/kernel/mutex-debug.h
65192 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
65193 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
65194 extern void debug_mutex_add_waiter(struct mutex *lock,
65195 struct mutex_waiter *waiter,
65196 - struct thread_info *ti);
65197 + struct task_struct *task);
65198 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
65199 - struct thread_info *ti);
65200 + struct task_struct *task);
65201 extern void debug_mutex_unlock(struct mutex *lock);
65202 extern void debug_mutex_init(struct mutex *lock, const char *name,
65203 struct lock_class_key *key);
65204 diff --git a/kernel/mutex.c b/kernel/mutex.c
65205 index 89096dd..f91ebc5 100644
65206 --- a/kernel/mutex.c
65207 +++ b/kernel/mutex.c
65208 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65209 spin_lock_mutex(&lock->wait_lock, flags);
65210
65211 debug_mutex_lock_common(lock, &waiter);
65212 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
65213 + debug_mutex_add_waiter(lock, &waiter, task);
65214
65215 /* add waiting tasks to the end of the waitqueue (FIFO): */
65216 list_add_tail(&waiter.list, &lock->wait_list);
65217 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65218 * TASK_UNINTERRUPTIBLE case.)
65219 */
65220 if (unlikely(signal_pending_state(state, task))) {
65221 - mutex_remove_waiter(lock, &waiter,
65222 - task_thread_info(task));
65223 + mutex_remove_waiter(lock, &waiter, task);
65224 mutex_release(&lock->dep_map, 1, ip);
65225 spin_unlock_mutex(&lock->wait_lock, flags);
65226
65227 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
65228 done:
65229 lock_acquired(&lock->dep_map, ip);
65230 /* got the lock - rejoice! */
65231 - mutex_remove_waiter(lock, &waiter, current_thread_info());
65232 + mutex_remove_waiter(lock, &waiter, task);
65233 mutex_set_owner(lock);
65234
65235 /* set it to 0 if there are no waiters left: */
65236 diff --git a/kernel/padata.c b/kernel/padata.c
65237 index b452599..5d68f4e 100644
65238 --- a/kernel/padata.c
65239 +++ b/kernel/padata.c
65240 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
65241 padata->pd = pd;
65242 padata->cb_cpu = cb_cpu;
65243
65244 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
65245 - atomic_set(&pd->seq_nr, -1);
65246 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
65247 + atomic_set_unchecked(&pd->seq_nr, -1);
65248
65249 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
65250 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
65251
65252 target_cpu = padata_cpu_hash(padata);
65253 queue = per_cpu_ptr(pd->pqueue, target_cpu);
65254 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
65255 padata_init_pqueues(pd);
65256 padata_init_squeues(pd);
65257 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
65258 - atomic_set(&pd->seq_nr, -1);
65259 + atomic_set_unchecked(&pd->seq_nr, -1);
65260 atomic_set(&pd->reorder_objects, 0);
65261 atomic_set(&pd->refcnt, 0);
65262 pd->pinst = pinst;
65263 diff --git a/kernel/panic.c b/kernel/panic.c
65264 index 3458469..342c500 100644
65265 --- a/kernel/panic.c
65266 +++ b/kernel/panic.c
65267 @@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
65268 va_end(args);
65269 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
65270 #ifdef CONFIG_DEBUG_BUGVERBOSE
65271 - dump_stack();
65272 + /*
65273 + * Avoid nested stack-dumping if a panic occurs during oops processing
65274 + */
65275 + if (!oops_in_progress)
65276 + dump_stack();
65277 #endif
65278
65279 /*
65280 @@ -382,7 +386,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
65281 const char *board;
65282
65283 printk(KERN_WARNING "------------[ cut here ]------------\n");
65284 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
65285 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
65286 board = dmi_get_system_info(DMI_PRODUCT_NAME);
65287 if (board)
65288 printk(KERN_WARNING "Hardware name: %s\n", board);
65289 @@ -437,7 +441,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
65290 */
65291 void __stack_chk_fail(void)
65292 {
65293 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
65294 + dump_stack();
65295 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
65296 __builtin_return_address(0));
65297 }
65298 EXPORT_SYMBOL(__stack_chk_fail);
65299 diff --git a/kernel/pid.c b/kernel/pid.c
65300 index fa5f722..0c93e57 100644
65301 --- a/kernel/pid.c
65302 +++ b/kernel/pid.c
65303 @@ -33,6 +33,7 @@
65304 #include <linux/rculist.h>
65305 #include <linux/bootmem.h>
65306 #include <linux/hash.h>
65307 +#include <linux/security.h>
65308 #include <linux/pid_namespace.h>
65309 #include <linux/init_task.h>
65310 #include <linux/syscalls.h>
65311 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
65312
65313 int pid_max = PID_MAX_DEFAULT;
65314
65315 -#define RESERVED_PIDS 300
65316 +#define RESERVED_PIDS 500
65317
65318 int pid_max_min = RESERVED_PIDS + 1;
65319 int pid_max_max = PID_MAX_LIMIT;
65320 @@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
65321 */
65322 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
65323 {
65324 + struct task_struct *task;
65325 +
65326 rcu_lockdep_assert(rcu_read_lock_held(),
65327 "find_task_by_pid_ns() needs rcu_read_lock()"
65328 " protection");
65329 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65330 +
65331 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65332 +
65333 + if (gr_pid_is_chrooted(task))
65334 + return NULL;
65335 +
65336 + return task;
65337 }
65338
65339 struct task_struct *find_task_by_vpid(pid_t vnr)
65340 @@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
65341 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
65342 }
65343
65344 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
65345 +{
65346 + rcu_lockdep_assert(rcu_read_lock_held(),
65347 + "find_task_by_pid_ns() needs rcu_read_lock()"
65348 + " protection");
65349 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
65350 +}
65351 +
65352 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
65353 {
65354 struct pid *pid;
65355 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
65356 index e7cb76d..75eceb3 100644
65357 --- a/kernel/posix-cpu-timers.c
65358 +++ b/kernel/posix-cpu-timers.c
65359 @@ -6,6 +6,7 @@
65360 #include <linux/posix-timers.h>
65361 #include <linux/errno.h>
65362 #include <linux/math64.h>
65363 +#include <linux/security.h>
65364 #include <asm/uaccess.h>
65365 #include <linux/kernel_stat.h>
65366 #include <trace/events/timer.h>
65367 @@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
65368
65369 static __init int init_posix_cpu_timers(void)
65370 {
65371 - struct k_clock process = {
65372 + static struct k_clock process = {
65373 .clock_getres = process_cpu_clock_getres,
65374 .clock_get = process_cpu_clock_get,
65375 .timer_create = process_cpu_timer_create,
65376 .nsleep = process_cpu_nsleep,
65377 .nsleep_restart = process_cpu_nsleep_restart,
65378 };
65379 - struct k_clock thread = {
65380 + static struct k_clock thread = {
65381 .clock_getres = thread_cpu_clock_getres,
65382 .clock_get = thread_cpu_clock_get,
65383 .timer_create = thread_cpu_timer_create,
65384 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
65385 index 69185ae..cc2847a 100644
65386 --- a/kernel/posix-timers.c
65387 +++ b/kernel/posix-timers.c
65388 @@ -43,6 +43,7 @@
65389 #include <linux/idr.h>
65390 #include <linux/posix-clock.h>
65391 #include <linux/posix-timers.h>
65392 +#include <linux/grsecurity.h>
65393 #include <linux/syscalls.h>
65394 #include <linux/wait.h>
65395 #include <linux/workqueue.h>
65396 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
65397 * which we beg off on and pass to do_sys_settimeofday().
65398 */
65399
65400 -static struct k_clock posix_clocks[MAX_CLOCKS];
65401 +static struct k_clock *posix_clocks[MAX_CLOCKS];
65402
65403 /*
65404 * These ones are defined below.
65405 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
65406 */
65407 static __init int init_posix_timers(void)
65408 {
65409 - struct k_clock clock_realtime = {
65410 + static struct k_clock clock_realtime = {
65411 .clock_getres = hrtimer_get_res,
65412 .clock_get = posix_clock_realtime_get,
65413 .clock_set = posix_clock_realtime_set,
65414 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
65415 .timer_get = common_timer_get,
65416 .timer_del = common_timer_del,
65417 };
65418 - struct k_clock clock_monotonic = {
65419 + static struct k_clock clock_monotonic = {
65420 .clock_getres = hrtimer_get_res,
65421 .clock_get = posix_ktime_get_ts,
65422 .nsleep = common_nsleep,
65423 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
65424 .timer_get = common_timer_get,
65425 .timer_del = common_timer_del,
65426 };
65427 - struct k_clock clock_monotonic_raw = {
65428 + static struct k_clock clock_monotonic_raw = {
65429 .clock_getres = hrtimer_get_res,
65430 .clock_get = posix_get_monotonic_raw,
65431 };
65432 - struct k_clock clock_realtime_coarse = {
65433 + static struct k_clock clock_realtime_coarse = {
65434 .clock_getres = posix_get_coarse_res,
65435 .clock_get = posix_get_realtime_coarse,
65436 };
65437 - struct k_clock clock_monotonic_coarse = {
65438 + static struct k_clock clock_monotonic_coarse = {
65439 .clock_getres = posix_get_coarse_res,
65440 .clock_get = posix_get_monotonic_coarse,
65441 };
65442 - struct k_clock clock_boottime = {
65443 + static struct k_clock clock_boottime = {
65444 .clock_getres = hrtimer_get_res,
65445 .clock_get = posix_get_boottime,
65446 .nsleep = common_nsleep,
65447 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
65448 return;
65449 }
65450
65451 - posix_clocks[clock_id] = *new_clock;
65452 + posix_clocks[clock_id] = new_clock;
65453 }
65454 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
65455
65456 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
65457 return (id & CLOCKFD_MASK) == CLOCKFD ?
65458 &clock_posix_dynamic : &clock_posix_cpu;
65459
65460 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
65461 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
65462 return NULL;
65463 - return &posix_clocks[id];
65464 + return posix_clocks[id];
65465 }
65466
65467 static int common_timer_create(struct k_itimer *new_timer)
65468 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
65469 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65470 return -EFAULT;
65471
65472 + /* only the CLOCK_REALTIME clock can be set, all other clocks
65473 + have their clock_set fptr set to a nosettime dummy function
65474 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
65475 + call common_clock_set, which calls do_sys_settimeofday, which
65476 + we hook
65477 + */
65478 +
65479 return kc->clock_set(which_clock, &new_tp);
65480 }
65481
65482 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
65483 index d523593..68197a4 100644
65484 --- a/kernel/power/poweroff.c
65485 +++ b/kernel/power/poweroff.c
65486 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
65487 .enable_mask = SYSRQ_ENABLE_BOOT,
65488 };
65489
65490 -static int pm_sysrq_init(void)
65491 +static int __init pm_sysrq_init(void)
65492 {
65493 register_sysrq_key('o', &sysrq_poweroff_op);
65494 return 0;
65495 diff --git a/kernel/power/process.c b/kernel/power/process.c
65496 index 3d4b954..11af930 100644
65497 --- a/kernel/power/process.c
65498 +++ b/kernel/power/process.c
65499 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
65500 u64 elapsed_csecs64;
65501 unsigned int elapsed_csecs;
65502 bool wakeup = false;
65503 + bool timedout = false;
65504
65505 do_gettimeofday(&start);
65506
65507 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
65508
65509 while (true) {
65510 todo = 0;
65511 + if (time_after(jiffies, end_time))
65512 + timedout = true;
65513 read_lock(&tasklist_lock);
65514 do_each_thread(g, p) {
65515 if (frozen(p) || !freezable(p))
65516 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
65517 * try_to_stop() after schedule() in ptrace/signal
65518 * stop sees TIF_FREEZE.
65519 */
65520 - if (!task_is_stopped_or_traced(p) &&
65521 - !freezer_should_skip(p))
65522 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65523 todo++;
65524 + if (timedout) {
65525 + printk(KERN_ERR "Task refusing to freeze:\n");
65526 + sched_show_task(p);
65527 + }
65528 + }
65529 } while_each_thread(g, p);
65530 read_unlock(&tasklist_lock);
65531
65532 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
65533 todo += wq_busy;
65534 }
65535
65536 - if (!todo || time_after(jiffies, end_time))
65537 + if (!todo || timedout)
65538 break;
65539
65540 if (pm_wakeup_pending()) {
65541 diff --git a/kernel/printk.c b/kernel/printk.c
65542 index 7982a0a..2095fdc 100644
65543 --- a/kernel/printk.c
65544 +++ b/kernel/printk.c
65545 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
65546 if (from_file && type != SYSLOG_ACTION_OPEN)
65547 return 0;
65548
65549 +#ifdef CONFIG_GRKERNSEC_DMESG
65550 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
65551 + return -EPERM;
65552 +#endif
65553 +
65554 if (syslog_action_restricted(type)) {
65555 if (capable(CAP_SYSLOG))
65556 return 0;
65557 diff --git a/kernel/profile.c b/kernel/profile.c
65558 index 76b8e77..a2930e8 100644
65559 --- a/kernel/profile.c
65560 +++ b/kernel/profile.c
65561 @@ -39,7 +39,7 @@ struct profile_hit {
65562 /* Oprofile timer tick hook */
65563 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65564
65565 -static atomic_t *prof_buffer;
65566 +static atomic_unchecked_t *prof_buffer;
65567 static unsigned long prof_len, prof_shift;
65568
65569 int prof_on __read_mostly;
65570 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
65571 hits[i].pc = 0;
65572 continue;
65573 }
65574 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65575 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65576 hits[i].hits = hits[i].pc = 0;
65577 }
65578 }
65579 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65580 * Add the current hit(s) and flush the write-queue out
65581 * to the global buffer:
65582 */
65583 - atomic_add(nr_hits, &prof_buffer[pc]);
65584 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65585 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65586 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65587 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65588 hits[i].pc = hits[i].hits = 0;
65589 }
65590 out:
65591 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65592 {
65593 unsigned long pc;
65594 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65595 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65596 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65597 }
65598 #endif /* !CONFIG_SMP */
65599
65600 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
65601 return -EFAULT;
65602 buf++; p++; count--; read++;
65603 }
65604 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65605 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65606 if (copy_to_user(buf, (void *)pnt, count))
65607 return -EFAULT;
65608 read += count;
65609 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
65610 }
65611 #endif
65612 profile_discard_flip_buffers();
65613 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65614 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65615 return count;
65616 }
65617
65618 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
65619 index 78ab24a..332c915 100644
65620 --- a/kernel/ptrace.c
65621 +++ b/kernel/ptrace.c
65622 @@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
65623 return ret;
65624 }
65625
65626 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65627 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65628 + unsigned int log)
65629 {
65630 const struct cred *cred = current_cred(), *tcred;
65631
65632 @@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65633 cred->gid == tcred->sgid &&
65634 cred->gid == tcred->gid))
65635 goto ok;
65636 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65637 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65638 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65639 goto ok;
65640 rcu_read_unlock();
65641 return -EPERM;
65642 @@ -207,7 +209,9 @@ ok:
65643 smp_rmb();
65644 if (task->mm)
65645 dumpable = get_dumpable(task->mm);
65646 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65647 + if (!dumpable &&
65648 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65649 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65650 return -EPERM;
65651
65652 return security_ptrace_access_check(task, mode);
65653 @@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
65654 {
65655 int err;
65656 task_lock(task);
65657 - err = __ptrace_may_access(task, mode);
65658 + err = __ptrace_may_access(task, mode, 0);
65659 + task_unlock(task);
65660 + return !err;
65661 +}
65662 +
65663 +bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
65664 +{
65665 + return __ptrace_may_access(task, mode, 0);
65666 +}
65667 +
65668 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65669 +{
65670 + int err;
65671 + task_lock(task);
65672 + err = __ptrace_may_access(task, mode, 1);
65673 task_unlock(task);
65674 return !err;
65675 }
65676 @@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65677 goto out;
65678
65679 task_lock(task);
65680 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65681 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65682 task_unlock(task);
65683 if (retval)
65684 goto unlock_creds;
65685 @@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65686 task->ptrace = PT_PTRACED;
65687 if (seize)
65688 task->ptrace |= PT_SEIZED;
65689 - if (task_ns_capable(task, CAP_SYS_PTRACE))
65690 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65691 task->ptrace |= PT_PTRACE_CAP;
65692
65693 __ptrace_link(task, current);
65694 @@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
65695 break;
65696 return -EIO;
65697 }
65698 - if (copy_to_user(dst, buf, retval))
65699 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65700 return -EFAULT;
65701 copied += retval;
65702 src += retval;
65703 @@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
65704 bool seized = child->ptrace & PT_SEIZED;
65705 int ret = -EIO;
65706 siginfo_t siginfo, *si;
65707 - void __user *datavp = (void __user *) data;
65708 + void __user *datavp = (__force void __user *) data;
65709 unsigned long __user *datalp = datavp;
65710 unsigned long flags;
65711
65712 @@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
65713 goto out;
65714 }
65715
65716 + if (gr_handle_ptrace(child, request)) {
65717 + ret = -EPERM;
65718 + goto out_put_task_struct;
65719 + }
65720 +
65721 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65722 ret = ptrace_attach(child, request, data);
65723 /*
65724 * Some architectures need to do book-keeping after
65725 * a ptrace attach.
65726 */
65727 - if (!ret)
65728 + if (!ret) {
65729 arch_ptrace_attach(child);
65730 + gr_audit_ptrace(child);
65731 + }
65732 goto out_put_task_struct;
65733 }
65734
65735 @@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
65736 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65737 if (copied != sizeof(tmp))
65738 return -EIO;
65739 - return put_user(tmp, (unsigned long __user *)data);
65740 + return put_user(tmp, (__force unsigned long __user *)data);
65741 }
65742
65743 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65744 @@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
65745 goto out;
65746 }
65747
65748 + if (gr_handle_ptrace(child, request)) {
65749 + ret = -EPERM;
65750 + goto out_put_task_struct;
65751 + }
65752 +
65753 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65754 ret = ptrace_attach(child, request, data);
65755 /*
65756 * Some architectures need to do book-keeping after
65757 * a ptrace attach.
65758 */
65759 - if (!ret)
65760 + if (!ret) {
65761 arch_ptrace_attach(child);
65762 + gr_audit_ptrace(child);
65763 + }
65764 goto out_put_task_struct;
65765 }
65766
65767 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
65768 index 764825c..3aa6ac4 100644
65769 --- a/kernel/rcutorture.c
65770 +++ b/kernel/rcutorture.c
65771 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
65772 { 0 };
65773 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65774 { 0 };
65775 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65776 -static atomic_t n_rcu_torture_alloc;
65777 -static atomic_t n_rcu_torture_alloc_fail;
65778 -static atomic_t n_rcu_torture_free;
65779 -static atomic_t n_rcu_torture_mberror;
65780 -static atomic_t n_rcu_torture_error;
65781 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65782 +static atomic_unchecked_t n_rcu_torture_alloc;
65783 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
65784 +static atomic_unchecked_t n_rcu_torture_free;
65785 +static atomic_unchecked_t n_rcu_torture_mberror;
65786 +static atomic_unchecked_t n_rcu_torture_error;
65787 static long n_rcu_torture_boost_ktrerror;
65788 static long n_rcu_torture_boost_rterror;
65789 static long n_rcu_torture_boost_failure;
65790 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65791
65792 spin_lock_bh(&rcu_torture_lock);
65793 if (list_empty(&rcu_torture_freelist)) {
65794 - atomic_inc(&n_rcu_torture_alloc_fail);
65795 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65796 spin_unlock_bh(&rcu_torture_lock);
65797 return NULL;
65798 }
65799 - atomic_inc(&n_rcu_torture_alloc);
65800 + atomic_inc_unchecked(&n_rcu_torture_alloc);
65801 p = rcu_torture_freelist.next;
65802 list_del_init(p);
65803 spin_unlock_bh(&rcu_torture_lock);
65804 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65805 static void
65806 rcu_torture_free(struct rcu_torture *p)
65807 {
65808 - atomic_inc(&n_rcu_torture_free);
65809 + atomic_inc_unchecked(&n_rcu_torture_free);
65810 spin_lock_bh(&rcu_torture_lock);
65811 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65812 spin_unlock_bh(&rcu_torture_lock);
65813 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65814 i = rp->rtort_pipe_count;
65815 if (i > RCU_TORTURE_PIPE_LEN)
65816 i = RCU_TORTURE_PIPE_LEN;
65817 - atomic_inc(&rcu_torture_wcount[i]);
65818 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65819 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65820 rp->rtort_mbtest = 0;
65821 rcu_torture_free(rp);
65822 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
65823 i = rp->rtort_pipe_count;
65824 if (i > RCU_TORTURE_PIPE_LEN)
65825 i = RCU_TORTURE_PIPE_LEN;
65826 - atomic_inc(&rcu_torture_wcount[i]);
65827 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65828 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65829 rp->rtort_mbtest = 0;
65830 list_del(&rp->rtort_free);
65831 @@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
65832 i = old_rp->rtort_pipe_count;
65833 if (i > RCU_TORTURE_PIPE_LEN)
65834 i = RCU_TORTURE_PIPE_LEN;
65835 - atomic_inc(&rcu_torture_wcount[i]);
65836 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65837 old_rp->rtort_pipe_count++;
65838 cur_ops->deferred_free(old_rp);
65839 }
65840 @@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
65841 return;
65842 }
65843 if (p->rtort_mbtest == 0)
65844 - atomic_inc(&n_rcu_torture_mberror);
65845 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65846 spin_lock(&rand_lock);
65847 cur_ops->read_delay(&rand);
65848 n_rcu_torture_timers++;
65849 @@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
65850 continue;
65851 }
65852 if (p->rtort_mbtest == 0)
65853 - atomic_inc(&n_rcu_torture_mberror);
65854 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65855 cur_ops->read_delay(&rand);
65856 preempt_disable();
65857 pipe_count = p->rtort_pipe_count;
65858 @@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
65859 rcu_torture_current,
65860 rcu_torture_current_version,
65861 list_empty(&rcu_torture_freelist),
65862 - atomic_read(&n_rcu_torture_alloc),
65863 - atomic_read(&n_rcu_torture_alloc_fail),
65864 - atomic_read(&n_rcu_torture_free),
65865 - atomic_read(&n_rcu_torture_mberror),
65866 + atomic_read_unchecked(&n_rcu_torture_alloc),
65867 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65868 + atomic_read_unchecked(&n_rcu_torture_free),
65869 + atomic_read_unchecked(&n_rcu_torture_mberror),
65870 n_rcu_torture_boost_ktrerror,
65871 n_rcu_torture_boost_rterror,
65872 n_rcu_torture_boost_failure,
65873 n_rcu_torture_boosts,
65874 n_rcu_torture_timers);
65875 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65876 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65877 n_rcu_torture_boost_ktrerror != 0 ||
65878 n_rcu_torture_boost_rterror != 0 ||
65879 n_rcu_torture_boost_failure != 0)
65880 @@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
65881 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65882 if (i > 1) {
65883 cnt += sprintf(&page[cnt], "!!! ");
65884 - atomic_inc(&n_rcu_torture_error);
65885 + atomic_inc_unchecked(&n_rcu_torture_error);
65886 WARN_ON_ONCE(1);
65887 }
65888 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65889 @@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
65890 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65891 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65892 cnt += sprintf(&page[cnt], " %d",
65893 - atomic_read(&rcu_torture_wcount[i]));
65894 + atomic_read_unchecked(&rcu_torture_wcount[i]));
65895 }
65896 cnt += sprintf(&page[cnt], "\n");
65897 if (cur_ops->stats)
65898 @@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
65899
65900 if (cur_ops->cleanup)
65901 cur_ops->cleanup();
65902 - if (atomic_read(&n_rcu_torture_error))
65903 + if (atomic_read_unchecked(&n_rcu_torture_error))
65904 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65905 else
65906 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65907 @@ -1465,17 +1465,17 @@ rcu_torture_init(void)
65908
65909 rcu_torture_current = NULL;
65910 rcu_torture_current_version = 0;
65911 - atomic_set(&n_rcu_torture_alloc, 0);
65912 - atomic_set(&n_rcu_torture_alloc_fail, 0);
65913 - atomic_set(&n_rcu_torture_free, 0);
65914 - atomic_set(&n_rcu_torture_mberror, 0);
65915 - atomic_set(&n_rcu_torture_error, 0);
65916 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65917 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65918 + atomic_set_unchecked(&n_rcu_torture_free, 0);
65919 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65920 + atomic_set_unchecked(&n_rcu_torture_error, 0);
65921 n_rcu_torture_boost_ktrerror = 0;
65922 n_rcu_torture_boost_rterror = 0;
65923 n_rcu_torture_boost_failure = 0;
65924 n_rcu_torture_boosts = 0;
65925 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65926 - atomic_set(&rcu_torture_wcount[i], 0);
65927 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65928 for_each_possible_cpu(cpu) {
65929 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65930 per_cpu(rcu_torture_count, cpu)[i] = 0;
65931 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
65932 index 6b76d81..7afc1b3 100644
65933 --- a/kernel/rcutree.c
65934 +++ b/kernel/rcutree.c
65935 @@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
65936 trace_rcu_dyntick("Start");
65937 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65938 smp_mb__before_atomic_inc(); /* See above. */
65939 - atomic_inc(&rdtp->dynticks);
65940 + atomic_inc_unchecked(&rdtp->dynticks);
65941 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65942 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65943 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65944 local_irq_restore(flags);
65945 }
65946
65947 @@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
65948 return;
65949 }
65950 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65951 - atomic_inc(&rdtp->dynticks);
65952 + atomic_inc_unchecked(&rdtp->dynticks);
65953 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65954 smp_mb__after_atomic_inc(); /* See above. */
65955 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65956 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65957 trace_rcu_dyntick("End");
65958 local_irq_restore(flags);
65959 }
65960 @@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
65961 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65962
65963 if (rdtp->dynticks_nmi_nesting == 0 &&
65964 - (atomic_read(&rdtp->dynticks) & 0x1))
65965 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65966 return;
65967 rdtp->dynticks_nmi_nesting++;
65968 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65969 - atomic_inc(&rdtp->dynticks);
65970 + atomic_inc_unchecked(&rdtp->dynticks);
65971 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65972 smp_mb__after_atomic_inc(); /* See above. */
65973 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65974 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65975 }
65976
65977 /**
65978 @@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
65979 return;
65980 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65981 smp_mb__before_atomic_inc(); /* See above. */
65982 - atomic_inc(&rdtp->dynticks);
65983 + atomic_inc_unchecked(&rdtp->dynticks);
65984 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65985 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65986 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65987 }
65988
65989 /**
65990 @@ -474,7 +474,7 @@ void rcu_irq_exit(void)
65991 */
65992 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65993 {
65994 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65995 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65996 return 0;
65997 }
65998
65999 @@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
66000 unsigned int curr;
66001 unsigned int snap;
66002
66003 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
66004 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
66005 snap = (unsigned int)rdp->dynticks_snap;
66006
66007 /*
66008 @@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
66009 /*
66010 * Do RCU core processing for the current CPU.
66011 */
66012 -static void rcu_process_callbacks(struct softirq_action *unused)
66013 +static void rcu_process_callbacks(void)
66014 {
66015 trace_rcu_utilization("Start RCU core");
66016 __rcu_process_callbacks(&rcu_sched_state,
66017 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
66018 index 849ce9e..74bc9de 100644
66019 --- a/kernel/rcutree.h
66020 +++ b/kernel/rcutree.h
66021 @@ -86,7 +86,7 @@
66022 struct rcu_dynticks {
66023 int dynticks_nesting; /* Track irq/process nesting level. */
66024 int dynticks_nmi_nesting; /* Track NMI nesting level. */
66025 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
66026 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
66027 };
66028
66029 /* RCU's kthread states for tracing. */
66030 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
66031 index 4b9b9f8..2326053 100644
66032 --- a/kernel/rcutree_plugin.h
66033 +++ b/kernel/rcutree_plugin.h
66034 @@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
66035
66036 /* Clean up and exit. */
66037 smp_mb(); /* ensure expedited GP seen before counter increment. */
66038 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
66039 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
66040 unlock_mb_ret:
66041 mutex_unlock(&sync_rcu_preempt_exp_mutex);
66042 mb_ret:
66043 @@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
66044
66045 #else /* #ifndef CONFIG_SMP */
66046
66047 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
66048 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
66049 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
66050 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
66051
66052 static int synchronize_sched_expedited_cpu_stop(void *data)
66053 {
66054 @@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
66055 int firstsnap, s, snap, trycount = 0;
66056
66057 /* Note that atomic_inc_return() implies full memory barrier. */
66058 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
66059 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
66060 get_online_cpus();
66061
66062 /*
66063 @@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
66064 }
66065
66066 /* Check to see if someone else did our work for us. */
66067 - s = atomic_read(&sync_sched_expedited_done);
66068 + s = atomic_read_unchecked(&sync_sched_expedited_done);
66069 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
66070 smp_mb(); /* ensure test happens before caller kfree */
66071 return;
66072 @@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
66073 * grace period works for us.
66074 */
66075 get_online_cpus();
66076 - snap = atomic_read(&sync_sched_expedited_started) - 1;
66077 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
66078 smp_mb(); /* ensure read is before try_stop_cpus(). */
66079 }
66080
66081 @@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
66082 * than we did beat us to the punch.
66083 */
66084 do {
66085 - s = atomic_read(&sync_sched_expedited_done);
66086 + s = atomic_read_unchecked(&sync_sched_expedited_done);
66087 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
66088 smp_mb(); /* ensure test happens before caller kfree */
66089 break;
66090 }
66091 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
66092 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
66093
66094 put_online_cpus();
66095 }
66096 @@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
66097 for_each_online_cpu(thatcpu) {
66098 if (thatcpu == cpu)
66099 continue;
66100 - snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
66101 + snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
66102 thatcpu).dynticks);
66103 smp_mb(); /* Order sampling of snap with end of grace period. */
66104 if ((snap & 0x1) != 0) {
66105 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
66106 index 9feffa4..54058df 100644
66107 --- a/kernel/rcutree_trace.c
66108 +++ b/kernel/rcutree_trace.c
66109 @@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
66110 rdp->qs_pending);
66111 #ifdef CONFIG_NO_HZ
66112 seq_printf(m, " dt=%d/%d/%d df=%lu",
66113 - atomic_read(&rdp->dynticks->dynticks),
66114 + atomic_read_unchecked(&rdp->dynticks->dynticks),
66115 rdp->dynticks->dynticks_nesting,
66116 rdp->dynticks->dynticks_nmi_nesting,
66117 rdp->dynticks_fqs);
66118 @@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
66119 rdp->qs_pending);
66120 #ifdef CONFIG_NO_HZ
66121 seq_printf(m, ",%d,%d,%d,%lu",
66122 - atomic_read(&rdp->dynticks->dynticks),
66123 + atomic_read_unchecked(&rdp->dynticks->dynticks),
66124 rdp->dynticks->dynticks_nesting,
66125 rdp->dynticks->dynticks_nmi_nesting,
66126 rdp->dynticks_fqs);
66127 diff --git a/kernel/resource.c b/kernel/resource.c
66128 index 7640b3a..5879283 100644
66129 --- a/kernel/resource.c
66130 +++ b/kernel/resource.c
66131 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
66132
66133 static int __init ioresources_init(void)
66134 {
66135 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66136 +#ifdef CONFIG_GRKERNSEC_PROC_USER
66137 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
66138 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
66139 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66140 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
66141 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
66142 +#endif
66143 +#else
66144 proc_create("ioports", 0, NULL, &proc_ioports_operations);
66145 proc_create("iomem", 0, NULL, &proc_iomem_operations);
66146 +#endif
66147 return 0;
66148 }
66149 __initcall(ioresources_init);
66150 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
66151 index 3d9f31c..7fefc9e 100644
66152 --- a/kernel/rtmutex-tester.c
66153 +++ b/kernel/rtmutex-tester.c
66154 @@ -20,7 +20,7 @@
66155 #define MAX_RT_TEST_MUTEXES 8
66156
66157 static spinlock_t rttest_lock;
66158 -static atomic_t rttest_event;
66159 +static atomic_unchecked_t rttest_event;
66160
66161 struct test_thread_data {
66162 int opcode;
66163 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66164
66165 case RTTEST_LOCKCONT:
66166 td->mutexes[td->opdata] = 1;
66167 - td->event = atomic_add_return(1, &rttest_event);
66168 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66169 return 0;
66170
66171 case RTTEST_RESET:
66172 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66173 return 0;
66174
66175 case RTTEST_RESETEVENT:
66176 - atomic_set(&rttest_event, 0);
66177 + atomic_set_unchecked(&rttest_event, 0);
66178 return 0;
66179
66180 default:
66181 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66182 return ret;
66183
66184 td->mutexes[id] = 1;
66185 - td->event = atomic_add_return(1, &rttest_event);
66186 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66187 rt_mutex_lock(&mutexes[id]);
66188 - td->event = atomic_add_return(1, &rttest_event);
66189 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66190 td->mutexes[id] = 4;
66191 return 0;
66192
66193 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66194 return ret;
66195
66196 td->mutexes[id] = 1;
66197 - td->event = atomic_add_return(1, &rttest_event);
66198 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66199 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
66200 - td->event = atomic_add_return(1, &rttest_event);
66201 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66202 td->mutexes[id] = ret ? 0 : 4;
66203 return ret ? -EINTR : 0;
66204
66205 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
66206 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
66207 return ret;
66208
66209 - td->event = atomic_add_return(1, &rttest_event);
66210 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66211 rt_mutex_unlock(&mutexes[id]);
66212 - td->event = atomic_add_return(1, &rttest_event);
66213 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66214 td->mutexes[id] = 0;
66215 return 0;
66216
66217 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66218 break;
66219
66220 td->mutexes[dat] = 2;
66221 - td->event = atomic_add_return(1, &rttest_event);
66222 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66223 break;
66224
66225 default:
66226 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66227 return;
66228
66229 td->mutexes[dat] = 3;
66230 - td->event = atomic_add_return(1, &rttest_event);
66231 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66232 break;
66233
66234 case RTTEST_LOCKNOWAIT:
66235 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
66236 return;
66237
66238 td->mutexes[dat] = 1;
66239 - td->event = atomic_add_return(1, &rttest_event);
66240 + td->event = atomic_add_return_unchecked(1, &rttest_event);
66241 return;
66242
66243 default:
66244 diff --git a/kernel/sched.c b/kernel/sched.c
66245 index d6b149c..896cbb8 100644
66246 --- a/kernel/sched.c
66247 +++ b/kernel/sched.c
66248 @@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
66249 BUG(); /* the idle class will always have a runnable task */
66250 }
66251
66252 +#ifdef CONFIG_GRKERNSEC_SETXID
66253 +extern void gr_delayed_cred_worker(void);
66254 +static inline void gr_cred_schedule(void)
66255 +{
66256 + if (unlikely(current->delayed_cred))
66257 + gr_delayed_cred_worker();
66258 +}
66259 +#else
66260 +static inline void gr_cred_schedule(void)
66261 +{
66262 +}
66263 +#endif
66264 +
66265 /*
66266 * __schedule() is the main scheduler function.
66267 */
66268 @@ -4408,6 +4421,8 @@ need_resched:
66269
66270 schedule_debug(prev);
66271
66272 + gr_cred_schedule();
66273 +
66274 if (sched_feat(HRTICK))
66275 hrtick_clear(rq);
66276
66277 @@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
66278 /* convert nice value [19,-20] to rlimit style value [1,40] */
66279 int nice_rlim = 20 - nice;
66280
66281 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
66282 +
66283 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
66284 capable(CAP_SYS_NICE));
66285 }
66286 @@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
66287 if (nice > 19)
66288 nice = 19;
66289
66290 - if (increment < 0 && !can_nice(current, nice))
66291 + if (increment < 0 && (!can_nice(current, nice) ||
66292 + gr_handle_chroot_nice()))
66293 return -EPERM;
66294
66295 retval = security_task_setnice(current, nice);
66296 @@ -5288,6 +5306,7 @@ recheck:
66297 unsigned long rlim_rtprio =
66298 task_rlimit(p, RLIMIT_RTPRIO);
66299
66300 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
66301 /* can't set/change the rt policy */
66302 if (policy != p->policy && !rlim_rtprio)
66303 return -EPERM;
66304 diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
66305 index 429242f..d7cca82 100644
66306 --- a/kernel/sched_autogroup.c
66307 +++ b/kernel/sched_autogroup.c
66308 @@ -7,7 +7,7 @@
66309
66310 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
66311 static struct autogroup autogroup_default;
66312 -static atomic_t autogroup_seq_nr;
66313 +static atomic_unchecked_t autogroup_seq_nr;
66314
66315 static void __init autogroup_init(struct task_struct *init_task)
66316 {
66317 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
66318
66319 kref_init(&ag->kref);
66320 init_rwsem(&ag->lock);
66321 - ag->id = atomic_inc_return(&autogroup_seq_nr);
66322 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
66323 ag->tg = tg;
66324 #ifdef CONFIG_RT_GROUP_SCHED
66325 /*
66326 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
66327 index 8a39fa3..34f3dbc 100644
66328 --- a/kernel/sched_fair.c
66329 +++ b/kernel/sched_fair.c
66330 @@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
66331 * run_rebalance_domains is triggered when needed from the scheduler tick.
66332 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
66333 */
66334 -static void run_rebalance_domains(struct softirq_action *h)
66335 +static void run_rebalance_domains(void)
66336 {
66337 int this_cpu = smp_processor_id();
66338 struct rq *this_rq = cpu_rq(this_cpu);
66339 diff --git a/kernel/signal.c b/kernel/signal.c
66340 index 2065515..aed2987 100644
66341 --- a/kernel/signal.c
66342 +++ b/kernel/signal.c
66343 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
66344
66345 int print_fatal_signals __read_mostly;
66346
66347 -static void __user *sig_handler(struct task_struct *t, int sig)
66348 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
66349 {
66350 return t->sighand->action[sig - 1].sa.sa_handler;
66351 }
66352
66353 -static int sig_handler_ignored(void __user *handler, int sig)
66354 +static int sig_handler_ignored(__sighandler_t handler, int sig)
66355 {
66356 /* Is it explicitly or implicitly ignored? */
66357 return handler == SIG_IGN ||
66358 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
66359 static int sig_task_ignored(struct task_struct *t, int sig,
66360 int from_ancestor_ns)
66361 {
66362 - void __user *handler;
66363 + __sighandler_t handler;
66364
66365 handler = sig_handler(t, sig);
66366
66367 @@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
66368 atomic_inc(&user->sigpending);
66369 rcu_read_unlock();
66370
66371 + if (!override_rlimit)
66372 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
66373 +
66374 if (override_rlimit ||
66375 atomic_read(&user->sigpending) <=
66376 task_rlimit(t, RLIMIT_SIGPENDING)) {
66377 @@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
66378
66379 int unhandled_signal(struct task_struct *tsk, int sig)
66380 {
66381 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
66382 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
66383 if (is_global_init(tsk))
66384 return 1;
66385 if (handler != SIG_IGN && handler != SIG_DFL)
66386 @@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
66387 }
66388 }
66389
66390 + /* allow glibc communication via tgkill to other threads in our
66391 + thread group */
66392 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
66393 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
66394 + && gr_handle_signal(t, sig))
66395 + return -EPERM;
66396 +
66397 return security_task_kill(t, info, sig, 0);
66398 }
66399
66400 @@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66401 return send_signal(sig, info, p, 1);
66402 }
66403
66404 -static int
66405 +int
66406 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66407 {
66408 return send_signal(sig, info, t, 0);
66409 @@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66410 unsigned long int flags;
66411 int ret, blocked, ignored;
66412 struct k_sigaction *action;
66413 + int is_unhandled = 0;
66414
66415 spin_lock_irqsave(&t->sighand->siglock, flags);
66416 action = &t->sighand->action[sig-1];
66417 @@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66418 }
66419 if (action->sa.sa_handler == SIG_DFL)
66420 t->signal->flags &= ~SIGNAL_UNKILLABLE;
66421 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
66422 + is_unhandled = 1;
66423 ret = specific_send_sig_info(sig, info, t);
66424 spin_unlock_irqrestore(&t->sighand->siglock, flags);
66425
66426 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
66427 + normal operation */
66428 + if (is_unhandled) {
66429 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66430 + gr_handle_crash(t, sig);
66431 + }
66432 +
66433 return ret;
66434 }
66435
66436 @@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66437 ret = check_kill_permission(sig, info, p);
66438 rcu_read_unlock();
66439
66440 - if (!ret && sig)
66441 + if (!ret && sig) {
66442 ret = do_send_sig_info(sig, info, p, true);
66443 + if (!ret)
66444 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66445 + }
66446
66447 return ret;
66448 }
66449 @@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
66450 int error = -ESRCH;
66451
66452 rcu_read_lock();
66453 - p = find_task_by_vpid(pid);
66454 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66455 + /* allow glibc communication via tgkill to other threads in our
66456 + thread group */
66457 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66458 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
66459 + p = find_task_by_vpid_unrestricted(pid);
66460 + else
66461 +#endif
66462 + p = find_task_by_vpid(pid);
66463 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66464 error = check_kill_permission(sig, info, p);
66465 /*
66466 diff --git a/kernel/smp.c b/kernel/smp.c
66467 index db197d6..17aef0b 100644
66468 --- a/kernel/smp.c
66469 +++ b/kernel/smp.c
66470 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
66471 }
66472 EXPORT_SYMBOL(smp_call_function);
66473
66474 -void ipi_call_lock(void)
66475 +void ipi_call_lock(void) __acquires(call_function.lock)
66476 {
66477 raw_spin_lock(&call_function.lock);
66478 }
66479
66480 -void ipi_call_unlock(void)
66481 +void ipi_call_unlock(void) __releases(call_function.lock)
66482 {
66483 raw_spin_unlock(&call_function.lock);
66484 }
66485
66486 -void ipi_call_lock_irq(void)
66487 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
66488 {
66489 raw_spin_lock_irq(&call_function.lock);
66490 }
66491
66492 -void ipi_call_unlock_irq(void)
66493 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
66494 {
66495 raw_spin_unlock_irq(&call_function.lock);
66496 }
66497 diff --git a/kernel/softirq.c b/kernel/softirq.c
66498 index 2c71d91..1021f81 100644
66499 --- a/kernel/softirq.c
66500 +++ b/kernel/softirq.c
66501 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
66502
66503 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66504
66505 -char *softirq_to_name[NR_SOFTIRQS] = {
66506 +const char * const softirq_to_name[NR_SOFTIRQS] = {
66507 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66508 "TASKLET", "SCHED", "HRTIMER", "RCU"
66509 };
66510 @@ -235,7 +235,7 @@ restart:
66511 kstat_incr_softirqs_this_cpu(vec_nr);
66512
66513 trace_softirq_entry(vec_nr);
66514 - h->action(h);
66515 + h->action();
66516 trace_softirq_exit(vec_nr);
66517 if (unlikely(prev_count != preempt_count())) {
66518 printk(KERN_ERR "huh, entered softirq %u %s %p"
66519 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
66520 local_irq_restore(flags);
66521 }
66522
66523 -void open_softirq(int nr, void (*action)(struct softirq_action *))
66524 +void open_softirq(int nr, void (*action)(void))
66525 {
66526 - softirq_vec[nr].action = action;
66527 + pax_open_kernel();
66528 + *(void **)&softirq_vec[nr].action = action;
66529 + pax_close_kernel();
66530 }
66531
66532 /*
66533 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
66534
66535 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66536
66537 -static void tasklet_action(struct softirq_action *a)
66538 +static void tasklet_action(void)
66539 {
66540 struct tasklet_struct *list;
66541
66542 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
66543 }
66544 }
66545
66546 -static void tasklet_hi_action(struct softirq_action *a)
66547 +static void tasklet_hi_action(void)
66548 {
66549 struct tasklet_struct *list;
66550
66551 diff --git a/kernel/sys.c b/kernel/sys.c
66552 index 481611f..0754d86 100644
66553 --- a/kernel/sys.c
66554 +++ b/kernel/sys.c
66555 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
66556 error = -EACCES;
66557 goto out;
66558 }
66559 +
66560 + if (gr_handle_chroot_setpriority(p, niceval)) {
66561 + error = -EACCES;
66562 + goto out;
66563 + }
66564 +
66565 no_nice = security_task_setnice(p, niceval);
66566 if (no_nice) {
66567 error = no_nice;
66568 @@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
66569 goto error;
66570 }
66571
66572 + if (gr_check_group_change(new->gid, new->egid, -1))
66573 + goto error;
66574 +
66575 if (rgid != (gid_t) -1 ||
66576 (egid != (gid_t) -1 && egid != old->gid))
66577 new->sgid = new->egid;
66578 @@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66579 old = current_cred();
66580
66581 retval = -EPERM;
66582 +
66583 + if (gr_check_group_change(gid, gid, gid))
66584 + goto error;
66585 +
66586 if (nsown_capable(CAP_SETGID))
66587 new->gid = new->egid = new->sgid = new->fsgid = gid;
66588 else if (gid == old->gid || gid == old->sgid)
66589 @@ -618,7 +631,7 @@ error:
66590 /*
66591 * change the user struct in a credentials set to match the new UID
66592 */
66593 -static int set_user(struct cred *new)
66594 +int set_user(struct cred *new)
66595 {
66596 struct user_struct *new_user;
66597
66598 @@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
66599 goto error;
66600 }
66601
66602 + if (gr_check_user_change(new->uid, new->euid, -1))
66603 + goto error;
66604 +
66605 if (new->uid != old->uid) {
66606 retval = set_user(new);
66607 if (retval < 0)
66608 @@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66609 old = current_cred();
66610
66611 retval = -EPERM;
66612 +
66613 + if (gr_check_crash_uid(uid))
66614 + goto error;
66615 + if (gr_check_user_change(uid, uid, uid))
66616 + goto error;
66617 +
66618 if (nsown_capable(CAP_SETUID)) {
66619 new->suid = new->uid = uid;
66620 if (uid != old->uid) {
66621 @@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
66622 goto error;
66623 }
66624
66625 + if (gr_check_user_change(ruid, euid, -1))
66626 + goto error;
66627 +
66628 if (ruid != (uid_t) -1) {
66629 new->uid = ruid;
66630 if (ruid != old->uid) {
66631 @@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
66632 goto error;
66633 }
66634
66635 + if (gr_check_group_change(rgid, egid, -1))
66636 + goto error;
66637 +
66638 if (rgid != (gid_t) -1)
66639 new->gid = rgid;
66640 if (egid != (gid_t) -1)
66641 @@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66642 old = current_cred();
66643 old_fsuid = old->fsuid;
66644
66645 + if (gr_check_user_change(-1, -1, uid))
66646 + goto error;
66647 +
66648 if (uid == old->uid || uid == old->euid ||
66649 uid == old->suid || uid == old->fsuid ||
66650 nsown_capable(CAP_SETUID)) {
66651 @@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66652 }
66653 }
66654
66655 +error:
66656 abort_creds(new);
66657 return old_fsuid;
66658
66659 @@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66660 if (gid == old->gid || gid == old->egid ||
66661 gid == old->sgid || gid == old->fsgid ||
66662 nsown_capable(CAP_SETGID)) {
66663 + if (gr_check_group_change(-1, -1, gid))
66664 + goto error;
66665 +
66666 if (gid != old_fsgid) {
66667 new->fsgid = gid;
66668 goto change_okay;
66669 }
66670 }
66671
66672 +error:
66673 abort_creds(new);
66674 return old_fsgid;
66675
66676 @@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
66677 }
66678 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
66679 snprintf(buf, len, "2.6.%u%s", v, rest);
66680 - ret = copy_to_user(release, buf, len);
66681 + if (len > sizeof(buf))
66682 + ret = -EFAULT;
66683 + else
66684 + ret = copy_to_user(release, buf, len);
66685 }
66686 return ret;
66687 }
66688 @@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
66689 return -EFAULT;
66690
66691 down_read(&uts_sem);
66692 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
66693 + error = __copy_to_user(name->sysname, &utsname()->sysname,
66694 __OLD_UTS_LEN);
66695 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66696 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66697 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
66698 __OLD_UTS_LEN);
66699 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66700 - error |= __copy_to_user(&name->release, &utsname()->release,
66701 + error |= __copy_to_user(name->release, &utsname()->release,
66702 __OLD_UTS_LEN);
66703 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66704 - error |= __copy_to_user(&name->version, &utsname()->version,
66705 + error |= __copy_to_user(name->version, &utsname()->version,
66706 __OLD_UTS_LEN);
66707 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66708 - error |= __copy_to_user(&name->machine, &utsname()->machine,
66709 + error |= __copy_to_user(name->machine, &utsname()->machine,
66710 __OLD_UTS_LEN);
66711 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66712 up_read(&uts_sem);
66713 @@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
66714 error = get_dumpable(me->mm);
66715 break;
66716 case PR_SET_DUMPABLE:
66717 - if (arg2 < 0 || arg2 > 1) {
66718 + if (arg2 > 1) {
66719 error = -EINVAL;
66720 break;
66721 }
66722 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
66723 index ae27196..7506d69 100644
66724 --- a/kernel/sysctl.c
66725 +++ b/kernel/sysctl.c
66726 @@ -86,6 +86,13 @@
66727
66728
66729 #if defined(CONFIG_SYSCTL)
66730 +#include <linux/grsecurity.h>
66731 +#include <linux/grinternal.h>
66732 +
66733 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66734 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66735 + const int op);
66736 +extern int gr_handle_chroot_sysctl(const int op);
66737
66738 /* External variables not in a header file. */
66739 extern int sysctl_overcommit_memory;
66740 @@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
66741 }
66742
66743 #endif
66744 +extern struct ctl_table grsecurity_table[];
66745
66746 static struct ctl_table root_table[];
66747 static struct ctl_table_root sysctl_table_root;
66748 @@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
66749 int sysctl_legacy_va_layout;
66750 #endif
66751
66752 +#ifdef CONFIG_PAX_SOFTMODE
66753 +static ctl_table pax_table[] = {
66754 + {
66755 + .procname = "softmode",
66756 + .data = &pax_softmode,
66757 + .maxlen = sizeof(unsigned int),
66758 + .mode = 0600,
66759 + .proc_handler = &proc_dointvec,
66760 + },
66761 +
66762 + { }
66763 +};
66764 +#endif
66765 +
66766 /* The default sysctl tables: */
66767
66768 static struct ctl_table root_table[] = {
66769 @@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
66770 #endif
66771
66772 static struct ctl_table kern_table[] = {
66773 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66774 + {
66775 + .procname = "grsecurity",
66776 + .mode = 0500,
66777 + .child = grsecurity_table,
66778 + },
66779 +#endif
66780 +
66781 +#ifdef CONFIG_PAX_SOFTMODE
66782 + {
66783 + .procname = "pax",
66784 + .mode = 0500,
66785 + .child = pax_table,
66786 + },
66787 +#endif
66788 +
66789 {
66790 .procname = "sched_child_runs_first",
66791 .data = &sysctl_sched_child_runs_first,
66792 @@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
66793 .data = &modprobe_path,
66794 .maxlen = KMOD_PATH_LEN,
66795 .mode = 0644,
66796 - .proc_handler = proc_dostring,
66797 + .proc_handler = proc_dostring_modpriv,
66798 },
66799 {
66800 .procname = "modules_disabled",
66801 @@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
66802 .extra1 = &zero,
66803 .extra2 = &one,
66804 },
66805 +#endif
66806 {
66807 .procname = "kptr_restrict",
66808 .data = &kptr_restrict,
66809 .maxlen = sizeof(int),
66810 .mode = 0644,
66811 .proc_handler = proc_dmesg_restrict,
66812 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66813 + .extra1 = &two,
66814 +#else
66815 .extra1 = &zero,
66816 +#endif
66817 .extra2 = &two,
66818 },
66819 -#endif
66820 {
66821 .procname = "ngroups_max",
66822 .data = &ngroups_max,
66823 @@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
66824 .proc_handler = proc_dointvec_minmax,
66825 .extra1 = &zero,
66826 },
66827 + {
66828 + .procname = "heap_stack_gap",
66829 + .data = &sysctl_heap_stack_gap,
66830 + .maxlen = sizeof(sysctl_heap_stack_gap),
66831 + .mode = 0644,
66832 + .proc_handler = proc_doulongvec_minmax,
66833 + },
66834 #else
66835 {
66836 .procname = "nr_trim_pages",
66837 @@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
66838 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66839 {
66840 int mode;
66841 + int error;
66842 +
66843 + if (table->parent != NULL && table->parent->procname != NULL &&
66844 + table->procname != NULL &&
66845 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66846 + return -EACCES;
66847 + if (gr_handle_chroot_sysctl(op))
66848 + return -EACCES;
66849 + error = gr_handle_sysctl(table, op);
66850 + if (error)
66851 + return error;
66852
66853 if (root->permissions)
66854 mode = root->permissions(root, current->nsproxy, table);
66855 @@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
66856 buffer, lenp, ppos);
66857 }
66858
66859 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66860 + void __user *buffer, size_t *lenp, loff_t *ppos)
66861 +{
66862 + if (write && !capable(CAP_SYS_MODULE))
66863 + return -EPERM;
66864 +
66865 + return _proc_do_string(table->data, table->maxlen, write,
66866 + buffer, lenp, ppos);
66867 +}
66868 +
66869 static size_t proc_skip_spaces(char **buf)
66870 {
66871 size_t ret;
66872 @@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
66873 len = strlen(tmp);
66874 if (len > *size)
66875 len = *size;
66876 + if (len > sizeof(tmp))
66877 + len = sizeof(tmp);
66878 if (copy_to_user(*buf, tmp, len))
66879 return -EFAULT;
66880 *size -= len;
66881 @@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
66882 *i = val;
66883 } else {
66884 val = convdiv * (*i) / convmul;
66885 - if (!first)
66886 + if (!first) {
66887 err = proc_put_char(&buffer, &left, '\t');
66888 + if (err)
66889 + break;
66890 + }
66891 err = proc_put_long(&buffer, &left, val, false);
66892 if (err)
66893 break;
66894 @@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
66895 return -ENOSYS;
66896 }
66897
66898 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66899 + void __user *buffer, size_t *lenp, loff_t *ppos)
66900 +{
66901 + return -ENOSYS;
66902 +}
66903 +
66904 int proc_dointvec(struct ctl_table *table, int write,
66905 void __user *buffer, size_t *lenp, loff_t *ppos)
66906 {
66907 @@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66908 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66909 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66910 EXPORT_SYMBOL(proc_dostring);
66911 +EXPORT_SYMBOL(proc_dostring_modpriv);
66912 EXPORT_SYMBOL(proc_doulongvec_minmax);
66913 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66914 EXPORT_SYMBOL(register_sysctl_table);
66915 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
66916 index a650694..aaeeb20 100644
66917 --- a/kernel/sysctl_binary.c
66918 +++ b/kernel/sysctl_binary.c
66919 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
66920 int i;
66921
66922 set_fs(KERNEL_DS);
66923 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66924 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66925 set_fs(old_fs);
66926 if (result < 0)
66927 goto out_kfree;
66928 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
66929 }
66930
66931 set_fs(KERNEL_DS);
66932 - result = vfs_write(file, buffer, str - buffer, &pos);
66933 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66934 set_fs(old_fs);
66935 if (result < 0)
66936 goto out_kfree;
66937 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
66938 int i;
66939
66940 set_fs(KERNEL_DS);
66941 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66942 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66943 set_fs(old_fs);
66944 if (result < 0)
66945 goto out_kfree;
66946 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
66947 }
66948
66949 set_fs(KERNEL_DS);
66950 - result = vfs_write(file, buffer, str - buffer, &pos);
66951 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66952 set_fs(old_fs);
66953 if (result < 0)
66954 goto out_kfree;
66955 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
66956 int i;
66957
66958 set_fs(KERNEL_DS);
66959 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66960 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66961 set_fs(old_fs);
66962 if (result < 0)
66963 goto out;
66964 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66965 __le16 dnaddr;
66966
66967 set_fs(KERNEL_DS);
66968 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66969 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66970 set_fs(old_fs);
66971 if (result < 0)
66972 goto out;
66973 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66974 le16_to_cpu(dnaddr) & 0x3ff);
66975
66976 set_fs(KERNEL_DS);
66977 - result = vfs_write(file, buf, len, &pos);
66978 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66979 set_fs(old_fs);
66980 if (result < 0)
66981 goto out;
66982 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
66983 index 362da65..ab8ef8c 100644
66984 --- a/kernel/sysctl_check.c
66985 +++ b/kernel/sysctl_check.c
66986 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
66987 set_fail(&fail, table, "Directory with extra2");
66988 } else {
66989 if ((table->proc_handler == proc_dostring) ||
66990 + (table->proc_handler == proc_dostring_modpriv) ||
66991 (table->proc_handler == proc_dointvec) ||
66992 (table->proc_handler == proc_dointvec_minmax) ||
66993 (table->proc_handler == proc_dointvec_jiffies) ||
66994 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
66995 index e660464..c8b9e67 100644
66996 --- a/kernel/taskstats.c
66997 +++ b/kernel/taskstats.c
66998 @@ -27,9 +27,12 @@
66999 #include <linux/cgroup.h>
67000 #include <linux/fs.h>
67001 #include <linux/file.h>
67002 +#include <linux/grsecurity.h>
67003 #include <net/genetlink.h>
67004 #include <linux/atomic.h>
67005
67006 +extern int gr_is_taskstats_denied(int pid);
67007 +
67008 /*
67009 * Maximum length of a cpumask that can be specified in
67010 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
67011 @@ -556,6 +559,9 @@ err:
67012
67013 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
67014 {
67015 + if (gr_is_taskstats_denied(current->pid))
67016 + return -EACCES;
67017 +
67018 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
67019 return cmd_attr_register_cpumask(info);
67020 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
67021 diff --git a/kernel/time.c b/kernel/time.c
67022 index 73e416d..cfc6f69 100644
67023 --- a/kernel/time.c
67024 +++ b/kernel/time.c
67025 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
67026 return error;
67027
67028 if (tz) {
67029 + /* we log in do_settimeofday called below, so don't log twice
67030 + */
67031 + if (!tv)
67032 + gr_log_timechange();
67033 +
67034 /* SMP safe, global irq locking makes it work. */
67035 sys_tz = *tz;
67036 update_vsyscall_tz();
67037 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
67038 index 8a46f5d..bbe6f9c 100644
67039 --- a/kernel/time/alarmtimer.c
67040 +++ b/kernel/time/alarmtimer.c
67041 @@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
67042 struct platform_device *pdev;
67043 int error = 0;
67044 int i;
67045 - struct k_clock alarm_clock = {
67046 + static struct k_clock alarm_clock = {
67047 .clock_getres = alarm_clock_getres,
67048 .clock_get = alarm_clock_get,
67049 .timer_create = alarm_timer_create,
67050 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
67051 index fd4a7b1..fae5c2a 100644
67052 --- a/kernel/time/tick-broadcast.c
67053 +++ b/kernel/time/tick-broadcast.c
67054 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
67055 * then clear the broadcast bit.
67056 */
67057 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
67058 - int cpu = smp_processor_id();
67059 + cpu = smp_processor_id();
67060
67061 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
67062 tick_broadcast_clear_oneshot(cpu);
67063 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
67064 index 2378413..be455fd 100644
67065 --- a/kernel/time/timekeeping.c
67066 +++ b/kernel/time/timekeeping.c
67067 @@ -14,6 +14,7 @@
67068 #include <linux/init.h>
67069 #include <linux/mm.h>
67070 #include <linux/sched.h>
67071 +#include <linux/grsecurity.h>
67072 #include <linux/syscore_ops.h>
67073 #include <linux/clocksource.h>
67074 #include <linux/jiffies.h>
67075 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
67076 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
67077 return -EINVAL;
67078
67079 + gr_log_timechange();
67080 +
67081 write_seqlock_irqsave(&xtime_lock, flags);
67082
67083 timekeeping_forward_now();
67084 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
67085 index 3258455..f35227d 100644
67086 --- a/kernel/time/timer_list.c
67087 +++ b/kernel/time/timer_list.c
67088 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
67089
67090 static void print_name_offset(struct seq_file *m, void *sym)
67091 {
67092 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67093 + SEQ_printf(m, "<%p>", NULL);
67094 +#else
67095 char symname[KSYM_NAME_LEN];
67096
67097 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
67098 SEQ_printf(m, "<%pK>", sym);
67099 else
67100 SEQ_printf(m, "%s", symname);
67101 +#endif
67102 }
67103
67104 static void
67105 @@ -112,7 +116,11 @@ next_one:
67106 static void
67107 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
67108 {
67109 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67110 + SEQ_printf(m, " .base: %p\n", NULL);
67111 +#else
67112 SEQ_printf(m, " .base: %pK\n", base);
67113 +#endif
67114 SEQ_printf(m, " .index: %d\n",
67115 base->index);
67116 SEQ_printf(m, " .resolution: %Lu nsecs\n",
67117 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
67118 {
67119 struct proc_dir_entry *pe;
67120
67121 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67122 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
67123 +#else
67124 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
67125 +#endif
67126 if (!pe)
67127 return -ENOMEM;
67128 return 0;
67129 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
67130 index 0b537f2..9e71eca 100644
67131 --- a/kernel/time/timer_stats.c
67132 +++ b/kernel/time/timer_stats.c
67133 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
67134 static unsigned long nr_entries;
67135 static struct entry entries[MAX_ENTRIES];
67136
67137 -static atomic_t overflow_count;
67138 +static atomic_unchecked_t overflow_count;
67139
67140 /*
67141 * The entries are in a hash-table, for fast lookup:
67142 @@ -140,7 +140,7 @@ static void reset_entries(void)
67143 nr_entries = 0;
67144 memset(entries, 0, sizeof(entries));
67145 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
67146 - atomic_set(&overflow_count, 0);
67147 + atomic_set_unchecked(&overflow_count, 0);
67148 }
67149
67150 static struct entry *alloc_entry(void)
67151 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
67152 if (likely(entry))
67153 entry->count++;
67154 else
67155 - atomic_inc(&overflow_count);
67156 + atomic_inc_unchecked(&overflow_count);
67157
67158 out_unlock:
67159 raw_spin_unlock_irqrestore(lock, flags);
67160 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
67161
67162 static void print_name_offset(struct seq_file *m, unsigned long addr)
67163 {
67164 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67165 + seq_printf(m, "<%p>", NULL);
67166 +#else
67167 char symname[KSYM_NAME_LEN];
67168
67169 if (lookup_symbol_name(addr, symname) < 0)
67170 seq_printf(m, "<%p>", (void *)addr);
67171 else
67172 seq_printf(m, "%s", symname);
67173 +#endif
67174 }
67175
67176 static int tstats_show(struct seq_file *m, void *v)
67177 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
67178
67179 seq_puts(m, "Timer Stats Version: v0.2\n");
67180 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
67181 - if (atomic_read(&overflow_count))
67182 + if (atomic_read_unchecked(&overflow_count))
67183 seq_printf(m, "Overflow: %d entries\n",
67184 - atomic_read(&overflow_count));
67185 + atomic_read_unchecked(&overflow_count));
67186
67187 for (i = 0; i < nr_entries; i++) {
67188 entry = entries + i;
67189 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
67190 {
67191 struct proc_dir_entry *pe;
67192
67193 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67194 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
67195 +#else
67196 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
67197 +#endif
67198 if (!pe)
67199 return -ENOMEM;
67200 return 0;
67201 diff --git a/kernel/timer.c b/kernel/timer.c
67202 index 9c3c62b..441690e 100644
67203 --- a/kernel/timer.c
67204 +++ b/kernel/timer.c
67205 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
67206 /*
67207 * This function runs timers and the timer-tq in bottom half context.
67208 */
67209 -static void run_timer_softirq(struct softirq_action *h)
67210 +static void run_timer_softirq(void)
67211 {
67212 struct tvec_base *base = __this_cpu_read(tvec_bases);
67213
67214 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
67215 index 16fc34a..efd8bb8 100644
67216 --- a/kernel/trace/blktrace.c
67217 +++ b/kernel/trace/blktrace.c
67218 @@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
67219 struct blk_trace *bt = filp->private_data;
67220 char buf[16];
67221
67222 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
67223 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
67224
67225 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
67226 }
67227 @@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
67228 return 1;
67229
67230 bt = buf->chan->private_data;
67231 - atomic_inc(&bt->dropped);
67232 + atomic_inc_unchecked(&bt->dropped);
67233 return 0;
67234 }
67235
67236 @@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
67237
67238 bt->dir = dir;
67239 bt->dev = dev;
67240 - atomic_set(&bt->dropped, 0);
67241 + atomic_set_unchecked(&bt->dropped, 0);
67242
67243 ret = -EIO;
67244 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
67245 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
67246 index 25b4f4d..6f4772d 100644
67247 --- a/kernel/trace/ftrace.c
67248 +++ b/kernel/trace/ftrace.c
67249 @@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
67250 if (unlikely(ftrace_disabled))
67251 return 0;
67252
67253 + ret = ftrace_arch_code_modify_prepare();
67254 + FTRACE_WARN_ON(ret);
67255 + if (ret)
67256 + return 0;
67257 +
67258 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
67259 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
67260 if (ret) {
67261 ftrace_bug(ret, ip);
67262 - return 0;
67263 }
67264 - return 1;
67265 + return ret ? 0 : 1;
67266 }
67267
67268 /*
67269 @@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
67270
67271 int
67272 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
67273 - void *data)
67274 + void *data)
67275 {
67276 struct ftrace_func_probe *entry;
67277 struct ftrace_page *pg;
67278 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
67279 index f2bd275..adaf3a2 100644
67280 --- a/kernel/trace/trace.c
67281 +++ b/kernel/trace/trace.c
67282 @@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
67283 };
67284 #endif
67285
67286 -static struct dentry *d_tracer;
67287 -
67288 struct dentry *tracing_init_dentry(void)
67289 {
67290 + static struct dentry *d_tracer;
67291 static int once;
67292
67293 if (d_tracer)
67294 @@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
67295 return d_tracer;
67296 }
67297
67298 -static struct dentry *d_percpu;
67299 -
67300 struct dentry *tracing_dentry_percpu(void)
67301 {
67302 + static struct dentry *d_percpu;
67303 static int once;
67304 struct dentry *d_tracer;
67305
67306 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
67307 index c212a7f..7b02394 100644
67308 --- a/kernel/trace/trace_events.c
67309 +++ b/kernel/trace/trace_events.c
67310 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
67311 struct ftrace_module_file_ops {
67312 struct list_head list;
67313 struct module *mod;
67314 - struct file_operations id;
67315 - struct file_operations enable;
67316 - struct file_operations format;
67317 - struct file_operations filter;
67318 };
67319
67320 static struct ftrace_module_file_ops *
67321 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
67322
67323 file_ops->mod = mod;
67324
67325 - file_ops->id = ftrace_event_id_fops;
67326 - file_ops->id.owner = mod;
67327 -
67328 - file_ops->enable = ftrace_enable_fops;
67329 - file_ops->enable.owner = mod;
67330 -
67331 - file_ops->filter = ftrace_event_filter_fops;
67332 - file_ops->filter.owner = mod;
67333 -
67334 - file_ops->format = ftrace_event_format_fops;
67335 - file_ops->format.owner = mod;
67336 + pax_open_kernel();
67337 + *(void **)&mod->trace_id.owner = mod;
67338 + *(void **)&mod->trace_enable.owner = mod;
67339 + *(void **)&mod->trace_filter.owner = mod;
67340 + *(void **)&mod->trace_format.owner = mod;
67341 + pax_close_kernel();
67342
67343 list_add(&file_ops->list, &ftrace_module_file_list);
67344
67345 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
67346
67347 for_each_event(call, start, end) {
67348 __trace_add_event_call(*call, mod,
67349 - &file_ops->id, &file_ops->enable,
67350 - &file_ops->filter, &file_ops->format);
67351 + &mod->trace_id, &mod->trace_enable,
67352 + &mod->trace_filter, &mod->trace_format);
67353 }
67354 }
67355
67356 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
67357 index 00d527c..7c5b1a3 100644
67358 --- a/kernel/trace/trace_kprobe.c
67359 +++ b/kernel/trace/trace_kprobe.c
67360 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67361 long ret;
67362 int maxlen = get_rloc_len(*(u32 *)dest);
67363 u8 *dst = get_rloc_data(dest);
67364 - u8 *src = addr;
67365 + const u8 __user *src = (const u8 __force_user *)addr;
67366 mm_segment_t old_fs = get_fs();
67367 if (!maxlen)
67368 return;
67369 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67370 pagefault_disable();
67371 do
67372 ret = __copy_from_user_inatomic(dst++, src++, 1);
67373 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
67374 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
67375 dst[-1] = '\0';
67376 pagefault_enable();
67377 set_fs(old_fs);
67378 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67379 ((u8 *)get_rloc_data(dest))[0] = '\0';
67380 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
67381 } else
67382 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
67383 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
67384 get_rloc_offs(*(u32 *)dest));
67385 }
67386 /* Return the length of string -- including null terminal byte */
67387 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
67388 set_fs(KERNEL_DS);
67389 pagefault_disable();
67390 do {
67391 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
67392 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
67393 len++;
67394 } while (c && ret == 0 && len < MAX_STRING_SIZE);
67395 pagefault_enable();
67396 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
67397 index fd3c8aa..5f324a6 100644
67398 --- a/kernel/trace/trace_mmiotrace.c
67399 +++ b/kernel/trace/trace_mmiotrace.c
67400 @@ -24,7 +24,7 @@ struct header_iter {
67401 static struct trace_array *mmio_trace_array;
67402 static bool overrun_detected;
67403 static unsigned long prev_overruns;
67404 -static atomic_t dropped_count;
67405 +static atomic_unchecked_t dropped_count;
67406
67407 static void mmio_reset_data(struct trace_array *tr)
67408 {
67409 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
67410
67411 static unsigned long count_overruns(struct trace_iterator *iter)
67412 {
67413 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
67414 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
67415 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
67416
67417 if (over > prev_overruns)
67418 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
67419 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
67420 sizeof(*entry), 0, pc);
67421 if (!event) {
67422 - atomic_inc(&dropped_count);
67423 + atomic_inc_unchecked(&dropped_count);
67424 return;
67425 }
67426 entry = ring_buffer_event_data(event);
67427 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
67428 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67429 sizeof(*entry), 0, pc);
67430 if (!event) {
67431 - atomic_inc(&dropped_count);
67432 + atomic_inc_unchecked(&dropped_count);
67433 return;
67434 }
67435 entry = ring_buffer_event_data(event);
67436 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
67437 index 5199930..26c73a0 100644
67438 --- a/kernel/trace/trace_output.c
67439 +++ b/kernel/trace/trace_output.c
67440 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
67441
67442 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67443 if (!IS_ERR(p)) {
67444 - p = mangle_path(s->buffer + s->len, p, "\n");
67445 + p = mangle_path(s->buffer + s->len, p, "\n\\");
67446 if (p) {
67447 s->len = p - s->buffer;
67448 return 1;
67449 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
67450 index 77575b3..6e623d1 100644
67451 --- a/kernel/trace/trace_stack.c
67452 +++ b/kernel/trace/trace_stack.c
67453 @@ -50,7 +50,7 @@ static inline void check_stack(void)
67454 return;
67455
67456 /* we do not handle interrupt stacks yet */
67457 - if (!object_is_on_stack(&this_size))
67458 + if (!object_starts_on_stack(&this_size))
67459 return;
67460
67461 local_irq_save(flags);
67462 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
67463 index 209b379..7f76423 100644
67464 --- a/kernel/trace/trace_workqueue.c
67465 +++ b/kernel/trace/trace_workqueue.c
67466 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
67467 int cpu;
67468 pid_t pid;
67469 /* Can be inserted from interrupt or user context, need to be atomic */
67470 - atomic_t inserted;
67471 + atomic_unchecked_t inserted;
67472 /*
67473 * Don't need to be atomic, works are serialized in a single workqueue thread
67474 * on a single CPU.
67475 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
67476 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67477 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67478 if (node->pid == wq_thread->pid) {
67479 - atomic_inc(&node->inserted);
67480 + atomic_inc_unchecked(&node->inserted);
67481 goto found;
67482 }
67483 }
67484 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
67485 tsk = get_pid_task(pid, PIDTYPE_PID);
67486 if (tsk) {
67487 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67488 - atomic_read(&cws->inserted), cws->executed,
67489 + atomic_read_unchecked(&cws->inserted), cws->executed,
67490 tsk->comm);
67491 put_task_struct(tsk);
67492 }
67493 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
67494 index 82928f5..92da771 100644
67495 --- a/lib/Kconfig.debug
67496 +++ b/lib/Kconfig.debug
67497 @@ -1103,6 +1103,7 @@ config LATENCYTOP
67498 depends on DEBUG_KERNEL
67499 depends on STACKTRACE_SUPPORT
67500 depends on PROC_FS
67501 + depends on !GRKERNSEC_HIDESYM
67502 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
67503 select KALLSYMS
67504 select KALLSYMS_ALL
67505 diff --git a/lib/bitmap.c b/lib/bitmap.c
67506 index 0d4a127..33a06c7 100644
67507 --- a/lib/bitmap.c
67508 +++ b/lib/bitmap.c
67509 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
67510 {
67511 int c, old_c, totaldigits, ndigits, nchunks, nbits;
67512 u32 chunk;
67513 - const char __user __force *ubuf = (const char __user __force *)buf;
67514 + const char __user *ubuf = (const char __force_user *)buf;
67515
67516 bitmap_zero(maskp, nmaskbits);
67517
67518 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
67519 {
67520 if (!access_ok(VERIFY_READ, ubuf, ulen))
67521 return -EFAULT;
67522 - return __bitmap_parse((const char __force *)ubuf,
67523 + return __bitmap_parse((const char __force_kernel *)ubuf,
67524 ulen, 1, maskp, nmaskbits);
67525
67526 }
67527 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
67528 {
67529 unsigned a, b;
67530 int c, old_c, totaldigits;
67531 - const char __user __force *ubuf = (const char __user __force *)buf;
67532 + const char __user *ubuf = (const char __force_user *)buf;
67533 int exp_digit, in_range;
67534
67535 totaldigits = c = 0;
67536 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
67537 {
67538 if (!access_ok(VERIFY_READ, ubuf, ulen))
67539 return -EFAULT;
67540 - return __bitmap_parselist((const char __force *)ubuf,
67541 + return __bitmap_parselist((const char __force_kernel *)ubuf,
67542 ulen, 1, maskp, nmaskbits);
67543 }
67544 EXPORT_SYMBOL(bitmap_parselist_user);
67545 diff --git a/lib/bug.c b/lib/bug.c
67546 index 1955209..cbbb2ad 100644
67547 --- a/lib/bug.c
67548 +++ b/lib/bug.c
67549 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
67550 return BUG_TRAP_TYPE_NONE;
67551
67552 bug = find_bug(bugaddr);
67553 + if (!bug)
67554 + return BUG_TRAP_TYPE_NONE;
67555
67556 file = NULL;
67557 line = 0;
67558 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
67559 index a78b7c6..2c73084 100644
67560 --- a/lib/debugobjects.c
67561 +++ b/lib/debugobjects.c
67562 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
67563 if (limit > 4)
67564 return;
67565
67566 - is_on_stack = object_is_on_stack(addr);
67567 + is_on_stack = object_starts_on_stack(addr);
67568 if (is_on_stack == onstack)
67569 return;
67570
67571 diff --git a/lib/devres.c b/lib/devres.c
67572 index 7c0e953..f642b5c 100644
67573 --- a/lib/devres.c
67574 +++ b/lib/devres.c
67575 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
67576 void devm_iounmap(struct device *dev, void __iomem *addr)
67577 {
67578 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
67579 - (void *)addr));
67580 + (void __force *)addr));
67581 iounmap(addr);
67582 }
67583 EXPORT_SYMBOL(devm_iounmap);
67584 @@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
67585 {
67586 ioport_unmap(addr);
67587 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
67588 - devm_ioport_map_match, (void *)addr));
67589 + devm_ioport_map_match, (void __force *)addr));
67590 }
67591 EXPORT_SYMBOL(devm_ioport_unmap);
67592
67593 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
67594 index fea790a..ebb0e82 100644
67595 --- a/lib/dma-debug.c
67596 +++ b/lib/dma-debug.c
67597 @@ -925,7 +925,7 @@ out:
67598
67599 static void check_for_stack(struct device *dev, void *addr)
67600 {
67601 - if (object_is_on_stack(addr))
67602 + if (object_starts_on_stack(addr))
67603 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67604 "stack [addr=%p]\n", addr);
67605 }
67606 diff --git a/lib/extable.c b/lib/extable.c
67607 index 4cac81e..63e9b8f 100644
67608 --- a/lib/extable.c
67609 +++ b/lib/extable.c
67610 @@ -13,6 +13,7 @@
67611 #include <linux/init.h>
67612 #include <linux/sort.h>
67613 #include <asm/uaccess.h>
67614 +#include <asm/pgtable.h>
67615
67616 #ifndef ARCH_HAS_SORT_EXTABLE
67617 /*
67618 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
67619 void sort_extable(struct exception_table_entry *start,
67620 struct exception_table_entry *finish)
67621 {
67622 + pax_open_kernel();
67623 sort(start, finish - start, sizeof(struct exception_table_entry),
67624 cmp_ex, NULL);
67625 + pax_close_kernel();
67626 }
67627
67628 #ifdef CONFIG_MODULES
67629 diff --git a/lib/inflate.c b/lib/inflate.c
67630 index 013a761..c28f3fc 100644
67631 --- a/lib/inflate.c
67632 +++ b/lib/inflate.c
67633 @@ -269,7 +269,7 @@ static void free(void *where)
67634 malloc_ptr = free_mem_ptr;
67635 }
67636 #else
67637 -#define malloc(a) kmalloc(a, GFP_KERNEL)
67638 +#define malloc(a) kmalloc((a), GFP_KERNEL)
67639 #define free(a) kfree(a)
67640 #endif
67641
67642 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
67643 index bd2bea9..6b3c95e 100644
67644 --- a/lib/is_single_threaded.c
67645 +++ b/lib/is_single_threaded.c
67646 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
67647 struct task_struct *p, *t;
67648 bool ret;
67649
67650 + if (!mm)
67651 + return true;
67652 +
67653 if (atomic_read(&task->signal->live) != 1)
67654 return false;
67655
67656 diff --git a/lib/kref.c b/lib/kref.c
67657 index 3efb882..8492f4c 100644
67658 --- a/lib/kref.c
67659 +++ b/lib/kref.c
67660 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
67661 */
67662 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67663 {
67664 - WARN_ON(release == NULL);
67665 + BUG_ON(release == NULL);
67666 WARN_ON(release == (void (*)(struct kref *))kfree);
67667
67668 if (atomic_dec_and_test(&kref->refcount)) {
67669 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
67670 index d9df745..e73c2fe 100644
67671 --- a/lib/radix-tree.c
67672 +++ b/lib/radix-tree.c
67673 @@ -80,7 +80,7 @@ struct radix_tree_preload {
67674 int nr;
67675 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67676 };
67677 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67678 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67679
67680 static inline void *ptr_to_indirect(void *ptr)
67681 {
67682 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
67683 index 993599e..84dc70e 100644
67684 --- a/lib/vsprintf.c
67685 +++ b/lib/vsprintf.c
67686 @@ -16,6 +16,9 @@
67687 * - scnprintf and vscnprintf
67688 */
67689
67690 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67691 +#define __INCLUDED_BY_HIDESYM 1
67692 +#endif
67693 #include <stdarg.h>
67694 #include <linux/module.h>
67695 #include <linux/types.h>
67696 @@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
67697 char sym[KSYM_SYMBOL_LEN];
67698 if (ext == 'B')
67699 sprint_backtrace(sym, value);
67700 - else if (ext != 'f' && ext != 's')
67701 + else if (ext != 'f' && ext != 's' && ext != 'a')
67702 sprint_symbol(sym, value);
67703 else
67704 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67705 @@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
67706 return string(buf, end, uuid, spec);
67707 }
67708
67709 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67710 +int kptr_restrict __read_mostly = 2;
67711 +#else
67712 int kptr_restrict __read_mostly;
67713 +#endif
67714
67715 /*
67716 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67717 @@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
67718 * - 'S' For symbolic direct pointers with offset
67719 * - 's' For symbolic direct pointers without offset
67720 * - 'B' For backtraced symbolic direct pointers with offset
67721 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67722 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67723 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67724 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67725 * - 'M' For a 6-byte MAC address, it prints the address in the
67726 @@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67727 {
67728 if (!ptr && *fmt != 'K') {
67729 /*
67730 - * Print (null) with the same width as a pointer so it makes
67731 + * Print (nil) with the same width as a pointer so it makes
67732 * tabular output look nice.
67733 */
67734 if (spec.field_width == -1)
67735 spec.field_width = 2 * sizeof(void *);
67736 - return string(buf, end, "(null)", spec);
67737 + return string(buf, end, "(nil)", spec);
67738 }
67739
67740 switch (*fmt) {
67741 @@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67742 /* Fallthrough */
67743 case 'S':
67744 case 's':
67745 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67746 + break;
67747 +#else
67748 + return symbol_string(buf, end, ptr, spec, *fmt);
67749 +#endif
67750 + case 'A':
67751 + case 'a':
67752 case 'B':
67753 return symbol_string(buf, end, ptr, spec, *fmt);
67754 case 'R':
67755 @@ -1608,11 +1624,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67756 typeof(type) value; \
67757 if (sizeof(type) == 8) { \
67758 args = PTR_ALIGN(args, sizeof(u32)); \
67759 - *(u32 *)&value = *(u32 *)args; \
67760 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67761 + *(u32 *)&value = *(const u32 *)args; \
67762 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67763 } else { \
67764 args = PTR_ALIGN(args, sizeof(type)); \
67765 - value = *(typeof(type) *)args; \
67766 + value = *(const typeof(type) *)args; \
67767 } \
67768 args += sizeof(type); \
67769 value; \
67770 @@ -1675,7 +1691,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67771 case FORMAT_TYPE_STR: {
67772 const char *str_arg = args;
67773 args += strlen(str_arg) + 1;
67774 - str = string(str, end, (char *)str_arg, spec);
67775 + str = string(str, end, str_arg, spec);
67776 break;
67777 }
67778
67779 diff --git a/localversion-grsec b/localversion-grsec
67780 new file mode 100644
67781 index 0000000..7cd6065
67782 --- /dev/null
67783 +++ b/localversion-grsec
67784 @@ -0,0 +1 @@
67785 +-grsec
67786 diff --git a/mm/Kconfig b/mm/Kconfig
67787 index 011b110..b492af2 100644
67788 --- a/mm/Kconfig
67789 +++ b/mm/Kconfig
67790 @@ -241,10 +241,10 @@ config KSM
67791 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67792
67793 config DEFAULT_MMAP_MIN_ADDR
67794 - int "Low address space to protect from user allocation"
67795 + int "Low address space to protect from user allocation"
67796 depends on MMU
67797 - default 4096
67798 - help
67799 + default 65536
67800 + help
67801 This is the portion of low virtual memory which should be protected
67802 from userspace allocation. Keeping a user from writing to low pages
67803 can help reduce the impact of kernel NULL pointer bugs.
67804 diff --git a/mm/filemap.c b/mm/filemap.c
67805 index 03c5b0e..a01e793 100644
67806 --- a/mm/filemap.c
67807 +++ b/mm/filemap.c
67808 @@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
67809 struct address_space *mapping = file->f_mapping;
67810
67811 if (!mapping->a_ops->readpage)
67812 - return -ENOEXEC;
67813 + return -ENODEV;
67814 file_accessed(file);
67815 vma->vm_ops = &generic_file_vm_ops;
67816 vma->vm_flags |= VM_CAN_NONLINEAR;
67817 @@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
67818 *pos = i_size_read(inode);
67819
67820 if (limit != RLIM_INFINITY) {
67821 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67822 if (*pos >= limit) {
67823 send_sig(SIGXFSZ, current, 0);
67824 return -EFBIG;
67825 diff --git a/mm/fremap.c b/mm/fremap.c
67826 index 9ed4fd4..c42648d 100644
67827 --- a/mm/fremap.c
67828 +++ b/mm/fremap.c
67829 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
67830 retry:
67831 vma = find_vma(mm, start);
67832
67833 +#ifdef CONFIG_PAX_SEGMEXEC
67834 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67835 + goto out;
67836 +#endif
67837 +
67838 /*
67839 * Make sure the vma is shared, that it supports prefaulting,
67840 * and that the remapped range is valid and fully within
67841 diff --git a/mm/highmem.c b/mm/highmem.c
67842 index 57d82c6..e9e0552 100644
67843 --- a/mm/highmem.c
67844 +++ b/mm/highmem.c
67845 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67846 * So no dangers, even with speculative execution.
67847 */
67848 page = pte_page(pkmap_page_table[i]);
67849 + pax_open_kernel();
67850 pte_clear(&init_mm, (unsigned long)page_address(page),
67851 &pkmap_page_table[i]);
67852 -
67853 + pax_close_kernel();
67854 set_page_address(page, NULL);
67855 need_flush = 1;
67856 }
67857 @@ -186,9 +187,11 @@ start:
67858 }
67859 }
67860 vaddr = PKMAP_ADDR(last_pkmap_nr);
67861 +
67862 + pax_open_kernel();
67863 set_pte_at(&init_mm, vaddr,
67864 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67865 -
67866 + pax_close_kernel();
67867 pkmap_count[last_pkmap_nr] = 1;
67868 set_page_address(page, (void *)vaddr);
67869
67870 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
67871 index 33141f5..e56bef9 100644
67872 --- a/mm/huge_memory.c
67873 +++ b/mm/huge_memory.c
67874 @@ -703,7 +703,7 @@ out:
67875 * run pte_offset_map on the pmd, if an huge pmd could
67876 * materialize from under us from a different thread.
67877 */
67878 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67879 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67880 return VM_FAULT_OOM;
67881 /* if an huge pmd materialized from under us just retry later */
67882 if (unlikely(pmd_trans_huge(*pmd)))
67883 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
67884 index 2316840..b418671 100644
67885 --- a/mm/hugetlb.c
67886 +++ b/mm/hugetlb.c
67887 @@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
67888 return 1;
67889 }
67890
67891 +#ifdef CONFIG_PAX_SEGMEXEC
67892 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67893 +{
67894 + struct mm_struct *mm = vma->vm_mm;
67895 + struct vm_area_struct *vma_m;
67896 + unsigned long address_m;
67897 + pte_t *ptep_m;
67898 +
67899 + vma_m = pax_find_mirror_vma(vma);
67900 + if (!vma_m)
67901 + return;
67902 +
67903 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67904 + address_m = address + SEGMEXEC_TASK_SIZE;
67905 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67906 + get_page(page_m);
67907 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
67908 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67909 +}
67910 +#endif
67911 +
67912 /*
67913 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67914 */
67915 @@ -2450,6 +2471,11 @@ retry_avoidcopy:
67916 make_huge_pte(vma, new_page, 1));
67917 page_remove_rmap(old_page);
67918 hugepage_add_new_anon_rmap(new_page, vma, address);
67919 +
67920 +#ifdef CONFIG_PAX_SEGMEXEC
67921 + pax_mirror_huge_pte(vma, address, new_page);
67922 +#endif
67923 +
67924 /* Make the old page be freed below */
67925 new_page = old_page;
67926 mmu_notifier_invalidate_range_end(mm,
67927 @@ -2601,6 +2627,10 @@ retry:
67928 && (vma->vm_flags & VM_SHARED)));
67929 set_huge_pte_at(mm, address, ptep, new_pte);
67930
67931 +#ifdef CONFIG_PAX_SEGMEXEC
67932 + pax_mirror_huge_pte(vma, address, page);
67933 +#endif
67934 +
67935 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67936 /* Optimization, do the COW without a second fault */
67937 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67938 @@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67939 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67940 struct hstate *h = hstate_vma(vma);
67941
67942 +#ifdef CONFIG_PAX_SEGMEXEC
67943 + struct vm_area_struct *vma_m;
67944 +#endif
67945 +
67946 ptep = huge_pte_offset(mm, address);
67947 if (ptep) {
67948 entry = huge_ptep_get(ptep);
67949 @@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67950 VM_FAULT_SET_HINDEX(h - hstates);
67951 }
67952
67953 +#ifdef CONFIG_PAX_SEGMEXEC
67954 + vma_m = pax_find_mirror_vma(vma);
67955 + if (vma_m) {
67956 + unsigned long address_m;
67957 +
67958 + if (vma->vm_start > vma_m->vm_start) {
67959 + address_m = address;
67960 + address -= SEGMEXEC_TASK_SIZE;
67961 + vma = vma_m;
67962 + h = hstate_vma(vma);
67963 + } else
67964 + address_m = address + SEGMEXEC_TASK_SIZE;
67965 +
67966 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67967 + return VM_FAULT_OOM;
67968 + address_m &= HPAGE_MASK;
67969 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67970 + }
67971 +#endif
67972 +
67973 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67974 if (!ptep)
67975 return VM_FAULT_OOM;
67976 diff --git a/mm/internal.h b/mm/internal.h
67977 index 2189af4..f2ca332 100644
67978 --- a/mm/internal.h
67979 +++ b/mm/internal.h
67980 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
67981 * in mm/page_alloc.c
67982 */
67983 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67984 +extern void free_compound_page(struct page *page);
67985 extern void prep_compound_page(struct page *page, unsigned long order);
67986 #ifdef CONFIG_MEMORY_FAILURE
67987 extern bool is_free_buddy_page(struct page *page);
67988 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
67989 index f3b2a00..61da94d 100644
67990 --- a/mm/kmemleak.c
67991 +++ b/mm/kmemleak.c
67992 @@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
67993
67994 for (i = 0; i < object->trace_len; i++) {
67995 void *ptr = (void *)object->trace[i];
67996 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67997 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67998 }
67999 }
68000
68001 diff --git a/mm/maccess.c b/mm/maccess.c
68002 index d53adf9..03a24bf 100644
68003 --- a/mm/maccess.c
68004 +++ b/mm/maccess.c
68005 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
68006 set_fs(KERNEL_DS);
68007 pagefault_disable();
68008 ret = __copy_from_user_inatomic(dst,
68009 - (__force const void __user *)src, size);
68010 + (const void __force_user *)src, size);
68011 pagefault_enable();
68012 set_fs(old_fs);
68013
68014 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
68015
68016 set_fs(KERNEL_DS);
68017 pagefault_disable();
68018 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
68019 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
68020 pagefault_enable();
68021 set_fs(old_fs);
68022
68023 diff --git a/mm/madvise.c b/mm/madvise.c
68024 index 74bf193..feb6fd3 100644
68025 --- a/mm/madvise.c
68026 +++ b/mm/madvise.c
68027 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
68028 pgoff_t pgoff;
68029 unsigned long new_flags = vma->vm_flags;
68030
68031 +#ifdef CONFIG_PAX_SEGMEXEC
68032 + struct vm_area_struct *vma_m;
68033 +#endif
68034 +
68035 switch (behavior) {
68036 case MADV_NORMAL:
68037 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
68038 @@ -110,6 +114,13 @@ success:
68039 /*
68040 * vm_flags is protected by the mmap_sem held in write mode.
68041 */
68042 +
68043 +#ifdef CONFIG_PAX_SEGMEXEC
68044 + vma_m = pax_find_mirror_vma(vma);
68045 + if (vma_m)
68046 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
68047 +#endif
68048 +
68049 vma->vm_flags = new_flags;
68050
68051 out:
68052 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
68053 struct vm_area_struct ** prev,
68054 unsigned long start, unsigned long end)
68055 {
68056 +
68057 +#ifdef CONFIG_PAX_SEGMEXEC
68058 + struct vm_area_struct *vma_m;
68059 +#endif
68060 +
68061 *prev = vma;
68062 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
68063 return -EINVAL;
68064 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
68065 zap_page_range(vma, start, end - start, &details);
68066 } else
68067 zap_page_range(vma, start, end - start, NULL);
68068 +
68069 +#ifdef CONFIG_PAX_SEGMEXEC
68070 + vma_m = pax_find_mirror_vma(vma);
68071 + if (vma_m) {
68072 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
68073 + struct zap_details details = {
68074 + .nonlinear_vma = vma_m,
68075 + .last_index = ULONG_MAX,
68076 + };
68077 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
68078 + } else
68079 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
68080 + }
68081 +#endif
68082 +
68083 return 0;
68084 }
68085
68086 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
68087 if (end < start)
68088 goto out;
68089
68090 +#ifdef CONFIG_PAX_SEGMEXEC
68091 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
68092 + if (end > SEGMEXEC_TASK_SIZE)
68093 + goto out;
68094 + } else
68095 +#endif
68096 +
68097 + if (end > TASK_SIZE)
68098 + goto out;
68099 +
68100 error = 0;
68101 if (end == start)
68102 goto out;
68103 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
68104 index 06d3479..0778eef 100644
68105 --- a/mm/memory-failure.c
68106 +++ b/mm/memory-failure.c
68107 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
68108
68109 int sysctl_memory_failure_recovery __read_mostly = 1;
68110
68111 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68112 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68113
68114 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
68115
68116 @@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
68117 si.si_signo = SIGBUS;
68118 si.si_errno = 0;
68119 si.si_code = BUS_MCEERR_AO;
68120 - si.si_addr = (void *)addr;
68121 + si.si_addr = (void __user *)addr;
68122 #ifdef __ARCH_SI_TRAPNO
68123 si.si_trapno = trapno;
68124 #endif
68125 @@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68126 }
68127
68128 nr_pages = 1 << compound_trans_order(hpage);
68129 - atomic_long_add(nr_pages, &mce_bad_pages);
68130 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
68131
68132 /*
68133 * We need/can do nothing about count=0 pages.
68134 @@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68135 if (!PageHWPoison(hpage)
68136 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
68137 || (p != hpage && TestSetPageHWPoison(hpage))) {
68138 - atomic_long_sub(nr_pages, &mce_bad_pages);
68139 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68140 return 0;
68141 }
68142 set_page_hwpoison_huge_page(hpage);
68143 @@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
68144 }
68145 if (hwpoison_filter(p)) {
68146 if (TestClearPageHWPoison(p))
68147 - atomic_long_sub(nr_pages, &mce_bad_pages);
68148 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68149 unlock_page(hpage);
68150 put_page(hpage);
68151 return 0;
68152 @@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
68153 return 0;
68154 }
68155 if (TestClearPageHWPoison(p))
68156 - atomic_long_sub(nr_pages, &mce_bad_pages);
68157 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68158 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
68159 return 0;
68160 }
68161 @@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
68162 */
68163 if (TestClearPageHWPoison(page)) {
68164 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
68165 - atomic_long_sub(nr_pages, &mce_bad_pages);
68166 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
68167 freeit = 1;
68168 if (PageHuge(page))
68169 clear_page_hwpoison_huge_page(page);
68170 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
68171 }
68172 done:
68173 if (!PageHWPoison(hpage))
68174 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
68175 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
68176 set_page_hwpoison_huge_page(hpage);
68177 dequeue_hwpoisoned_huge_page(hpage);
68178 /* keep elevated page count for bad page */
68179 @@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
68180 return ret;
68181
68182 done:
68183 - atomic_long_add(1, &mce_bad_pages);
68184 + atomic_long_add_unchecked(1, &mce_bad_pages);
68185 SetPageHWPoison(page);
68186 /* keep elevated page count for bad page */
68187 return ret;
68188 diff --git a/mm/memory.c b/mm/memory.c
68189 index 829d437..3d3926a 100644
68190 --- a/mm/memory.c
68191 +++ b/mm/memory.c
68192 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
68193 return;
68194
68195 pmd = pmd_offset(pud, start);
68196 +
68197 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
68198 pud_clear(pud);
68199 pmd_free_tlb(tlb, pmd, start);
68200 +#endif
68201 +
68202 }
68203
68204 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
68205 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
68206 if (end - 1 > ceiling - 1)
68207 return;
68208
68209 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
68210 pud = pud_offset(pgd, start);
68211 pgd_clear(pgd);
68212 pud_free_tlb(tlb, pud, start);
68213 +#endif
68214 +
68215 }
68216
68217 /*
68218 @@ -1566,12 +1573,6 @@ no_page_table:
68219 return page;
68220 }
68221
68222 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
68223 -{
68224 - return stack_guard_page_start(vma, addr) ||
68225 - stack_guard_page_end(vma, addr+PAGE_SIZE);
68226 -}
68227 -
68228 /**
68229 * __get_user_pages() - pin user pages in memory
68230 * @tsk: task_struct of target task
68231 @@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68232 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
68233 i = 0;
68234
68235 - do {
68236 + while (nr_pages) {
68237 struct vm_area_struct *vma;
68238
68239 - vma = find_extend_vma(mm, start);
68240 + vma = find_vma(mm, start);
68241 if (!vma && in_gate_area(mm, start)) {
68242 unsigned long pg = start & PAGE_MASK;
68243 pgd_t *pgd;
68244 @@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68245 goto next_page;
68246 }
68247
68248 - if (!vma ||
68249 + if (!vma || start < vma->vm_start ||
68250 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
68251 !(vm_flags & vma->vm_flags))
68252 return i ? : -EFAULT;
68253 @@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68254 int ret;
68255 unsigned int fault_flags = 0;
68256
68257 - /* For mlock, just skip the stack guard page. */
68258 - if (foll_flags & FOLL_MLOCK) {
68259 - if (stack_guard_page(vma, start))
68260 - goto next_page;
68261 - }
68262 if (foll_flags & FOLL_WRITE)
68263 fault_flags |= FAULT_FLAG_WRITE;
68264 if (nonblocking)
68265 @@ -1800,7 +1796,7 @@ next_page:
68266 start += PAGE_SIZE;
68267 nr_pages--;
68268 } while (nr_pages && start < vma->vm_end);
68269 - } while (nr_pages);
68270 + }
68271 return i;
68272 }
68273 EXPORT_SYMBOL(__get_user_pages);
68274 @@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
68275 page_add_file_rmap(page);
68276 set_pte_at(mm, addr, pte, mk_pte(page, prot));
68277
68278 +#ifdef CONFIG_PAX_SEGMEXEC
68279 + pax_mirror_file_pte(vma, addr, page, ptl);
68280 +#endif
68281 +
68282 retval = 0;
68283 pte_unmap_unlock(pte, ptl);
68284 return retval;
68285 @@ -2041,10 +2041,22 @@ out:
68286 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
68287 struct page *page)
68288 {
68289 +
68290 +#ifdef CONFIG_PAX_SEGMEXEC
68291 + struct vm_area_struct *vma_m;
68292 +#endif
68293 +
68294 if (addr < vma->vm_start || addr >= vma->vm_end)
68295 return -EFAULT;
68296 if (!page_count(page))
68297 return -EINVAL;
68298 +
68299 +#ifdef CONFIG_PAX_SEGMEXEC
68300 + vma_m = pax_find_mirror_vma(vma);
68301 + if (vma_m)
68302 + vma_m->vm_flags |= VM_INSERTPAGE;
68303 +#endif
68304 +
68305 vma->vm_flags |= VM_INSERTPAGE;
68306 return insert_page(vma, addr, page, vma->vm_page_prot);
68307 }
68308 @@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
68309 unsigned long pfn)
68310 {
68311 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
68312 + BUG_ON(vma->vm_mirror);
68313
68314 if (addr < vma->vm_start || addr >= vma->vm_end)
68315 return -EFAULT;
68316 @@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
68317 copy_user_highpage(dst, src, va, vma);
68318 }
68319
68320 +#ifdef CONFIG_PAX_SEGMEXEC
68321 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
68322 +{
68323 + struct mm_struct *mm = vma->vm_mm;
68324 + spinlock_t *ptl;
68325 + pte_t *pte, entry;
68326 +
68327 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
68328 + entry = *pte;
68329 + if (!pte_present(entry)) {
68330 + if (!pte_none(entry)) {
68331 + BUG_ON(pte_file(entry));
68332 + free_swap_and_cache(pte_to_swp_entry(entry));
68333 + pte_clear_not_present_full(mm, address, pte, 0);
68334 + }
68335 + } else {
68336 + struct page *page;
68337 +
68338 + flush_cache_page(vma, address, pte_pfn(entry));
68339 + entry = ptep_clear_flush(vma, address, pte);
68340 + BUG_ON(pte_dirty(entry));
68341 + page = vm_normal_page(vma, address, entry);
68342 + if (page) {
68343 + update_hiwater_rss(mm);
68344 + if (PageAnon(page))
68345 + dec_mm_counter_fast(mm, MM_ANONPAGES);
68346 + else
68347 + dec_mm_counter_fast(mm, MM_FILEPAGES);
68348 + page_remove_rmap(page);
68349 + page_cache_release(page);
68350 + }
68351 + }
68352 + pte_unmap_unlock(pte, ptl);
68353 +}
68354 +
68355 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
68356 + *
68357 + * the ptl of the lower mapped page is held on entry and is not released on exit
68358 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
68359 + */
68360 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68361 +{
68362 + struct mm_struct *mm = vma->vm_mm;
68363 + unsigned long address_m;
68364 + spinlock_t *ptl_m;
68365 + struct vm_area_struct *vma_m;
68366 + pmd_t *pmd_m;
68367 + pte_t *pte_m, entry_m;
68368 +
68369 + BUG_ON(!page_m || !PageAnon(page_m));
68370 +
68371 + vma_m = pax_find_mirror_vma(vma);
68372 + if (!vma_m)
68373 + return;
68374 +
68375 + BUG_ON(!PageLocked(page_m));
68376 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68377 + address_m = address + SEGMEXEC_TASK_SIZE;
68378 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68379 + pte_m = pte_offset_map(pmd_m, address_m);
68380 + ptl_m = pte_lockptr(mm, pmd_m);
68381 + if (ptl != ptl_m) {
68382 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68383 + if (!pte_none(*pte_m))
68384 + goto out;
68385 + }
68386 +
68387 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68388 + page_cache_get(page_m);
68389 + page_add_anon_rmap(page_m, vma_m, address_m);
68390 + inc_mm_counter_fast(mm, MM_ANONPAGES);
68391 + set_pte_at(mm, address_m, pte_m, entry_m);
68392 + update_mmu_cache(vma_m, address_m, entry_m);
68393 +out:
68394 + if (ptl != ptl_m)
68395 + spin_unlock(ptl_m);
68396 + pte_unmap(pte_m);
68397 + unlock_page(page_m);
68398 +}
68399 +
68400 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68401 +{
68402 + struct mm_struct *mm = vma->vm_mm;
68403 + unsigned long address_m;
68404 + spinlock_t *ptl_m;
68405 + struct vm_area_struct *vma_m;
68406 + pmd_t *pmd_m;
68407 + pte_t *pte_m, entry_m;
68408 +
68409 + BUG_ON(!page_m || PageAnon(page_m));
68410 +
68411 + vma_m = pax_find_mirror_vma(vma);
68412 + if (!vma_m)
68413 + return;
68414 +
68415 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68416 + address_m = address + SEGMEXEC_TASK_SIZE;
68417 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68418 + pte_m = pte_offset_map(pmd_m, address_m);
68419 + ptl_m = pte_lockptr(mm, pmd_m);
68420 + if (ptl != ptl_m) {
68421 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68422 + if (!pte_none(*pte_m))
68423 + goto out;
68424 + }
68425 +
68426 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68427 + page_cache_get(page_m);
68428 + page_add_file_rmap(page_m);
68429 + inc_mm_counter_fast(mm, MM_FILEPAGES);
68430 + set_pte_at(mm, address_m, pte_m, entry_m);
68431 + update_mmu_cache(vma_m, address_m, entry_m);
68432 +out:
68433 + if (ptl != ptl_m)
68434 + spin_unlock(ptl_m);
68435 + pte_unmap(pte_m);
68436 +}
68437 +
68438 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68439 +{
68440 + struct mm_struct *mm = vma->vm_mm;
68441 + unsigned long address_m;
68442 + spinlock_t *ptl_m;
68443 + struct vm_area_struct *vma_m;
68444 + pmd_t *pmd_m;
68445 + pte_t *pte_m, entry_m;
68446 +
68447 + vma_m = pax_find_mirror_vma(vma);
68448 + if (!vma_m)
68449 + return;
68450 +
68451 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68452 + address_m = address + SEGMEXEC_TASK_SIZE;
68453 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68454 + pte_m = pte_offset_map(pmd_m, address_m);
68455 + ptl_m = pte_lockptr(mm, pmd_m);
68456 + if (ptl != ptl_m) {
68457 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68458 + if (!pte_none(*pte_m))
68459 + goto out;
68460 + }
68461 +
68462 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68463 + set_pte_at(mm, address_m, pte_m, entry_m);
68464 +out:
68465 + if (ptl != ptl_m)
68466 + spin_unlock(ptl_m);
68467 + pte_unmap(pte_m);
68468 +}
68469 +
68470 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68471 +{
68472 + struct page *page_m;
68473 + pte_t entry;
68474 +
68475 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68476 + goto out;
68477 +
68478 + entry = *pte;
68479 + page_m = vm_normal_page(vma, address, entry);
68480 + if (!page_m)
68481 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68482 + else if (PageAnon(page_m)) {
68483 + if (pax_find_mirror_vma(vma)) {
68484 + pte_unmap_unlock(pte, ptl);
68485 + lock_page(page_m);
68486 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68487 + if (pte_same(entry, *pte))
68488 + pax_mirror_anon_pte(vma, address, page_m, ptl);
68489 + else
68490 + unlock_page(page_m);
68491 + }
68492 + } else
68493 + pax_mirror_file_pte(vma, address, page_m, ptl);
68494 +
68495 +out:
68496 + pte_unmap_unlock(pte, ptl);
68497 +}
68498 +#endif
68499 +
68500 /*
68501 * This routine handles present pages, when users try to write
68502 * to a shared page. It is done by copying the page to a new address
68503 @@ -2656,6 +2849,12 @@ gotten:
68504 */
68505 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68506 if (likely(pte_same(*page_table, orig_pte))) {
68507 +
68508 +#ifdef CONFIG_PAX_SEGMEXEC
68509 + if (pax_find_mirror_vma(vma))
68510 + BUG_ON(!trylock_page(new_page));
68511 +#endif
68512 +
68513 if (old_page) {
68514 if (!PageAnon(old_page)) {
68515 dec_mm_counter_fast(mm, MM_FILEPAGES);
68516 @@ -2707,6 +2906,10 @@ gotten:
68517 page_remove_rmap(old_page);
68518 }
68519
68520 +#ifdef CONFIG_PAX_SEGMEXEC
68521 + pax_mirror_anon_pte(vma, address, new_page, ptl);
68522 +#endif
68523 +
68524 /* Free the old page.. */
68525 new_page = old_page;
68526 ret |= VM_FAULT_WRITE;
68527 @@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68528 swap_free(entry);
68529 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68530 try_to_free_swap(page);
68531 +
68532 +#ifdef CONFIG_PAX_SEGMEXEC
68533 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68534 +#endif
68535 +
68536 unlock_page(page);
68537 if (swapcache) {
68538 /*
68539 @@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68540
68541 /* No need to invalidate - it was non-present before */
68542 update_mmu_cache(vma, address, page_table);
68543 +
68544 +#ifdef CONFIG_PAX_SEGMEXEC
68545 + pax_mirror_anon_pte(vma, address, page, ptl);
68546 +#endif
68547 +
68548 unlock:
68549 pte_unmap_unlock(page_table, ptl);
68550 out:
68551 @@ -3028,40 +3241,6 @@ out_release:
68552 }
68553
68554 /*
68555 - * This is like a special single-page "expand_{down|up}wards()",
68556 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
68557 - * doesn't hit another vma.
68558 - */
68559 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68560 -{
68561 - address &= PAGE_MASK;
68562 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68563 - struct vm_area_struct *prev = vma->vm_prev;
68564 -
68565 - /*
68566 - * Is there a mapping abutting this one below?
68567 - *
68568 - * That's only ok if it's the same stack mapping
68569 - * that has gotten split..
68570 - */
68571 - if (prev && prev->vm_end == address)
68572 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68573 -
68574 - expand_downwards(vma, address - PAGE_SIZE);
68575 - }
68576 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68577 - struct vm_area_struct *next = vma->vm_next;
68578 -
68579 - /* As VM_GROWSDOWN but s/below/above/ */
68580 - if (next && next->vm_start == address + PAGE_SIZE)
68581 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68582 -
68583 - expand_upwards(vma, address + PAGE_SIZE);
68584 - }
68585 - return 0;
68586 -}
68587 -
68588 -/*
68589 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68590 * but allow concurrent faults), and pte mapped but not yet locked.
68591 * We return with mmap_sem still held, but pte unmapped and unlocked.
68592 @@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68593 unsigned long address, pte_t *page_table, pmd_t *pmd,
68594 unsigned int flags)
68595 {
68596 - struct page *page;
68597 + struct page *page = NULL;
68598 spinlock_t *ptl;
68599 pte_t entry;
68600
68601 - pte_unmap(page_table);
68602 -
68603 - /* Check if we need to add a guard page to the stack */
68604 - if (check_stack_guard_page(vma, address) < 0)
68605 - return VM_FAULT_SIGBUS;
68606 -
68607 - /* Use the zero-page for reads */
68608 if (!(flags & FAULT_FLAG_WRITE)) {
68609 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68610 vma->vm_page_prot));
68611 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68612 + ptl = pte_lockptr(mm, pmd);
68613 + spin_lock(ptl);
68614 if (!pte_none(*page_table))
68615 goto unlock;
68616 goto setpte;
68617 }
68618
68619 /* Allocate our own private page. */
68620 + pte_unmap(page_table);
68621 +
68622 if (unlikely(anon_vma_prepare(vma)))
68623 goto oom;
68624 page = alloc_zeroed_user_highpage_movable(vma, address);
68625 @@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68626 if (!pte_none(*page_table))
68627 goto release;
68628
68629 +#ifdef CONFIG_PAX_SEGMEXEC
68630 + if (pax_find_mirror_vma(vma))
68631 + BUG_ON(!trylock_page(page));
68632 +#endif
68633 +
68634 inc_mm_counter_fast(mm, MM_ANONPAGES);
68635 page_add_new_anon_rmap(page, vma, address);
68636 setpte:
68637 @@ -3116,6 +3296,12 @@ setpte:
68638
68639 /* No need to invalidate - it was non-present before */
68640 update_mmu_cache(vma, address, page_table);
68641 +
68642 +#ifdef CONFIG_PAX_SEGMEXEC
68643 + if (page)
68644 + pax_mirror_anon_pte(vma, address, page, ptl);
68645 +#endif
68646 +
68647 unlock:
68648 pte_unmap_unlock(page_table, ptl);
68649 return 0;
68650 @@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68651 */
68652 /* Only go through if we didn't race with anybody else... */
68653 if (likely(pte_same(*page_table, orig_pte))) {
68654 +
68655 +#ifdef CONFIG_PAX_SEGMEXEC
68656 + if (anon && pax_find_mirror_vma(vma))
68657 + BUG_ON(!trylock_page(page));
68658 +#endif
68659 +
68660 flush_icache_page(vma, page);
68661 entry = mk_pte(page, vma->vm_page_prot);
68662 if (flags & FAULT_FLAG_WRITE)
68663 @@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68664
68665 /* no need to invalidate: a not-present page won't be cached */
68666 update_mmu_cache(vma, address, page_table);
68667 +
68668 +#ifdef CONFIG_PAX_SEGMEXEC
68669 + if (anon)
68670 + pax_mirror_anon_pte(vma, address, page, ptl);
68671 + else
68672 + pax_mirror_file_pte(vma, address, page, ptl);
68673 +#endif
68674 +
68675 } else {
68676 if (cow_page)
68677 mem_cgroup_uncharge_page(cow_page);
68678 @@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
68679 if (flags & FAULT_FLAG_WRITE)
68680 flush_tlb_fix_spurious_fault(vma, address);
68681 }
68682 +
68683 +#ifdef CONFIG_PAX_SEGMEXEC
68684 + pax_mirror_pte(vma, address, pte, pmd, ptl);
68685 + return 0;
68686 +#endif
68687 +
68688 unlock:
68689 pte_unmap_unlock(pte, ptl);
68690 return 0;
68691 @@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68692 pmd_t *pmd;
68693 pte_t *pte;
68694
68695 +#ifdef CONFIG_PAX_SEGMEXEC
68696 + struct vm_area_struct *vma_m;
68697 +#endif
68698 +
68699 __set_current_state(TASK_RUNNING);
68700
68701 count_vm_event(PGFAULT);
68702 @@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68703 if (unlikely(is_vm_hugetlb_page(vma)))
68704 return hugetlb_fault(mm, vma, address, flags);
68705
68706 +#ifdef CONFIG_PAX_SEGMEXEC
68707 + vma_m = pax_find_mirror_vma(vma);
68708 + if (vma_m) {
68709 + unsigned long address_m;
68710 + pgd_t *pgd_m;
68711 + pud_t *pud_m;
68712 + pmd_t *pmd_m;
68713 +
68714 + if (vma->vm_start > vma_m->vm_start) {
68715 + address_m = address;
68716 + address -= SEGMEXEC_TASK_SIZE;
68717 + vma = vma_m;
68718 + } else
68719 + address_m = address + SEGMEXEC_TASK_SIZE;
68720 +
68721 + pgd_m = pgd_offset(mm, address_m);
68722 + pud_m = pud_alloc(mm, pgd_m, address_m);
68723 + if (!pud_m)
68724 + return VM_FAULT_OOM;
68725 + pmd_m = pmd_alloc(mm, pud_m, address_m);
68726 + if (!pmd_m)
68727 + return VM_FAULT_OOM;
68728 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68729 + return VM_FAULT_OOM;
68730 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68731 + }
68732 +#endif
68733 +
68734 pgd = pgd_offset(mm, address);
68735 pud = pud_alloc(mm, pgd, address);
68736 if (!pud)
68737 @@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68738 * run pte_offset_map on the pmd, if an huge pmd could
68739 * materialize from under us from a different thread.
68740 */
68741 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68742 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68743 return VM_FAULT_OOM;
68744 /* if an huge pmd materialized from under us just retry later */
68745 if (unlikely(pmd_trans_huge(*pmd)))
68746 @@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68747 gate_vma.vm_start = FIXADDR_USER_START;
68748 gate_vma.vm_end = FIXADDR_USER_END;
68749 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68750 - gate_vma.vm_page_prot = __P101;
68751 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68752 /*
68753 * Make sure the vDSO gets into every core dump.
68754 * Dumping its contents makes post-mortem fully interpretable later
68755 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
68756 index c3fdbcb..2e8ef90 100644
68757 --- a/mm/mempolicy.c
68758 +++ b/mm/mempolicy.c
68759 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68760 unsigned long vmstart;
68761 unsigned long vmend;
68762
68763 +#ifdef CONFIG_PAX_SEGMEXEC
68764 + struct vm_area_struct *vma_m;
68765 +#endif
68766 +
68767 vma = find_vma_prev(mm, start, &prev);
68768 if (!vma || vma->vm_start > start)
68769 return -EFAULT;
68770 @@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68771 err = policy_vma(vma, new_pol);
68772 if (err)
68773 goto out;
68774 +
68775 +#ifdef CONFIG_PAX_SEGMEXEC
68776 + vma_m = pax_find_mirror_vma(vma);
68777 + if (vma_m) {
68778 + err = policy_vma(vma_m, new_pol);
68779 + if (err)
68780 + goto out;
68781 + }
68782 +#endif
68783 +
68784 }
68785
68786 out:
68787 @@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
68788
68789 if (end < start)
68790 return -EINVAL;
68791 +
68792 +#ifdef CONFIG_PAX_SEGMEXEC
68793 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68794 + if (end > SEGMEXEC_TASK_SIZE)
68795 + return -EINVAL;
68796 + } else
68797 +#endif
68798 +
68799 + if (end > TASK_SIZE)
68800 + return -EINVAL;
68801 +
68802 if (end == start)
68803 return 0;
68804
68805 @@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68806 if (!mm)
68807 goto out;
68808
68809 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68810 + if (mm != current->mm &&
68811 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68812 + err = -EPERM;
68813 + goto out;
68814 + }
68815 +#endif
68816 +
68817 /*
68818 * Check if this process has the right to modify the specified
68819 * process. The right exists if the process has administrative
68820 @@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68821 rcu_read_lock();
68822 tcred = __task_cred(task);
68823 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68824 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68825 - !capable(CAP_SYS_NICE)) {
68826 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68827 rcu_read_unlock();
68828 err = -EPERM;
68829 goto out;
68830 diff --git a/mm/migrate.c b/mm/migrate.c
68831 index 177aca4..ab3a744 100644
68832 --- a/mm/migrate.c
68833 +++ b/mm/migrate.c
68834 @@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68835 if (!mm)
68836 return -EINVAL;
68837
68838 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68839 + if (mm != current->mm &&
68840 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68841 + err = -EPERM;
68842 + goto out;
68843 + }
68844 +#endif
68845 +
68846 /*
68847 * Check if this process has the right to modify the specified
68848 * process. The right exists if the process has administrative
68849 @@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68850 rcu_read_lock();
68851 tcred = __task_cred(task);
68852 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68853 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68854 - !capable(CAP_SYS_NICE)) {
68855 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68856 rcu_read_unlock();
68857 err = -EPERM;
68858 goto out;
68859 diff --git a/mm/mlock.c b/mm/mlock.c
68860 index 4f4f53b..9511904 100644
68861 --- a/mm/mlock.c
68862 +++ b/mm/mlock.c
68863 @@ -13,6 +13,7 @@
68864 #include <linux/pagemap.h>
68865 #include <linux/mempolicy.h>
68866 #include <linux/syscalls.h>
68867 +#include <linux/security.h>
68868 #include <linux/sched.h>
68869 #include <linux/export.h>
68870 #include <linux/rmap.h>
68871 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
68872 return -EINVAL;
68873 if (end == start)
68874 return 0;
68875 + if (end > TASK_SIZE)
68876 + return -EINVAL;
68877 +
68878 vma = find_vma_prev(current->mm, start, &prev);
68879 if (!vma || vma->vm_start > start)
68880 return -ENOMEM;
68881 @@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
68882 for (nstart = start ; ; ) {
68883 vm_flags_t newflags;
68884
68885 +#ifdef CONFIG_PAX_SEGMEXEC
68886 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68887 + break;
68888 +#endif
68889 +
68890 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68891
68892 newflags = vma->vm_flags | VM_LOCKED;
68893 @@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
68894 lock_limit >>= PAGE_SHIFT;
68895
68896 /* check against resource limits */
68897 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68898 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68899 error = do_mlock(start, len, 1);
68900 up_write(&current->mm->mmap_sem);
68901 @@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
68902 static int do_mlockall(int flags)
68903 {
68904 struct vm_area_struct * vma, * prev = NULL;
68905 - unsigned int def_flags = 0;
68906
68907 if (flags & MCL_FUTURE)
68908 - def_flags = VM_LOCKED;
68909 - current->mm->def_flags = def_flags;
68910 + current->mm->def_flags |= VM_LOCKED;
68911 + else
68912 + current->mm->def_flags &= ~VM_LOCKED;
68913 if (flags == MCL_FUTURE)
68914 goto out;
68915
68916 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68917 vm_flags_t newflags;
68918
68919 +#ifdef CONFIG_PAX_SEGMEXEC
68920 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68921 + break;
68922 +#endif
68923 +
68924 + BUG_ON(vma->vm_end > TASK_SIZE);
68925 newflags = vma->vm_flags | VM_LOCKED;
68926 if (!(flags & MCL_CURRENT))
68927 newflags &= ~VM_LOCKED;
68928 @@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68929 lock_limit >>= PAGE_SHIFT;
68930
68931 ret = -ENOMEM;
68932 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68933 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68934 capable(CAP_IPC_LOCK))
68935 ret = do_mlockall(flags);
68936 diff --git a/mm/mmap.c b/mm/mmap.c
68937 index eae90af..44552cf 100644
68938 --- a/mm/mmap.c
68939 +++ b/mm/mmap.c
68940 @@ -46,6 +46,16 @@
68941 #define arch_rebalance_pgtables(addr, len) (addr)
68942 #endif
68943
68944 +static inline void verify_mm_writelocked(struct mm_struct *mm)
68945 +{
68946 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68947 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68948 + up_read(&mm->mmap_sem);
68949 + BUG();
68950 + }
68951 +#endif
68952 +}
68953 +
68954 static void unmap_region(struct mm_struct *mm,
68955 struct vm_area_struct *vma, struct vm_area_struct *prev,
68956 unsigned long start, unsigned long end);
68957 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
68958 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68959 *
68960 */
68961 -pgprot_t protection_map[16] = {
68962 +pgprot_t protection_map[16] __read_only = {
68963 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68964 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68965 };
68966
68967 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
68968 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68969 {
68970 - return __pgprot(pgprot_val(protection_map[vm_flags &
68971 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68972 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68973 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68974 +
68975 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68976 + if (!(__supported_pte_mask & _PAGE_NX) &&
68977 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68978 + (vm_flags & (VM_READ | VM_WRITE)))
68979 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68980 +#endif
68981 +
68982 + return prot;
68983 }
68984 EXPORT_SYMBOL(vm_get_page_prot);
68985
68986 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68987 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68988 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68989 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68990 /*
68991 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68992 * other variables. It can be updated by several CPUs frequently.
68993 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
68994 struct vm_area_struct *next = vma->vm_next;
68995
68996 might_sleep();
68997 + BUG_ON(vma->vm_mirror);
68998 if (vma->vm_ops && vma->vm_ops->close)
68999 vma->vm_ops->close(vma);
69000 if (vma->vm_file) {
69001 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
69002 * not page aligned -Ram Gupta
69003 */
69004 rlim = rlimit(RLIMIT_DATA);
69005 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
69006 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
69007 (mm->end_data - mm->start_data) > rlim)
69008 goto out;
69009 @@ -689,6 +711,12 @@ static int
69010 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
69011 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
69012 {
69013 +
69014 +#ifdef CONFIG_PAX_SEGMEXEC
69015 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
69016 + return 0;
69017 +#endif
69018 +
69019 if (is_mergeable_vma(vma, file, vm_flags) &&
69020 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
69021 if (vma->vm_pgoff == vm_pgoff)
69022 @@ -708,6 +736,12 @@ static int
69023 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
69024 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
69025 {
69026 +
69027 +#ifdef CONFIG_PAX_SEGMEXEC
69028 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
69029 + return 0;
69030 +#endif
69031 +
69032 if (is_mergeable_vma(vma, file, vm_flags) &&
69033 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
69034 pgoff_t vm_pglen;
69035 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
69036 struct vm_area_struct *vma_merge(struct mm_struct *mm,
69037 struct vm_area_struct *prev, unsigned long addr,
69038 unsigned long end, unsigned long vm_flags,
69039 - struct anon_vma *anon_vma, struct file *file,
69040 + struct anon_vma *anon_vma, struct file *file,
69041 pgoff_t pgoff, struct mempolicy *policy)
69042 {
69043 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
69044 struct vm_area_struct *area, *next;
69045 int err;
69046
69047 +#ifdef CONFIG_PAX_SEGMEXEC
69048 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
69049 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
69050 +
69051 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
69052 +#endif
69053 +
69054 /*
69055 * We later require that vma->vm_flags == vm_flags,
69056 * so this tests vma->vm_flags & VM_SPECIAL, too.
69057 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69058 if (next && next->vm_end == end) /* cases 6, 7, 8 */
69059 next = next->vm_next;
69060
69061 +#ifdef CONFIG_PAX_SEGMEXEC
69062 + if (prev)
69063 + prev_m = pax_find_mirror_vma(prev);
69064 + if (area)
69065 + area_m = pax_find_mirror_vma(area);
69066 + if (next)
69067 + next_m = pax_find_mirror_vma(next);
69068 +#endif
69069 +
69070 /*
69071 * Can it merge with the predecessor?
69072 */
69073 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69074 /* cases 1, 6 */
69075 err = vma_adjust(prev, prev->vm_start,
69076 next->vm_end, prev->vm_pgoff, NULL);
69077 - } else /* cases 2, 5, 7 */
69078 +
69079 +#ifdef CONFIG_PAX_SEGMEXEC
69080 + if (!err && prev_m)
69081 + err = vma_adjust(prev_m, prev_m->vm_start,
69082 + next_m->vm_end, prev_m->vm_pgoff, NULL);
69083 +#endif
69084 +
69085 + } else { /* cases 2, 5, 7 */
69086 err = vma_adjust(prev, prev->vm_start,
69087 end, prev->vm_pgoff, NULL);
69088 +
69089 +#ifdef CONFIG_PAX_SEGMEXEC
69090 + if (!err && prev_m)
69091 + err = vma_adjust(prev_m, prev_m->vm_start,
69092 + end_m, prev_m->vm_pgoff, NULL);
69093 +#endif
69094 +
69095 + }
69096 if (err)
69097 return NULL;
69098 khugepaged_enter_vma_merge(prev);
69099 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
69100 mpol_equal(policy, vma_policy(next)) &&
69101 can_vma_merge_before(next, vm_flags,
69102 anon_vma, file, pgoff+pglen)) {
69103 - if (prev && addr < prev->vm_end) /* case 4 */
69104 + if (prev && addr < prev->vm_end) { /* case 4 */
69105 err = vma_adjust(prev, prev->vm_start,
69106 addr, prev->vm_pgoff, NULL);
69107 - else /* cases 3, 8 */
69108 +
69109 +#ifdef CONFIG_PAX_SEGMEXEC
69110 + if (!err && prev_m)
69111 + err = vma_adjust(prev_m, prev_m->vm_start,
69112 + addr_m, prev_m->vm_pgoff, NULL);
69113 +#endif
69114 +
69115 + } else { /* cases 3, 8 */
69116 err = vma_adjust(area, addr, next->vm_end,
69117 next->vm_pgoff - pglen, NULL);
69118 +
69119 +#ifdef CONFIG_PAX_SEGMEXEC
69120 + if (!err && area_m)
69121 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
69122 + next_m->vm_pgoff - pglen, NULL);
69123 +#endif
69124 +
69125 + }
69126 if (err)
69127 return NULL;
69128 khugepaged_enter_vma_merge(area);
69129 @@ -921,14 +1001,11 @@ none:
69130 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
69131 struct file *file, long pages)
69132 {
69133 - const unsigned long stack_flags
69134 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
69135 -
69136 if (file) {
69137 mm->shared_vm += pages;
69138 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
69139 mm->exec_vm += pages;
69140 - } else if (flags & stack_flags)
69141 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
69142 mm->stack_vm += pages;
69143 if (flags & (VM_RESERVED|VM_IO))
69144 mm->reserved_vm += pages;
69145 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69146 * (the exception is when the underlying filesystem is noexec
69147 * mounted, in which case we dont add PROT_EXEC.)
69148 */
69149 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69150 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69151 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
69152 prot |= PROT_EXEC;
69153
69154 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69155 /* Obtain the address to map to. we verify (or select) it and ensure
69156 * that it represents a valid section of the address space.
69157 */
69158 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
69159 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
69160 if (addr & ~PAGE_MASK)
69161 return addr;
69162
69163 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69164 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
69165 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
69166
69167 +#ifdef CONFIG_PAX_MPROTECT
69168 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69169 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69170 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
69171 + gr_log_rwxmmap(file);
69172 +
69173 +#ifdef CONFIG_PAX_EMUPLT
69174 + vm_flags &= ~VM_EXEC;
69175 +#else
69176 + return -EPERM;
69177 +#endif
69178 +
69179 + }
69180 +
69181 + if (!(vm_flags & VM_EXEC))
69182 + vm_flags &= ~VM_MAYEXEC;
69183 +#else
69184 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69185 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69186 +#endif
69187 + else
69188 + vm_flags &= ~VM_MAYWRITE;
69189 + }
69190 +#endif
69191 +
69192 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69193 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
69194 + vm_flags &= ~VM_PAGEEXEC;
69195 +#endif
69196 +
69197 if (flags & MAP_LOCKED)
69198 if (!can_do_mlock())
69199 return -EPERM;
69200 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69201 locked += mm->locked_vm;
69202 lock_limit = rlimit(RLIMIT_MEMLOCK);
69203 lock_limit >>= PAGE_SHIFT;
69204 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69205 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
69206 return -EAGAIN;
69207 }
69208 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
69209 if (error)
69210 return error;
69211
69212 + if (!gr_acl_handle_mmap(file, prot))
69213 + return -EACCES;
69214 +
69215 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
69216 }
69217 EXPORT_SYMBOL(do_mmap_pgoff);
69218 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
69219 vm_flags_t vm_flags = vma->vm_flags;
69220
69221 /* If it was private or non-writable, the write bit is already clear */
69222 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
69223 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
69224 return 0;
69225
69226 /* The backer wishes to know when pages are first written to? */
69227 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
69228 unsigned long charged = 0;
69229 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
69230
69231 +#ifdef CONFIG_PAX_SEGMEXEC
69232 + struct vm_area_struct *vma_m = NULL;
69233 +#endif
69234 +
69235 + /*
69236 + * mm->mmap_sem is required to protect against another thread
69237 + * changing the mappings in case we sleep.
69238 + */
69239 + verify_mm_writelocked(mm);
69240 +
69241 /* Clear old maps */
69242 error = -ENOMEM;
69243 -munmap_back:
69244 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69245 if (vma && vma->vm_start < addr + len) {
69246 if (do_munmap(mm, addr, len))
69247 return -ENOMEM;
69248 - goto munmap_back;
69249 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69250 + BUG_ON(vma && vma->vm_start < addr + len);
69251 }
69252
69253 /* Check against address space limit. */
69254 @@ -1258,6 +1379,16 @@ munmap_back:
69255 goto unacct_error;
69256 }
69257
69258 +#ifdef CONFIG_PAX_SEGMEXEC
69259 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
69260 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69261 + if (!vma_m) {
69262 + error = -ENOMEM;
69263 + goto free_vma;
69264 + }
69265 + }
69266 +#endif
69267 +
69268 vma->vm_mm = mm;
69269 vma->vm_start = addr;
69270 vma->vm_end = addr + len;
69271 @@ -1281,6 +1412,19 @@ munmap_back:
69272 error = file->f_op->mmap(file, vma);
69273 if (error)
69274 goto unmap_and_free_vma;
69275 +
69276 +#ifdef CONFIG_PAX_SEGMEXEC
69277 + if (vma_m && (vm_flags & VM_EXECUTABLE))
69278 + added_exe_file_vma(mm);
69279 +#endif
69280 +
69281 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69282 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
69283 + vma->vm_flags |= VM_PAGEEXEC;
69284 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69285 + }
69286 +#endif
69287 +
69288 if (vm_flags & VM_EXECUTABLE)
69289 added_exe_file_vma(mm);
69290
69291 @@ -1316,6 +1460,11 @@ munmap_back:
69292 vma_link(mm, vma, prev, rb_link, rb_parent);
69293 file = vma->vm_file;
69294
69295 +#ifdef CONFIG_PAX_SEGMEXEC
69296 + if (vma_m)
69297 + BUG_ON(pax_mirror_vma(vma_m, vma));
69298 +#endif
69299 +
69300 /* Once vma denies write, undo our temporary denial count */
69301 if (correct_wcount)
69302 atomic_inc(&inode->i_writecount);
69303 @@ -1324,6 +1473,7 @@ out:
69304
69305 mm->total_vm += len >> PAGE_SHIFT;
69306 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
69307 + track_exec_limit(mm, addr, addr + len, vm_flags);
69308 if (vm_flags & VM_LOCKED) {
69309 if (!mlock_vma_pages_range(vma, addr, addr + len))
69310 mm->locked_vm += (len >> PAGE_SHIFT);
69311 @@ -1341,6 +1491,12 @@ unmap_and_free_vma:
69312 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
69313 charged = 0;
69314 free_vma:
69315 +
69316 +#ifdef CONFIG_PAX_SEGMEXEC
69317 + if (vma_m)
69318 + kmem_cache_free(vm_area_cachep, vma_m);
69319 +#endif
69320 +
69321 kmem_cache_free(vm_area_cachep, vma);
69322 unacct_error:
69323 if (charged)
69324 @@ -1348,6 +1504,44 @@ unacct_error:
69325 return error;
69326 }
69327
69328 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
69329 +{
69330 + if (!vma) {
69331 +#ifdef CONFIG_STACK_GROWSUP
69332 + if (addr > sysctl_heap_stack_gap)
69333 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
69334 + else
69335 + vma = find_vma(current->mm, 0);
69336 + if (vma && (vma->vm_flags & VM_GROWSUP))
69337 + return false;
69338 +#endif
69339 + return true;
69340 + }
69341 +
69342 + if (addr + len > vma->vm_start)
69343 + return false;
69344 +
69345 + if (vma->vm_flags & VM_GROWSDOWN)
69346 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
69347 +#ifdef CONFIG_STACK_GROWSUP
69348 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
69349 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
69350 +#endif
69351 +
69352 + return true;
69353 +}
69354 +
69355 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
69356 +{
69357 + if (vma->vm_start < len)
69358 + return -ENOMEM;
69359 + if (!(vma->vm_flags & VM_GROWSDOWN))
69360 + return vma->vm_start - len;
69361 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
69362 + return vma->vm_start - len - sysctl_heap_stack_gap;
69363 + return -ENOMEM;
69364 +}
69365 +
69366 /* Get an address range which is currently unmapped.
69367 * For shmat() with addr=0.
69368 *
69369 @@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
69370 if (flags & MAP_FIXED)
69371 return addr;
69372
69373 +#ifdef CONFIG_PAX_RANDMMAP
69374 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69375 +#endif
69376 +
69377 if (addr) {
69378 addr = PAGE_ALIGN(addr);
69379 - vma = find_vma(mm, addr);
69380 - if (TASK_SIZE - len >= addr &&
69381 - (!vma || addr + len <= vma->vm_start))
69382 - return addr;
69383 + if (TASK_SIZE - len >= addr) {
69384 + vma = find_vma(mm, addr);
69385 + if (check_heap_stack_gap(vma, addr, len))
69386 + return addr;
69387 + }
69388 }
69389 if (len > mm->cached_hole_size) {
69390 - start_addr = addr = mm->free_area_cache;
69391 + start_addr = addr = mm->free_area_cache;
69392 } else {
69393 - start_addr = addr = TASK_UNMAPPED_BASE;
69394 - mm->cached_hole_size = 0;
69395 + start_addr = addr = mm->mmap_base;
69396 + mm->cached_hole_size = 0;
69397 }
69398
69399 full_search:
69400 @@ -1396,34 +1595,40 @@ full_search:
69401 * Start a new search - just in case we missed
69402 * some holes.
69403 */
69404 - if (start_addr != TASK_UNMAPPED_BASE) {
69405 - addr = TASK_UNMAPPED_BASE;
69406 - start_addr = addr;
69407 + if (start_addr != mm->mmap_base) {
69408 + start_addr = addr = mm->mmap_base;
69409 mm->cached_hole_size = 0;
69410 goto full_search;
69411 }
69412 return -ENOMEM;
69413 }
69414 - if (!vma || addr + len <= vma->vm_start) {
69415 - /*
69416 - * Remember the place where we stopped the search:
69417 - */
69418 - mm->free_area_cache = addr + len;
69419 - return addr;
69420 - }
69421 + if (check_heap_stack_gap(vma, addr, len))
69422 + break;
69423 if (addr + mm->cached_hole_size < vma->vm_start)
69424 mm->cached_hole_size = vma->vm_start - addr;
69425 addr = vma->vm_end;
69426 }
69427 +
69428 + /*
69429 + * Remember the place where we stopped the search:
69430 + */
69431 + mm->free_area_cache = addr + len;
69432 + return addr;
69433 }
69434 #endif
69435
69436 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69437 {
69438 +
69439 +#ifdef CONFIG_PAX_SEGMEXEC
69440 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69441 + return;
69442 +#endif
69443 +
69444 /*
69445 * Is this a new hole at the lowest possible address?
69446 */
69447 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69448 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69449 mm->free_area_cache = addr;
69450 mm->cached_hole_size = ~0UL;
69451 }
69452 @@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69453 {
69454 struct vm_area_struct *vma;
69455 struct mm_struct *mm = current->mm;
69456 - unsigned long addr = addr0;
69457 + unsigned long base = mm->mmap_base, addr = addr0;
69458
69459 /* requested length too big for entire address space */
69460 if (len > TASK_SIZE)
69461 @@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69462 if (flags & MAP_FIXED)
69463 return addr;
69464
69465 +#ifdef CONFIG_PAX_RANDMMAP
69466 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69467 +#endif
69468 +
69469 /* requesting a specific address */
69470 if (addr) {
69471 addr = PAGE_ALIGN(addr);
69472 - vma = find_vma(mm, addr);
69473 - if (TASK_SIZE - len >= addr &&
69474 - (!vma || addr + len <= vma->vm_start))
69475 - return addr;
69476 + if (TASK_SIZE - len >= addr) {
69477 + vma = find_vma(mm, addr);
69478 + if (check_heap_stack_gap(vma, addr, len))
69479 + return addr;
69480 + }
69481 }
69482
69483 /* check if free_area_cache is useful for us */
69484 @@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69485 /* make sure it can fit in the remaining address space */
69486 if (addr > len) {
69487 vma = find_vma(mm, addr-len);
69488 - if (!vma || addr <= vma->vm_start)
69489 + if (check_heap_stack_gap(vma, addr - len, len))
69490 /* remember the address as a hint for next time */
69491 return (mm->free_area_cache = addr-len);
69492 }
69493 @@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69494 * return with success:
69495 */
69496 vma = find_vma(mm, addr);
69497 - if (!vma || addr+len <= vma->vm_start)
69498 + if (check_heap_stack_gap(vma, addr, len))
69499 /* remember the address as a hint for next time */
69500 return (mm->free_area_cache = addr);
69501
69502 @@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69503 mm->cached_hole_size = vma->vm_start - addr;
69504
69505 /* try just below the current vma->vm_start */
69506 - addr = vma->vm_start-len;
69507 - } while (len < vma->vm_start);
69508 + addr = skip_heap_stack_gap(vma, len);
69509 + } while (!IS_ERR_VALUE(addr));
69510
69511 bottomup:
69512 /*
69513 @@ -1507,13 +1717,21 @@ bottomup:
69514 * can happen with large stack limits and large mmap()
69515 * allocations.
69516 */
69517 + mm->mmap_base = TASK_UNMAPPED_BASE;
69518 +
69519 +#ifdef CONFIG_PAX_RANDMMAP
69520 + if (mm->pax_flags & MF_PAX_RANDMMAP)
69521 + mm->mmap_base += mm->delta_mmap;
69522 +#endif
69523 +
69524 + mm->free_area_cache = mm->mmap_base;
69525 mm->cached_hole_size = ~0UL;
69526 - mm->free_area_cache = TASK_UNMAPPED_BASE;
69527 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69528 /*
69529 * Restore the topdown base:
69530 */
69531 - mm->free_area_cache = mm->mmap_base;
69532 + mm->mmap_base = base;
69533 + mm->free_area_cache = base;
69534 mm->cached_hole_size = ~0UL;
69535
69536 return addr;
69537 @@ -1522,6 +1740,12 @@ bottomup:
69538
69539 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69540 {
69541 +
69542 +#ifdef CONFIG_PAX_SEGMEXEC
69543 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69544 + return;
69545 +#endif
69546 +
69547 /*
69548 * Is this a new hole at the highest possible address?
69549 */
69550 @@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69551 mm->free_area_cache = addr;
69552
69553 /* dont allow allocations above current base */
69554 - if (mm->free_area_cache > mm->mmap_base)
69555 + if (mm->free_area_cache > mm->mmap_base) {
69556 mm->free_area_cache = mm->mmap_base;
69557 + mm->cached_hole_size = ~0UL;
69558 + }
69559 }
69560
69561 unsigned long
69562 @@ -1603,40 +1829,42 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
69563
69564 EXPORT_SYMBOL(find_vma);
69565
69566 -/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
69567 +/*
69568 + * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
69569 + * Note: pprev is set to NULL when return value is NULL.
69570 + */
69571 struct vm_area_struct *
69572 find_vma_prev(struct mm_struct *mm, unsigned long addr,
69573 struct vm_area_struct **pprev)
69574 {
69575 - struct vm_area_struct *vma = NULL, *prev = NULL;
69576 - struct rb_node *rb_node;
69577 - if (!mm)
69578 - goto out;
69579 + struct vm_area_struct *vma;
69580
69581 - /* Guard against addr being lower than the first VMA */
69582 - vma = mm->mmap;
69583 + vma = find_vma(mm, addr);
69584 + *pprev = vma ? vma->vm_prev : NULL;
69585 + return vma;
69586 +}
69587
69588 - /* Go through the RB tree quickly. */
69589 - rb_node = mm->mm_rb.rb_node;
69590 +#ifdef CONFIG_PAX_SEGMEXEC
69591 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69592 +{
69593 + struct vm_area_struct *vma_m;
69594
69595 - while (rb_node) {
69596 - struct vm_area_struct *vma_tmp;
69597 - vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
69598 -
69599 - if (addr < vma_tmp->vm_end) {
69600 - rb_node = rb_node->rb_left;
69601 - } else {
69602 - prev = vma_tmp;
69603 - if (!prev->vm_next || (addr < prev->vm_next->vm_end))
69604 - break;
69605 - rb_node = rb_node->rb_right;
69606 - }
69607 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69608 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69609 + BUG_ON(vma->vm_mirror);
69610 + return NULL;
69611 }
69612 -
69613 -out:
69614 - *pprev = prev;
69615 - return prev ? prev->vm_next : vma;
69616 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69617 + vma_m = vma->vm_mirror;
69618 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69619 + BUG_ON(vma->vm_file != vma_m->vm_file);
69620 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69621 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69622 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69623 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69624 + return vma_m;
69625 }
69626 +#endif
69627
69628 /*
69629 * Verify that the stack growth is acceptable and
69630 @@ -1654,6 +1882,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69631 return -ENOMEM;
69632
69633 /* Stack limit test */
69634 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
69635 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69636 return -ENOMEM;
69637
69638 @@ -1664,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69639 locked = mm->locked_vm + grow;
69640 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69641 limit >>= PAGE_SHIFT;
69642 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69643 if (locked > limit && !capable(CAP_IPC_LOCK))
69644 return -ENOMEM;
69645 }
69646 @@ -1694,37 +1924,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69647 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69648 * vma is the last one with address > vma->vm_end. Have to extend vma.
69649 */
69650 +#ifndef CONFIG_IA64
69651 +static
69652 +#endif
69653 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69654 {
69655 int error;
69656 + bool locknext;
69657
69658 if (!(vma->vm_flags & VM_GROWSUP))
69659 return -EFAULT;
69660
69661 + /* Also guard against wrapping around to address 0. */
69662 + if (address < PAGE_ALIGN(address+1))
69663 + address = PAGE_ALIGN(address+1);
69664 + else
69665 + return -ENOMEM;
69666 +
69667 /*
69668 * We must make sure the anon_vma is allocated
69669 * so that the anon_vma locking is not a noop.
69670 */
69671 if (unlikely(anon_vma_prepare(vma)))
69672 return -ENOMEM;
69673 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69674 + if (locknext && anon_vma_prepare(vma->vm_next))
69675 + return -ENOMEM;
69676 vma_lock_anon_vma(vma);
69677 + if (locknext)
69678 + vma_lock_anon_vma(vma->vm_next);
69679
69680 /*
69681 * vma->vm_start/vm_end cannot change under us because the caller
69682 * is required to hold the mmap_sem in read mode. We need the
69683 - * anon_vma lock to serialize against concurrent expand_stacks.
69684 - * Also guard against wrapping around to address 0.
69685 + * anon_vma locks to serialize against concurrent expand_stacks
69686 + * and expand_upwards.
69687 */
69688 - if (address < PAGE_ALIGN(address+4))
69689 - address = PAGE_ALIGN(address+4);
69690 - else {
69691 - vma_unlock_anon_vma(vma);
69692 - return -ENOMEM;
69693 - }
69694 error = 0;
69695
69696 /* Somebody else might have raced and expanded it already */
69697 - if (address > vma->vm_end) {
69698 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69699 + error = -ENOMEM;
69700 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69701 unsigned long size, grow;
69702
69703 size = address - vma->vm_start;
69704 @@ -1739,6 +1980,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69705 }
69706 }
69707 }
69708 + if (locknext)
69709 + vma_unlock_anon_vma(vma->vm_next);
69710 vma_unlock_anon_vma(vma);
69711 khugepaged_enter_vma_merge(vma);
69712 return error;
69713 @@ -1752,6 +1995,8 @@ int expand_downwards(struct vm_area_struct *vma,
69714 unsigned long address)
69715 {
69716 int error;
69717 + bool lockprev = false;
69718 + struct vm_area_struct *prev;
69719
69720 /*
69721 * We must make sure the anon_vma is allocated
69722 @@ -1765,6 +2010,15 @@ int expand_downwards(struct vm_area_struct *vma,
69723 if (error)
69724 return error;
69725
69726 + prev = vma->vm_prev;
69727 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69728 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69729 +#endif
69730 + if (lockprev && anon_vma_prepare(prev))
69731 + return -ENOMEM;
69732 + if (lockprev)
69733 + vma_lock_anon_vma(prev);
69734 +
69735 vma_lock_anon_vma(vma);
69736
69737 /*
69738 @@ -1774,9 +2028,17 @@ int expand_downwards(struct vm_area_struct *vma,
69739 */
69740
69741 /* Somebody else might have raced and expanded it already */
69742 - if (address < vma->vm_start) {
69743 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69744 + error = -ENOMEM;
69745 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69746 unsigned long size, grow;
69747
69748 +#ifdef CONFIG_PAX_SEGMEXEC
69749 + struct vm_area_struct *vma_m;
69750 +
69751 + vma_m = pax_find_mirror_vma(vma);
69752 +#endif
69753 +
69754 size = vma->vm_end - address;
69755 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69756
69757 @@ -1786,11 +2048,22 @@ int expand_downwards(struct vm_area_struct *vma,
69758 if (!error) {
69759 vma->vm_start = address;
69760 vma->vm_pgoff -= grow;
69761 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69762 +
69763 +#ifdef CONFIG_PAX_SEGMEXEC
69764 + if (vma_m) {
69765 + vma_m->vm_start -= grow << PAGE_SHIFT;
69766 + vma_m->vm_pgoff -= grow;
69767 + }
69768 +#endif
69769 +
69770 perf_event_mmap(vma);
69771 }
69772 }
69773 }
69774 vma_unlock_anon_vma(vma);
69775 + if (lockprev)
69776 + vma_unlock_anon_vma(prev);
69777 khugepaged_enter_vma_merge(vma);
69778 return error;
69779 }
69780 @@ -1860,6 +2133,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
69781 do {
69782 long nrpages = vma_pages(vma);
69783
69784 +#ifdef CONFIG_PAX_SEGMEXEC
69785 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69786 + vma = remove_vma(vma);
69787 + continue;
69788 + }
69789 +#endif
69790 +
69791 mm->total_vm -= nrpages;
69792 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69793 vma = remove_vma(vma);
69794 @@ -1905,6 +2185,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
69795 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69796 vma->vm_prev = NULL;
69797 do {
69798 +
69799 +#ifdef CONFIG_PAX_SEGMEXEC
69800 + if (vma->vm_mirror) {
69801 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69802 + vma->vm_mirror->vm_mirror = NULL;
69803 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
69804 + vma->vm_mirror = NULL;
69805 + }
69806 +#endif
69807 +
69808 rb_erase(&vma->vm_rb, &mm->mm_rb);
69809 mm->map_count--;
69810 tail_vma = vma;
69811 @@ -1933,14 +2223,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69812 struct vm_area_struct *new;
69813 int err = -ENOMEM;
69814
69815 +#ifdef CONFIG_PAX_SEGMEXEC
69816 + struct vm_area_struct *vma_m, *new_m = NULL;
69817 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69818 +#endif
69819 +
69820 if (is_vm_hugetlb_page(vma) && (addr &
69821 ~(huge_page_mask(hstate_vma(vma)))))
69822 return -EINVAL;
69823
69824 +#ifdef CONFIG_PAX_SEGMEXEC
69825 + vma_m = pax_find_mirror_vma(vma);
69826 +#endif
69827 +
69828 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69829 if (!new)
69830 goto out_err;
69831
69832 +#ifdef CONFIG_PAX_SEGMEXEC
69833 + if (vma_m) {
69834 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69835 + if (!new_m) {
69836 + kmem_cache_free(vm_area_cachep, new);
69837 + goto out_err;
69838 + }
69839 + }
69840 +#endif
69841 +
69842 /* most fields are the same, copy all, and then fixup */
69843 *new = *vma;
69844
69845 @@ -1953,6 +2262,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69846 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69847 }
69848
69849 +#ifdef CONFIG_PAX_SEGMEXEC
69850 + if (vma_m) {
69851 + *new_m = *vma_m;
69852 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
69853 + new_m->vm_mirror = new;
69854 + new->vm_mirror = new_m;
69855 +
69856 + if (new_below)
69857 + new_m->vm_end = addr_m;
69858 + else {
69859 + new_m->vm_start = addr_m;
69860 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69861 + }
69862 + }
69863 +#endif
69864 +
69865 pol = mpol_dup(vma_policy(vma));
69866 if (IS_ERR(pol)) {
69867 err = PTR_ERR(pol);
69868 @@ -1978,6 +2303,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69869 else
69870 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69871
69872 +#ifdef CONFIG_PAX_SEGMEXEC
69873 + if (!err && vma_m) {
69874 + if (anon_vma_clone(new_m, vma_m))
69875 + goto out_free_mpol;
69876 +
69877 + mpol_get(pol);
69878 + vma_set_policy(new_m, pol);
69879 +
69880 + if (new_m->vm_file) {
69881 + get_file(new_m->vm_file);
69882 + if (vma_m->vm_flags & VM_EXECUTABLE)
69883 + added_exe_file_vma(mm);
69884 + }
69885 +
69886 + if (new_m->vm_ops && new_m->vm_ops->open)
69887 + new_m->vm_ops->open(new_m);
69888 +
69889 + if (new_below)
69890 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69891 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69892 + else
69893 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69894 +
69895 + if (err) {
69896 + if (new_m->vm_ops && new_m->vm_ops->close)
69897 + new_m->vm_ops->close(new_m);
69898 + if (new_m->vm_file) {
69899 + if (vma_m->vm_flags & VM_EXECUTABLE)
69900 + removed_exe_file_vma(mm);
69901 + fput(new_m->vm_file);
69902 + }
69903 + mpol_put(pol);
69904 + }
69905 + }
69906 +#endif
69907 +
69908 /* Success. */
69909 if (!err)
69910 return 0;
69911 @@ -1990,10 +2351,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69912 removed_exe_file_vma(mm);
69913 fput(new->vm_file);
69914 }
69915 - unlink_anon_vmas(new);
69916 out_free_mpol:
69917 mpol_put(pol);
69918 out_free_vma:
69919 +
69920 +#ifdef CONFIG_PAX_SEGMEXEC
69921 + if (new_m) {
69922 + unlink_anon_vmas(new_m);
69923 + kmem_cache_free(vm_area_cachep, new_m);
69924 + }
69925 +#endif
69926 +
69927 + unlink_anon_vmas(new);
69928 kmem_cache_free(vm_area_cachep, new);
69929 out_err:
69930 return err;
69931 @@ -2006,6 +2375,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69932 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69933 unsigned long addr, int new_below)
69934 {
69935 +
69936 +#ifdef CONFIG_PAX_SEGMEXEC
69937 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69938 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69939 + if (mm->map_count >= sysctl_max_map_count-1)
69940 + return -ENOMEM;
69941 + } else
69942 +#endif
69943 +
69944 if (mm->map_count >= sysctl_max_map_count)
69945 return -ENOMEM;
69946
69947 @@ -2017,11 +2395,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69948 * work. This now handles partial unmappings.
69949 * Jeremy Fitzhardinge <jeremy@goop.org>
69950 */
69951 +#ifdef CONFIG_PAX_SEGMEXEC
69952 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69953 {
69954 + int ret = __do_munmap(mm, start, len);
69955 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69956 + return ret;
69957 +
69958 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69959 +}
69960 +
69961 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69962 +#else
69963 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69964 +#endif
69965 +{
69966 unsigned long end;
69967 struct vm_area_struct *vma, *prev, *last;
69968
69969 + /*
69970 + * mm->mmap_sem is required to protect against another thread
69971 + * changing the mappings in case we sleep.
69972 + */
69973 + verify_mm_writelocked(mm);
69974 +
69975 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69976 return -EINVAL;
69977
69978 @@ -2096,6 +2493,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69979 /* Fix up all other VM information */
69980 remove_vma_list(mm, vma);
69981
69982 + track_exec_limit(mm, start, end, 0UL);
69983 +
69984 return 0;
69985 }
69986
69987 @@ -2108,22 +2507,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
69988
69989 profile_munmap(addr);
69990
69991 +#ifdef CONFIG_PAX_SEGMEXEC
69992 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69993 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69994 + return -EINVAL;
69995 +#endif
69996 +
69997 down_write(&mm->mmap_sem);
69998 ret = do_munmap(mm, addr, len);
69999 up_write(&mm->mmap_sem);
70000 return ret;
70001 }
70002
70003 -static inline void verify_mm_writelocked(struct mm_struct *mm)
70004 -{
70005 -#ifdef CONFIG_DEBUG_VM
70006 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70007 - WARN_ON(1);
70008 - up_read(&mm->mmap_sem);
70009 - }
70010 -#endif
70011 -}
70012 -
70013 /*
70014 * this is really a simplified "do_mmap". it only handles
70015 * anonymous maps. eventually we may be able to do some
70016 @@ -2137,6 +2532,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70017 struct rb_node ** rb_link, * rb_parent;
70018 pgoff_t pgoff = addr >> PAGE_SHIFT;
70019 int error;
70020 + unsigned long charged;
70021
70022 len = PAGE_ALIGN(len);
70023 if (!len)
70024 @@ -2148,16 +2544,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70025
70026 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
70027
70028 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
70029 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
70030 + flags &= ~VM_EXEC;
70031 +
70032 +#ifdef CONFIG_PAX_MPROTECT
70033 + if (mm->pax_flags & MF_PAX_MPROTECT)
70034 + flags &= ~VM_MAYEXEC;
70035 +#endif
70036 +
70037 + }
70038 +#endif
70039 +
70040 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
70041 if (error & ~PAGE_MASK)
70042 return error;
70043
70044 + charged = len >> PAGE_SHIFT;
70045 +
70046 /*
70047 * mlock MCL_FUTURE?
70048 */
70049 if (mm->def_flags & VM_LOCKED) {
70050 unsigned long locked, lock_limit;
70051 - locked = len >> PAGE_SHIFT;
70052 + locked = charged;
70053 locked += mm->locked_vm;
70054 lock_limit = rlimit(RLIMIT_MEMLOCK);
70055 lock_limit >>= PAGE_SHIFT;
70056 @@ -2174,22 +2584,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70057 /*
70058 * Clear old maps. this also does some error checking for us
70059 */
70060 - munmap_back:
70061 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70062 if (vma && vma->vm_start < addr + len) {
70063 if (do_munmap(mm, addr, len))
70064 return -ENOMEM;
70065 - goto munmap_back;
70066 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70067 + BUG_ON(vma && vma->vm_start < addr + len);
70068 }
70069
70070 /* Check against address space limits *after* clearing old maps... */
70071 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
70072 + if (!may_expand_vm(mm, charged))
70073 return -ENOMEM;
70074
70075 if (mm->map_count > sysctl_max_map_count)
70076 return -ENOMEM;
70077
70078 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
70079 + if (security_vm_enough_memory(charged))
70080 return -ENOMEM;
70081
70082 /* Can we just expand an old private anonymous mapping? */
70083 @@ -2203,7 +2613,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70084 */
70085 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70086 if (!vma) {
70087 - vm_unacct_memory(len >> PAGE_SHIFT);
70088 + vm_unacct_memory(charged);
70089 return -ENOMEM;
70090 }
70091
70092 @@ -2217,11 +2627,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
70093 vma_link(mm, vma, prev, rb_link, rb_parent);
70094 out:
70095 perf_event_mmap(vma);
70096 - mm->total_vm += len >> PAGE_SHIFT;
70097 + mm->total_vm += charged;
70098 if (flags & VM_LOCKED) {
70099 if (!mlock_vma_pages_range(vma, addr, addr + len))
70100 - mm->locked_vm += (len >> PAGE_SHIFT);
70101 + mm->locked_vm += charged;
70102 }
70103 + track_exec_limit(mm, addr, addr + len, flags);
70104 return addr;
70105 }
70106
70107 @@ -2268,8 +2679,10 @@ void exit_mmap(struct mm_struct *mm)
70108 * Walk the list again, actually closing and freeing it,
70109 * with preemption enabled, without holding any MM locks.
70110 */
70111 - while (vma)
70112 + while (vma) {
70113 + vma->vm_mirror = NULL;
70114 vma = remove_vma(vma);
70115 + }
70116
70117 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
70118 }
70119 @@ -2283,6 +2696,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
70120 struct vm_area_struct * __vma, * prev;
70121 struct rb_node ** rb_link, * rb_parent;
70122
70123 +#ifdef CONFIG_PAX_SEGMEXEC
70124 + struct vm_area_struct *vma_m = NULL;
70125 +#endif
70126 +
70127 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
70128 + return -EPERM;
70129 +
70130 /*
70131 * The vm_pgoff of a purely anonymous vma should be irrelevant
70132 * until its first write fault, when page's anon_vma and index
70133 @@ -2305,7 +2725,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
70134 if ((vma->vm_flags & VM_ACCOUNT) &&
70135 security_vm_enough_memory_mm(mm, vma_pages(vma)))
70136 return -ENOMEM;
70137 +
70138 +#ifdef CONFIG_PAX_SEGMEXEC
70139 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
70140 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70141 + if (!vma_m)
70142 + return -ENOMEM;
70143 + }
70144 +#endif
70145 +
70146 vma_link(mm, vma, prev, rb_link, rb_parent);
70147 +
70148 +#ifdef CONFIG_PAX_SEGMEXEC
70149 + if (vma_m)
70150 + BUG_ON(pax_mirror_vma(vma_m, vma));
70151 +#endif
70152 +
70153 return 0;
70154 }
70155
70156 @@ -2323,6 +2758,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
70157 struct rb_node **rb_link, *rb_parent;
70158 struct mempolicy *pol;
70159
70160 + BUG_ON(vma->vm_mirror);
70161 +
70162 /*
70163 * If anonymous vma has not yet been faulted, update new pgoff
70164 * to match new location, to increase its chance of merging.
70165 @@ -2373,6 +2810,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
70166 return NULL;
70167 }
70168
70169 +#ifdef CONFIG_PAX_SEGMEXEC
70170 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
70171 +{
70172 + struct vm_area_struct *prev_m;
70173 + struct rb_node **rb_link_m, *rb_parent_m;
70174 + struct mempolicy *pol_m;
70175 +
70176 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
70177 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
70178 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
70179 + *vma_m = *vma;
70180 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
70181 + if (anon_vma_clone(vma_m, vma))
70182 + return -ENOMEM;
70183 + pol_m = vma_policy(vma_m);
70184 + mpol_get(pol_m);
70185 + vma_set_policy(vma_m, pol_m);
70186 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
70187 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
70188 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
70189 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
70190 + if (vma_m->vm_file)
70191 + get_file(vma_m->vm_file);
70192 + if (vma_m->vm_ops && vma_m->vm_ops->open)
70193 + vma_m->vm_ops->open(vma_m);
70194 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
70195 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
70196 + vma_m->vm_mirror = vma;
70197 + vma->vm_mirror = vma_m;
70198 + return 0;
70199 +}
70200 +#endif
70201 +
70202 /*
70203 * Return true if the calling process may expand its vm space by the passed
70204 * number of pages
70205 @@ -2383,7 +2853,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
70206 unsigned long lim;
70207
70208 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
70209 -
70210 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
70211 if (cur + npages > lim)
70212 return 0;
70213 return 1;
70214 @@ -2454,6 +2924,22 @@ int install_special_mapping(struct mm_struct *mm,
70215 vma->vm_start = addr;
70216 vma->vm_end = addr + len;
70217
70218 +#ifdef CONFIG_PAX_MPROTECT
70219 + if (mm->pax_flags & MF_PAX_MPROTECT) {
70220 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
70221 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
70222 + return -EPERM;
70223 + if (!(vm_flags & VM_EXEC))
70224 + vm_flags &= ~VM_MAYEXEC;
70225 +#else
70226 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70227 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70228 +#endif
70229 + else
70230 + vm_flags &= ~VM_MAYWRITE;
70231 + }
70232 +#endif
70233 +
70234 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
70235 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70236
70237 diff --git a/mm/mprotect.c b/mm/mprotect.c
70238 index 5a688a2..27e031c 100644
70239 --- a/mm/mprotect.c
70240 +++ b/mm/mprotect.c
70241 @@ -23,10 +23,16 @@
70242 #include <linux/mmu_notifier.h>
70243 #include <linux/migrate.h>
70244 #include <linux/perf_event.h>
70245 +
70246 +#ifdef CONFIG_PAX_MPROTECT
70247 +#include <linux/elf.h>
70248 +#endif
70249 +
70250 #include <asm/uaccess.h>
70251 #include <asm/pgtable.h>
70252 #include <asm/cacheflush.h>
70253 #include <asm/tlbflush.h>
70254 +#include <asm/mmu_context.h>
70255
70256 #ifndef pgprot_modify
70257 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
70258 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
70259 flush_tlb_range(vma, start, end);
70260 }
70261
70262 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70263 +/* called while holding the mmap semaphor for writing except stack expansion */
70264 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
70265 +{
70266 + unsigned long oldlimit, newlimit = 0UL;
70267 +
70268 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
70269 + return;
70270 +
70271 + spin_lock(&mm->page_table_lock);
70272 + oldlimit = mm->context.user_cs_limit;
70273 + if ((prot & VM_EXEC) && oldlimit < end)
70274 + /* USER_CS limit moved up */
70275 + newlimit = end;
70276 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
70277 + /* USER_CS limit moved down */
70278 + newlimit = start;
70279 +
70280 + if (newlimit) {
70281 + mm->context.user_cs_limit = newlimit;
70282 +
70283 +#ifdef CONFIG_SMP
70284 + wmb();
70285 + cpus_clear(mm->context.cpu_user_cs_mask);
70286 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
70287 +#endif
70288 +
70289 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
70290 + }
70291 + spin_unlock(&mm->page_table_lock);
70292 + if (newlimit == end) {
70293 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
70294 +
70295 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
70296 + if (is_vm_hugetlb_page(vma))
70297 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
70298 + else
70299 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
70300 + }
70301 +}
70302 +#endif
70303 +
70304 int
70305 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70306 unsigned long start, unsigned long end, unsigned long newflags)
70307 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70308 int error;
70309 int dirty_accountable = 0;
70310
70311 +#ifdef CONFIG_PAX_SEGMEXEC
70312 + struct vm_area_struct *vma_m = NULL;
70313 + unsigned long start_m, end_m;
70314 +
70315 + start_m = start + SEGMEXEC_TASK_SIZE;
70316 + end_m = end + SEGMEXEC_TASK_SIZE;
70317 +#endif
70318 +
70319 if (newflags == oldflags) {
70320 *pprev = vma;
70321 return 0;
70322 }
70323
70324 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
70325 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
70326 +
70327 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
70328 + return -ENOMEM;
70329 +
70330 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
70331 + return -ENOMEM;
70332 + }
70333 +
70334 /*
70335 * If we make a private mapping writable we increase our commit;
70336 * but (without finer accounting) cannot reduce our commit if we
70337 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70338 }
70339 }
70340
70341 +#ifdef CONFIG_PAX_SEGMEXEC
70342 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
70343 + if (start != vma->vm_start) {
70344 + error = split_vma(mm, vma, start, 1);
70345 + if (error)
70346 + goto fail;
70347 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
70348 + *pprev = (*pprev)->vm_next;
70349 + }
70350 +
70351 + if (end != vma->vm_end) {
70352 + error = split_vma(mm, vma, end, 0);
70353 + if (error)
70354 + goto fail;
70355 + }
70356 +
70357 + if (pax_find_mirror_vma(vma)) {
70358 + error = __do_munmap(mm, start_m, end_m - start_m);
70359 + if (error)
70360 + goto fail;
70361 + } else {
70362 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70363 + if (!vma_m) {
70364 + error = -ENOMEM;
70365 + goto fail;
70366 + }
70367 + vma->vm_flags = newflags;
70368 + error = pax_mirror_vma(vma_m, vma);
70369 + if (error) {
70370 + vma->vm_flags = oldflags;
70371 + goto fail;
70372 + }
70373 + }
70374 + }
70375 +#endif
70376 +
70377 /*
70378 * First try to merge with previous and/or next vma.
70379 */
70380 @@ -204,9 +306,21 @@ success:
70381 * vm_flags and vm_page_prot are protected by the mmap_sem
70382 * held in write mode.
70383 */
70384 +
70385 +#ifdef CONFIG_PAX_SEGMEXEC
70386 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
70387 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
70388 +#endif
70389 +
70390 vma->vm_flags = newflags;
70391 +
70392 +#ifdef CONFIG_PAX_MPROTECT
70393 + if (mm->binfmt && mm->binfmt->handle_mprotect)
70394 + mm->binfmt->handle_mprotect(vma, newflags);
70395 +#endif
70396 +
70397 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
70398 - vm_get_page_prot(newflags));
70399 + vm_get_page_prot(vma->vm_flags));
70400
70401 if (vma_wants_writenotify(vma)) {
70402 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
70403 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70404 end = start + len;
70405 if (end <= start)
70406 return -ENOMEM;
70407 +
70408 +#ifdef CONFIG_PAX_SEGMEXEC
70409 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70410 + if (end > SEGMEXEC_TASK_SIZE)
70411 + return -EINVAL;
70412 + } else
70413 +#endif
70414 +
70415 + if (end > TASK_SIZE)
70416 + return -EINVAL;
70417 +
70418 if (!arch_validate_prot(prot))
70419 return -EINVAL;
70420
70421 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70422 /*
70423 * Does the application expect PROT_READ to imply PROT_EXEC:
70424 */
70425 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70426 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70427 prot |= PROT_EXEC;
70428
70429 vm_flags = calc_vm_prot_bits(prot);
70430 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70431 if (start > vma->vm_start)
70432 prev = vma;
70433
70434 +#ifdef CONFIG_PAX_MPROTECT
70435 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70436 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
70437 +#endif
70438 +
70439 for (nstart = start ; ; ) {
70440 unsigned long newflags;
70441
70442 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70443
70444 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70445 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70446 + if (prot & (PROT_WRITE | PROT_EXEC))
70447 + gr_log_rwxmprotect(vma->vm_file);
70448 +
70449 + error = -EACCES;
70450 + goto out;
70451 + }
70452 +
70453 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70454 error = -EACCES;
70455 goto out;
70456 }
70457 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70458 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70459 if (error)
70460 goto out;
70461 +
70462 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
70463 +
70464 nstart = tmp;
70465
70466 if (nstart < prev->vm_end)
70467 diff --git a/mm/mremap.c b/mm/mremap.c
70468 index d6959cb..18a402a 100644
70469 --- a/mm/mremap.c
70470 +++ b/mm/mremap.c
70471 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
70472 continue;
70473 pte = ptep_get_and_clear(mm, old_addr, old_pte);
70474 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70475 +
70476 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70477 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70478 + pte = pte_exprotect(pte);
70479 +#endif
70480 +
70481 set_pte_at(mm, new_addr, new_pte, pte);
70482 }
70483
70484 @@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
70485 if (is_vm_hugetlb_page(vma))
70486 goto Einval;
70487
70488 +#ifdef CONFIG_PAX_SEGMEXEC
70489 + if (pax_find_mirror_vma(vma))
70490 + goto Einval;
70491 +#endif
70492 +
70493 /* We can't remap across vm area boundaries */
70494 if (old_len > vma->vm_end - addr)
70495 goto Efault;
70496 @@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
70497 unsigned long ret = -EINVAL;
70498 unsigned long charged = 0;
70499 unsigned long map_flags;
70500 + unsigned long pax_task_size = TASK_SIZE;
70501
70502 if (new_addr & ~PAGE_MASK)
70503 goto out;
70504
70505 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70506 +#ifdef CONFIG_PAX_SEGMEXEC
70507 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70508 + pax_task_size = SEGMEXEC_TASK_SIZE;
70509 +#endif
70510 +
70511 + pax_task_size -= PAGE_SIZE;
70512 +
70513 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70514 goto out;
70515
70516 /* Check if the location we're moving into overlaps the
70517 * old location at all, and fail if it does.
70518 */
70519 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
70520 - goto out;
70521 -
70522 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
70523 + if (addr + old_len > new_addr && new_addr + new_len > addr)
70524 goto out;
70525
70526 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70527 @@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
70528 struct vm_area_struct *vma;
70529 unsigned long ret = -EINVAL;
70530 unsigned long charged = 0;
70531 + unsigned long pax_task_size = TASK_SIZE;
70532
70533 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70534 goto out;
70535 @@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
70536 if (!new_len)
70537 goto out;
70538
70539 +#ifdef CONFIG_PAX_SEGMEXEC
70540 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70541 + pax_task_size = SEGMEXEC_TASK_SIZE;
70542 +#endif
70543 +
70544 + pax_task_size -= PAGE_SIZE;
70545 +
70546 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70547 + old_len > pax_task_size || addr > pax_task_size-old_len)
70548 + goto out;
70549 +
70550 if (flags & MREMAP_FIXED) {
70551 if (flags & MREMAP_MAYMOVE)
70552 ret = mremap_to(addr, old_len, new_addr, new_len);
70553 @@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
70554 addr + new_len);
70555 }
70556 ret = addr;
70557 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70558 goto out;
70559 }
70560 }
70561 @@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
70562 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70563 if (ret)
70564 goto out;
70565 +
70566 + map_flags = vma->vm_flags;
70567 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70568 + if (!(ret & ~PAGE_MASK)) {
70569 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70570 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70571 + }
70572 }
70573 out:
70574 if (ret & ~PAGE_MASK)
70575 diff --git a/mm/nobootmem.c b/mm/nobootmem.c
70576 index 7fa41b4..6087460 100644
70577 --- a/mm/nobootmem.c
70578 +++ b/mm/nobootmem.c
70579 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
70580 unsigned long __init free_all_memory_core_early(int nodeid)
70581 {
70582 int i;
70583 - u64 start, end;
70584 + u64 start, end, startrange, endrange;
70585 unsigned long count = 0;
70586 - struct range *range = NULL;
70587 + struct range *range = NULL, rangerange = { 0, 0 };
70588 int nr_range;
70589
70590 nr_range = get_free_all_memory_range(&range, nodeid);
70591 + startrange = __pa(range) >> PAGE_SHIFT;
70592 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70593
70594 for (i = 0; i < nr_range; i++) {
70595 start = range[i].start;
70596 end = range[i].end;
70597 + if (start <= endrange && startrange < end) {
70598 + BUG_ON(rangerange.start | rangerange.end);
70599 + rangerange = range[i];
70600 + continue;
70601 + }
70602 count += end - start;
70603 __free_pages_memory(start, end);
70604 }
70605 + start = rangerange.start;
70606 + end = rangerange.end;
70607 + count += end - start;
70608 + __free_pages_memory(start, end);
70609
70610 return count;
70611 }
70612 diff --git a/mm/nommu.c b/mm/nommu.c
70613 index ee7e57e..cae4e40 100644
70614 --- a/mm/nommu.c
70615 +++ b/mm/nommu.c
70616 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
70617 int sysctl_overcommit_ratio = 50; /* default is 50% */
70618 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70619 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70620 -int heap_stack_gap = 0;
70621
70622 atomic_long_t mmap_pages_allocated;
70623
70624 @@ -829,15 +828,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
70625 EXPORT_SYMBOL(find_vma);
70626
70627 /*
70628 - * find a VMA
70629 - * - we don't extend stack VMAs under NOMMU conditions
70630 - */
70631 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70632 -{
70633 - return find_vma(mm, addr);
70634 -}
70635 -
70636 -/*
70637 * expand a stack to a given address
70638 * - not supported under NOMMU conditions
70639 */
70640 @@ -1557,6 +1547,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
70641
70642 /* most fields are the same, copy all, and then fixup */
70643 *new = *vma;
70644 + INIT_LIST_HEAD(&new->anon_vma_chain);
70645 *region = *vma->vm_region;
70646 new->vm_region = region;
70647
70648 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
70649 index 485be89..c059ad3 100644
70650 --- a/mm/page_alloc.c
70651 +++ b/mm/page_alloc.c
70652 @@ -341,7 +341,7 @@ out:
70653 * This usage means that zero-order pages may not be compound.
70654 */
70655
70656 -static void free_compound_page(struct page *page)
70657 +void free_compound_page(struct page *page)
70658 {
70659 __free_pages_ok(page, compound_order(page));
70660 }
70661 @@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70662 int i;
70663 int bad = 0;
70664
70665 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70666 + unsigned long index = 1UL << order;
70667 +#endif
70668 +
70669 trace_mm_page_free_direct(page, order);
70670 kmemcheck_free_shadow(page, order);
70671
70672 @@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70673 debug_check_no_obj_freed(page_address(page),
70674 PAGE_SIZE << order);
70675 }
70676 +
70677 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70678 + for (; index; --index)
70679 + sanitize_highpage(page + index - 1);
70680 +#endif
70681 +
70682 arch_free_page(page, order);
70683 kernel_map_pages(page, 1 << order, 0);
70684
70685 @@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
70686 arch_alloc_page(page, order);
70687 kernel_map_pages(page, 1 << order, 1);
70688
70689 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
70690 if (gfp_flags & __GFP_ZERO)
70691 prep_zero_page(page, order, gfp_flags);
70692 +#endif
70693
70694 if (order && (gfp_flags & __GFP_COMP))
70695 prep_compound_page(page, order);
70696 @@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
70697 unsigned long pfn;
70698
70699 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70700 +#ifdef CONFIG_X86_32
70701 + /* boot failures in VMware 8 on 32bit vanilla since
70702 + this change */
70703 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70704 +#else
70705 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70706 +#endif
70707 return 1;
70708 }
70709 return 0;
70710 diff --git a/mm/percpu.c b/mm/percpu.c
70711 index 716eb4a..8d10419 100644
70712 --- a/mm/percpu.c
70713 +++ b/mm/percpu.c
70714 @@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
70715 static unsigned int pcpu_high_unit_cpu __read_mostly;
70716
70717 /* the address of the first chunk which starts with the kernel static area */
70718 -void *pcpu_base_addr __read_mostly;
70719 +void *pcpu_base_addr __read_only;
70720 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70721
70722 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70723 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
70724 index e920aa3..137702a 100644
70725 --- a/mm/process_vm_access.c
70726 +++ b/mm/process_vm_access.c
70727 @@ -13,6 +13,7 @@
70728 #include <linux/uio.h>
70729 #include <linux/sched.h>
70730 #include <linux/highmem.h>
70731 +#include <linux/security.h>
70732 #include <linux/ptrace.h>
70733 #include <linux/slab.h>
70734 #include <linux/syscalls.h>
70735 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70736 size_t iov_l_curr_offset = 0;
70737 ssize_t iov_len;
70738
70739 + return -ENOSYS; // PaX: until properly audited
70740 +
70741 /*
70742 * Work out how many pages of struct pages we're going to need
70743 * when eventually calling get_user_pages
70744 */
70745 for (i = 0; i < riovcnt; i++) {
70746 iov_len = rvec[i].iov_len;
70747 - if (iov_len > 0) {
70748 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
70749 - + iov_len)
70750 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
70751 - / PAGE_SIZE + 1;
70752 - nr_pages = max(nr_pages, nr_pages_iov);
70753 - }
70754 + if (iov_len <= 0)
70755 + continue;
70756 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
70757 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
70758 + nr_pages = max(nr_pages, nr_pages_iov);
70759 }
70760
70761 if (nr_pages == 0)
70762 @@ -298,23 +299,23 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70763 goto free_proc_pages;
70764 }
70765
70766 - task_lock(task);
70767 - if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
70768 - task_unlock(task);
70769 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
70770 rc = -EPERM;
70771 goto put_task_struct;
70772 }
70773 - mm = task->mm;
70774
70775 - if (!mm || (task->flags & PF_KTHREAD)) {
70776 - task_unlock(task);
70777 - rc = -EINVAL;
70778 + mm = mm_access(task, PTRACE_MODE_ATTACH);
70779 + if (!mm || IS_ERR(mm)) {
70780 + rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
70781 + /*
70782 + * Explicitly map EACCES to EPERM as EPERM is a more a
70783 + * appropriate error code for process_vw_readv/writev
70784 + */
70785 + if (rc == -EACCES)
70786 + rc = -EPERM;
70787 goto put_task_struct;
70788 }
70789
70790 - atomic_inc(&mm->mm_users);
70791 - task_unlock(task);
70792 -
70793 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
70794 rc = process_vm_rw_single_vec(
70795 (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
70796 diff --git a/mm/rmap.c b/mm/rmap.c
70797 index a4fd368..e0ffec7 100644
70798 --- a/mm/rmap.c
70799 +++ b/mm/rmap.c
70800 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70801 struct anon_vma *anon_vma = vma->anon_vma;
70802 struct anon_vma_chain *avc;
70803
70804 +#ifdef CONFIG_PAX_SEGMEXEC
70805 + struct anon_vma_chain *avc_m = NULL;
70806 +#endif
70807 +
70808 might_sleep();
70809 if (unlikely(!anon_vma)) {
70810 struct mm_struct *mm = vma->vm_mm;
70811 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70812 if (!avc)
70813 goto out_enomem;
70814
70815 +#ifdef CONFIG_PAX_SEGMEXEC
70816 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70817 + if (!avc_m)
70818 + goto out_enomem_free_avc;
70819 +#endif
70820 +
70821 anon_vma = find_mergeable_anon_vma(vma);
70822 allocated = NULL;
70823 if (!anon_vma) {
70824 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70825 /* page_table_lock to protect against threads */
70826 spin_lock(&mm->page_table_lock);
70827 if (likely(!vma->anon_vma)) {
70828 +
70829 +#ifdef CONFIG_PAX_SEGMEXEC
70830 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70831 +
70832 + if (vma_m) {
70833 + BUG_ON(vma_m->anon_vma);
70834 + vma_m->anon_vma = anon_vma;
70835 + avc_m->anon_vma = anon_vma;
70836 + avc_m->vma = vma;
70837 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70838 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
70839 + avc_m = NULL;
70840 + }
70841 +#endif
70842 +
70843 vma->anon_vma = anon_vma;
70844 avc->anon_vma = anon_vma;
70845 avc->vma = vma;
70846 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70847
70848 if (unlikely(allocated))
70849 put_anon_vma(allocated);
70850 +
70851 +#ifdef CONFIG_PAX_SEGMEXEC
70852 + if (unlikely(avc_m))
70853 + anon_vma_chain_free(avc_m);
70854 +#endif
70855 +
70856 if (unlikely(avc))
70857 anon_vma_chain_free(avc);
70858 }
70859 return 0;
70860
70861 out_enomem_free_avc:
70862 +
70863 +#ifdef CONFIG_PAX_SEGMEXEC
70864 + if (avc_m)
70865 + anon_vma_chain_free(avc_m);
70866 +#endif
70867 +
70868 anon_vma_chain_free(avc);
70869 out_enomem:
70870 return -ENOMEM;
70871 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
70872 * Attach the anon_vmas from src to dst.
70873 * Returns 0 on success, -ENOMEM on failure.
70874 */
70875 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70876 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70877 {
70878 struct anon_vma_chain *avc, *pavc;
70879 struct anon_vma *root = NULL;
70880 @@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70881 * the corresponding VMA in the parent process is attached to.
70882 * Returns 0 on success, non-zero on failure.
70883 */
70884 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70885 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70886 {
70887 struct anon_vma_chain *avc;
70888 struct anon_vma *anon_vma;
70889 diff --git a/mm/shmem.c b/mm/shmem.c
70890 index 6c253f7..367e20a 100644
70891 --- a/mm/shmem.c
70892 +++ b/mm/shmem.c
70893 @@ -31,7 +31,7 @@
70894 #include <linux/export.h>
70895 #include <linux/swap.h>
70896
70897 -static struct vfsmount *shm_mnt;
70898 +struct vfsmount *shm_mnt;
70899
70900 #ifdef CONFIG_SHMEM
70901 /*
70902 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70903 #define BOGO_DIRENT_SIZE 20
70904
70905 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70906 -#define SHORT_SYMLINK_LEN 128
70907 +#define SHORT_SYMLINK_LEN 64
70908
70909 struct shmem_xattr {
70910 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70911 @@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
70912 int err = -ENOMEM;
70913
70914 /* Round up to L1_CACHE_BYTES to resist false sharing */
70915 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70916 - L1_CACHE_BYTES), GFP_KERNEL);
70917 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70918 if (!sbinfo)
70919 return -ENOMEM;
70920
70921 diff --git a/mm/slab.c b/mm/slab.c
70922 index 83311c9a..fcf8f86 100644
70923 --- a/mm/slab.c
70924 +++ b/mm/slab.c
70925 @@ -151,7 +151,7 @@
70926
70927 /* Legal flag mask for kmem_cache_create(). */
70928 #if DEBUG
70929 -# define CREATE_MASK (SLAB_RED_ZONE | \
70930 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70931 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70932 SLAB_CACHE_DMA | \
70933 SLAB_STORE_USER | \
70934 @@ -159,7 +159,7 @@
70935 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70936 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70937 #else
70938 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70939 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70940 SLAB_CACHE_DMA | \
70941 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70942 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70943 @@ -288,7 +288,7 @@ struct kmem_list3 {
70944 * Need this for bootstrapping a per node allocator.
70945 */
70946 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70947 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70948 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70949 #define CACHE_CACHE 0
70950 #define SIZE_AC MAX_NUMNODES
70951 #define SIZE_L3 (2 * MAX_NUMNODES)
70952 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
70953 if ((x)->max_freeable < i) \
70954 (x)->max_freeable = i; \
70955 } while (0)
70956 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70957 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70958 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70959 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70960 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70961 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70962 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70963 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70964 #else
70965 #define STATS_INC_ACTIVE(x) do { } while (0)
70966 #define STATS_DEC_ACTIVE(x) do { } while (0)
70967 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
70968 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70969 */
70970 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70971 - const struct slab *slab, void *obj)
70972 + const struct slab *slab, const void *obj)
70973 {
70974 u32 offset = (obj - slab->s_mem);
70975 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70976 @@ -564,7 +564,7 @@ struct cache_names {
70977 static struct cache_names __initdata cache_names[] = {
70978 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70979 #include <linux/kmalloc_sizes.h>
70980 - {NULL,}
70981 + {NULL}
70982 #undef CACHE
70983 };
70984
70985 @@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
70986 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70987 sizes[INDEX_AC].cs_size,
70988 ARCH_KMALLOC_MINALIGN,
70989 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70990 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70991 NULL);
70992
70993 if (INDEX_AC != INDEX_L3) {
70994 @@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
70995 kmem_cache_create(names[INDEX_L3].name,
70996 sizes[INDEX_L3].cs_size,
70997 ARCH_KMALLOC_MINALIGN,
70998 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70999 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71000 NULL);
71001 }
71002
71003 @@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
71004 sizes->cs_cachep = kmem_cache_create(names->name,
71005 sizes->cs_size,
71006 ARCH_KMALLOC_MINALIGN,
71007 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
71008 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
71009 NULL);
71010 }
71011 #ifdef CONFIG_ZONE_DMA
71012 @@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
71013 }
71014 /* cpu stats */
71015 {
71016 - unsigned long allochit = atomic_read(&cachep->allochit);
71017 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
71018 - unsigned long freehit = atomic_read(&cachep->freehit);
71019 - unsigned long freemiss = atomic_read(&cachep->freemiss);
71020 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
71021 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
71022 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
71023 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
71024
71025 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
71026 allochit, allocmiss, freehit, freemiss);
71027 @@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
71028 {
71029 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
71030 #ifdef CONFIG_DEBUG_SLAB_LEAK
71031 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
71032 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
71033 #endif
71034 return 0;
71035 }
71036 module_init(slab_proc_init);
71037 #endif
71038
71039 +void check_object_size(const void *ptr, unsigned long n, bool to)
71040 +{
71041 +
71042 +#ifdef CONFIG_PAX_USERCOPY
71043 + struct page *page;
71044 + struct kmem_cache *cachep = NULL;
71045 + struct slab *slabp;
71046 + unsigned int objnr;
71047 + unsigned long offset;
71048 + const char *type;
71049 +
71050 + if (!n)
71051 + return;
71052 +
71053 + type = "<null>";
71054 + if (ZERO_OR_NULL_PTR(ptr))
71055 + goto report;
71056 +
71057 + if (!virt_addr_valid(ptr))
71058 + return;
71059 +
71060 + page = virt_to_head_page(ptr);
71061 +
71062 + type = "<process stack>";
71063 + if (!PageSlab(page)) {
71064 + if (object_is_on_stack(ptr, n) == -1)
71065 + goto report;
71066 + return;
71067 + }
71068 +
71069 + cachep = page_get_cache(page);
71070 + type = cachep->name;
71071 + if (!(cachep->flags & SLAB_USERCOPY))
71072 + goto report;
71073 +
71074 + slabp = page_get_slab(page);
71075 + objnr = obj_to_index(cachep, slabp, ptr);
71076 + BUG_ON(objnr >= cachep->num);
71077 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
71078 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
71079 + return;
71080 +
71081 +report:
71082 + pax_report_usercopy(ptr, n, to, type);
71083 +#endif
71084 +
71085 +}
71086 +EXPORT_SYMBOL(check_object_size);
71087 +
71088 /**
71089 * ksize - get the actual amount of memory allocated for a given object
71090 * @objp: Pointer to the object
71091 diff --git a/mm/slob.c b/mm/slob.c
71092 index 8105be4..e045f96 100644
71093 --- a/mm/slob.c
71094 +++ b/mm/slob.c
71095 @@ -29,7 +29,7 @@
71096 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
71097 * alloc_pages() directly, allocating compound pages so the page order
71098 * does not have to be separately tracked, and also stores the exact
71099 - * allocation size in page->private so that it can be used to accurately
71100 + * allocation size in slob_page->size so that it can be used to accurately
71101 * provide ksize(). These objects are detected in kfree() because slob_page()
71102 * is false for them.
71103 *
71104 @@ -58,6 +58,7 @@
71105 */
71106
71107 #include <linux/kernel.h>
71108 +#include <linux/sched.h>
71109 #include <linux/slab.h>
71110 #include <linux/mm.h>
71111 #include <linux/swap.h> /* struct reclaim_state */
71112 @@ -102,7 +103,8 @@ struct slob_page {
71113 unsigned long flags; /* mandatory */
71114 atomic_t _count; /* mandatory */
71115 slobidx_t units; /* free units left in page */
71116 - unsigned long pad[2];
71117 + unsigned long pad[1];
71118 + unsigned long size; /* size when >=PAGE_SIZE */
71119 slob_t *free; /* first free slob_t in page */
71120 struct list_head list; /* linked list of free pages */
71121 };
71122 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
71123 */
71124 static inline int is_slob_page(struct slob_page *sp)
71125 {
71126 - return PageSlab((struct page *)sp);
71127 + return PageSlab((struct page *)sp) && !sp->size;
71128 }
71129
71130 static inline void set_slob_page(struct slob_page *sp)
71131 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
71132
71133 static inline struct slob_page *slob_page(const void *addr)
71134 {
71135 - return (struct slob_page *)virt_to_page(addr);
71136 + return (struct slob_page *)virt_to_head_page(addr);
71137 }
71138
71139 /*
71140 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
71141 /*
71142 * Return the size of a slob block.
71143 */
71144 -static slobidx_t slob_units(slob_t *s)
71145 +static slobidx_t slob_units(const slob_t *s)
71146 {
71147 if (s->units > 0)
71148 return s->units;
71149 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
71150 /*
71151 * Return the next free slob block pointer after this one.
71152 */
71153 -static slob_t *slob_next(slob_t *s)
71154 +static slob_t *slob_next(const slob_t *s)
71155 {
71156 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
71157 slobidx_t next;
71158 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
71159 /*
71160 * Returns true if s is the last free block in its page.
71161 */
71162 -static int slob_last(slob_t *s)
71163 +static int slob_last(const slob_t *s)
71164 {
71165 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
71166 }
71167 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
71168 if (!page)
71169 return NULL;
71170
71171 + set_slob_page(page);
71172 return page_address(page);
71173 }
71174
71175 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
71176 if (!b)
71177 return NULL;
71178 sp = slob_page(b);
71179 - set_slob_page(sp);
71180
71181 spin_lock_irqsave(&slob_lock, flags);
71182 sp->units = SLOB_UNITS(PAGE_SIZE);
71183 sp->free = b;
71184 + sp->size = 0;
71185 INIT_LIST_HEAD(&sp->list);
71186 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
71187 set_slob_page_free(sp, slob_list);
71188 @@ -476,10 +479,9 @@ out:
71189 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
71190 */
71191
71192 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71193 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
71194 {
71195 - unsigned int *m;
71196 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71197 + slob_t *m;
71198 void *ret;
71199
71200 gfp &= gfp_allowed_mask;
71201 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71202
71203 if (!m)
71204 return NULL;
71205 - *m = size;
71206 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
71207 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
71208 + m[0].units = size;
71209 + m[1].units = align;
71210 ret = (void *)m + align;
71211
71212 trace_kmalloc_node(_RET_IP_, ret,
71213 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71214 gfp |= __GFP_COMP;
71215 ret = slob_new_pages(gfp, order, node);
71216 if (ret) {
71217 - struct page *page;
71218 - page = virt_to_page(ret);
71219 - page->private = size;
71220 + struct slob_page *sp;
71221 + sp = slob_page(ret);
71222 + sp->size = size;
71223 }
71224
71225 trace_kmalloc_node(_RET_IP_, ret,
71226 size, PAGE_SIZE << order, gfp, node);
71227 }
71228
71229 - kmemleak_alloc(ret, size, 1, gfp);
71230 + return ret;
71231 +}
71232 +
71233 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
71234 +{
71235 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71236 + void *ret = __kmalloc_node_align(size, gfp, node, align);
71237 +
71238 + if (!ZERO_OR_NULL_PTR(ret))
71239 + kmemleak_alloc(ret, size, 1, gfp);
71240 return ret;
71241 }
71242 EXPORT_SYMBOL(__kmalloc_node);
71243 @@ -533,13 +547,92 @@ void kfree(const void *block)
71244 sp = slob_page(block);
71245 if (is_slob_page(sp)) {
71246 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71247 - unsigned int *m = (unsigned int *)(block - align);
71248 - slob_free(m, *m + align);
71249 - } else
71250 + slob_t *m = (slob_t *)(block - align);
71251 + slob_free(m, m[0].units + align);
71252 + } else {
71253 + clear_slob_page(sp);
71254 + free_slob_page(sp);
71255 + sp->size = 0;
71256 put_page(&sp->page);
71257 + }
71258 }
71259 EXPORT_SYMBOL(kfree);
71260
71261 +void check_object_size(const void *ptr, unsigned long n, bool to)
71262 +{
71263 +
71264 +#ifdef CONFIG_PAX_USERCOPY
71265 + struct slob_page *sp;
71266 + const slob_t *free;
71267 + const void *base;
71268 + unsigned long flags;
71269 + const char *type;
71270 +
71271 + if (!n)
71272 + return;
71273 +
71274 + type = "<null>";
71275 + if (ZERO_OR_NULL_PTR(ptr))
71276 + goto report;
71277 +
71278 + if (!virt_addr_valid(ptr))
71279 + return;
71280 +
71281 + type = "<process stack>";
71282 + sp = slob_page(ptr);
71283 + if (!PageSlab((struct page *)sp)) {
71284 + if (object_is_on_stack(ptr, n) == -1)
71285 + goto report;
71286 + return;
71287 + }
71288 +
71289 + type = "<slob>";
71290 + if (sp->size) {
71291 + base = page_address(&sp->page);
71292 + if (base <= ptr && n <= sp->size - (ptr - base))
71293 + return;
71294 + goto report;
71295 + }
71296 +
71297 + /* some tricky double walking to find the chunk */
71298 + spin_lock_irqsave(&slob_lock, flags);
71299 + base = (void *)((unsigned long)ptr & PAGE_MASK);
71300 + free = sp->free;
71301 +
71302 + while (!slob_last(free) && (void *)free <= ptr) {
71303 + base = free + slob_units(free);
71304 + free = slob_next(free);
71305 + }
71306 +
71307 + while (base < (void *)free) {
71308 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
71309 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
71310 + int offset;
71311 +
71312 + if (ptr < base + align)
71313 + break;
71314 +
71315 + offset = ptr - base - align;
71316 + if (offset >= m) {
71317 + base += size;
71318 + continue;
71319 + }
71320 +
71321 + if (n > m - offset)
71322 + break;
71323 +
71324 + spin_unlock_irqrestore(&slob_lock, flags);
71325 + return;
71326 + }
71327 +
71328 + spin_unlock_irqrestore(&slob_lock, flags);
71329 +report:
71330 + pax_report_usercopy(ptr, n, to, type);
71331 +#endif
71332 +
71333 +}
71334 +EXPORT_SYMBOL(check_object_size);
71335 +
71336 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
71337 size_t ksize(const void *block)
71338 {
71339 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
71340 sp = slob_page(block);
71341 if (is_slob_page(sp)) {
71342 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71343 - unsigned int *m = (unsigned int *)(block - align);
71344 - return SLOB_UNITS(*m) * SLOB_UNIT;
71345 + slob_t *m = (slob_t *)(block - align);
71346 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
71347 } else
71348 - return sp->page.private;
71349 + return sp->size;
71350 }
71351 EXPORT_SYMBOL(ksize);
71352
71353 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71354 {
71355 struct kmem_cache *c;
71356
71357 +#ifdef CONFIG_PAX_USERCOPY
71358 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
71359 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
71360 +#else
71361 c = slob_alloc(sizeof(struct kmem_cache),
71362 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
71363 +#endif
71364
71365 if (c) {
71366 c->name = name;
71367 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
71368
71369 lockdep_trace_alloc(flags);
71370
71371 +#ifdef CONFIG_PAX_USERCOPY
71372 + b = __kmalloc_node_align(c->size, flags, node, c->align);
71373 +#else
71374 if (c->size < PAGE_SIZE) {
71375 b = slob_alloc(c->size, flags, c->align, node);
71376 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71377 SLOB_UNITS(c->size) * SLOB_UNIT,
71378 flags, node);
71379 } else {
71380 + struct slob_page *sp;
71381 +
71382 b = slob_new_pages(flags, get_order(c->size), node);
71383 + sp = slob_page(b);
71384 + sp->size = c->size;
71385 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71386 PAGE_SIZE << get_order(c->size),
71387 flags, node);
71388 }
71389 +#endif
71390
71391 if (c->ctor)
71392 c->ctor(b);
71393 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
71394
71395 static void __kmem_cache_free(void *b, int size)
71396 {
71397 - if (size < PAGE_SIZE)
71398 + struct slob_page *sp = slob_page(b);
71399 +
71400 + if (is_slob_page(sp))
71401 slob_free(b, size);
71402 - else
71403 + else {
71404 + clear_slob_page(sp);
71405 + free_slob_page(sp);
71406 + sp->size = 0;
71407 slob_free_pages(b, get_order(size));
71408 + }
71409 }
71410
71411 static void kmem_rcu_free(struct rcu_head *head)
71412 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
71413
71414 void kmem_cache_free(struct kmem_cache *c, void *b)
71415 {
71416 + int size = c->size;
71417 +
71418 +#ifdef CONFIG_PAX_USERCOPY
71419 + if (size + c->align < PAGE_SIZE) {
71420 + size += c->align;
71421 + b -= c->align;
71422 + }
71423 +#endif
71424 +
71425 kmemleak_free_recursive(b, c->flags);
71426 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
71427 struct slob_rcu *slob_rcu;
71428 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
71429 - slob_rcu->size = c->size;
71430 + slob_rcu = b + (size - sizeof(struct slob_rcu));
71431 + slob_rcu->size = size;
71432 call_rcu(&slob_rcu->head, kmem_rcu_free);
71433 } else {
71434 - __kmem_cache_free(b, c->size);
71435 + __kmem_cache_free(b, size);
71436 }
71437
71438 +#ifdef CONFIG_PAX_USERCOPY
71439 + trace_kfree(_RET_IP_, b);
71440 +#else
71441 trace_kmem_cache_free(_RET_IP_, b);
71442 +#endif
71443 +
71444 }
71445 EXPORT_SYMBOL(kmem_cache_free);
71446
71447 diff --git a/mm/slub.c b/mm/slub.c
71448 index 1a919f0..1739c9b 100644
71449 --- a/mm/slub.c
71450 +++ b/mm/slub.c
71451 @@ -208,7 +208,7 @@ struct track {
71452
71453 enum track_item { TRACK_ALLOC, TRACK_FREE };
71454
71455 -#ifdef CONFIG_SYSFS
71456 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71457 static int sysfs_slab_add(struct kmem_cache *);
71458 static int sysfs_slab_alias(struct kmem_cache *, const char *);
71459 static void sysfs_slab_remove(struct kmem_cache *);
71460 @@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
71461 if (!t->addr)
71462 return;
71463
71464 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
71465 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
71466 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
71467 #ifdef CONFIG_STACKTRACE
71468 {
71469 @@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
71470
71471 page = virt_to_head_page(x);
71472
71473 + BUG_ON(!PageSlab(page));
71474 +
71475 slab_free(s, page, x, _RET_IP_);
71476
71477 trace_kmem_cache_free(_RET_IP_, x);
71478 @@ -2592,7 +2594,7 @@ static int slub_min_objects;
71479 * Merge control. If this is set then no merging of slab caches will occur.
71480 * (Could be removed. This was introduced to pacify the merge skeptics.)
71481 */
71482 -static int slub_nomerge;
71483 +static int slub_nomerge = 1;
71484
71485 /*
71486 * Calculate the order of allocation given an slab object size.
71487 @@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
71488 else
71489 s->cpu_partial = 30;
71490
71491 - s->refcount = 1;
71492 + atomic_set(&s->refcount, 1);
71493 #ifdef CONFIG_NUMA
71494 s->remote_node_defrag_ratio = 1000;
71495 #endif
71496 @@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
71497 void kmem_cache_destroy(struct kmem_cache *s)
71498 {
71499 down_write(&slub_lock);
71500 - s->refcount--;
71501 - if (!s->refcount) {
71502 + if (atomic_dec_and_test(&s->refcount)) {
71503 list_del(&s->list);
71504 up_write(&slub_lock);
71505 if (kmem_cache_close(s)) {
71506 @@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
71507 EXPORT_SYMBOL(__kmalloc_node);
71508 #endif
71509
71510 +void check_object_size(const void *ptr, unsigned long n, bool to)
71511 +{
71512 +
71513 +#ifdef CONFIG_PAX_USERCOPY
71514 + struct page *page;
71515 + struct kmem_cache *s = NULL;
71516 + unsigned long offset;
71517 + const char *type;
71518 +
71519 + if (!n)
71520 + return;
71521 +
71522 + type = "<null>";
71523 + if (ZERO_OR_NULL_PTR(ptr))
71524 + goto report;
71525 +
71526 + if (!virt_addr_valid(ptr))
71527 + return;
71528 +
71529 + page = virt_to_head_page(ptr);
71530 +
71531 + type = "<process stack>";
71532 + if (!PageSlab(page)) {
71533 + if (object_is_on_stack(ptr, n) == -1)
71534 + goto report;
71535 + return;
71536 + }
71537 +
71538 + s = page->slab;
71539 + type = s->name;
71540 + if (!(s->flags & SLAB_USERCOPY))
71541 + goto report;
71542 +
71543 + offset = (ptr - page_address(page)) % s->size;
71544 + if (offset <= s->objsize && n <= s->objsize - offset)
71545 + return;
71546 +
71547 +report:
71548 + pax_report_usercopy(ptr, n, to, type);
71549 +#endif
71550 +
71551 +}
71552 +EXPORT_SYMBOL(check_object_size);
71553 +
71554 size_t ksize(const void *object)
71555 {
71556 struct page *page;
71557 @@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
71558 int node;
71559
71560 list_add(&s->list, &slab_caches);
71561 - s->refcount = -1;
71562 + atomic_set(&s->refcount, -1);
71563
71564 for_each_node_state(node, N_NORMAL_MEMORY) {
71565 struct kmem_cache_node *n = get_node(s, node);
71566 @@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
71567
71568 /* Caches that are not of the two-to-the-power-of size */
71569 if (KMALLOC_MIN_SIZE <= 32) {
71570 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
71571 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
71572 caches++;
71573 }
71574
71575 if (KMALLOC_MIN_SIZE <= 64) {
71576 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
71577 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
71578 caches++;
71579 }
71580
71581 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71582 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
71583 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
71584 caches++;
71585 }
71586
71587 @@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
71588 /*
71589 * We may have set a slab to be unmergeable during bootstrap.
71590 */
71591 - if (s->refcount < 0)
71592 + if (atomic_read(&s->refcount) < 0)
71593 return 1;
71594
71595 return 0;
71596 @@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71597 down_write(&slub_lock);
71598 s = find_mergeable(size, align, flags, name, ctor);
71599 if (s) {
71600 - s->refcount++;
71601 + atomic_inc(&s->refcount);
71602 /*
71603 * Adjust the object sizes so that we clear
71604 * the complete object on kzalloc.
71605 @@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71606 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
71607
71608 if (sysfs_slab_alias(s, name)) {
71609 - s->refcount--;
71610 + atomic_dec(&s->refcount);
71611 goto err;
71612 }
71613 up_write(&slub_lock);
71614 @@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
71615 }
71616 #endif
71617
71618 -#ifdef CONFIG_SYSFS
71619 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71620 static int count_inuse(struct page *page)
71621 {
71622 return page->inuse;
71623 @@ -4410,12 +4455,12 @@ static void resiliency_test(void)
71624 validate_slab_cache(kmalloc_caches[9]);
71625 }
71626 #else
71627 -#ifdef CONFIG_SYSFS
71628 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71629 static void resiliency_test(void) {};
71630 #endif
71631 #endif
71632
71633 -#ifdef CONFIG_SYSFS
71634 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71635 enum slab_stat_type {
71636 SL_ALL, /* All slabs */
71637 SL_PARTIAL, /* Only partially allocated slabs */
71638 @@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
71639
71640 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71641 {
71642 - return sprintf(buf, "%d\n", s->refcount - 1);
71643 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71644 }
71645 SLAB_ATTR_RO(aliases);
71646
71647 @@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
71648 return name;
71649 }
71650
71651 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71652 static int sysfs_slab_add(struct kmem_cache *s)
71653 {
71654 int err;
71655 @@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
71656 kobject_del(&s->kobj);
71657 kobject_put(&s->kobj);
71658 }
71659 +#endif
71660
71661 /*
71662 * Need to buffer aliases during bootup until sysfs becomes
71663 @@ -5298,6 +5345,7 @@ struct saved_alias {
71664
71665 static struct saved_alias *alias_list;
71666
71667 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71668 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71669 {
71670 struct saved_alias *al;
71671 @@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71672 alias_list = al;
71673 return 0;
71674 }
71675 +#endif
71676
71677 static int __init slab_sysfs_init(void)
71678 {
71679 diff --git a/mm/swap.c b/mm/swap.c
71680 index 55b266d..a532537 100644
71681 --- a/mm/swap.c
71682 +++ b/mm/swap.c
71683 @@ -31,6 +31,7 @@
71684 #include <linux/backing-dev.h>
71685 #include <linux/memcontrol.h>
71686 #include <linux/gfp.h>
71687 +#include <linux/hugetlb.h>
71688
71689 #include "internal.h"
71690
71691 @@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
71692
71693 __page_cache_release(page);
71694 dtor = get_compound_page_dtor(page);
71695 + if (!PageHuge(page))
71696 + BUG_ON(dtor != free_compound_page);
71697 (*dtor)(page);
71698 }
71699
71700 diff --git a/mm/swapfile.c b/mm/swapfile.c
71701 index b1cd120..aaae885 100644
71702 --- a/mm/swapfile.c
71703 +++ b/mm/swapfile.c
71704 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
71705
71706 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71707 /* Activity counter to indicate that a swapon or swapoff has occurred */
71708 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
71709 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71710
71711 static inline unsigned char swap_count(unsigned char ent)
71712 {
71713 @@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
71714 }
71715 filp_close(swap_file, NULL);
71716 err = 0;
71717 - atomic_inc(&proc_poll_event);
71718 + atomic_inc_unchecked(&proc_poll_event);
71719 wake_up_interruptible(&proc_poll_wait);
71720
71721 out_dput:
71722 @@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
71723
71724 poll_wait(file, &proc_poll_wait, wait);
71725
71726 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
71727 - seq->poll_event = atomic_read(&proc_poll_event);
71728 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71729 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71730 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71731 }
71732
71733 @@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
71734 return ret;
71735
71736 seq = file->private_data;
71737 - seq->poll_event = atomic_read(&proc_poll_event);
71738 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71739 return 0;
71740 }
71741
71742 @@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
71743 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71744
71745 mutex_unlock(&swapon_mutex);
71746 - atomic_inc(&proc_poll_event);
71747 + atomic_inc_unchecked(&proc_poll_event);
71748 wake_up_interruptible(&proc_poll_wait);
71749
71750 if (S_ISREG(inode->i_mode))
71751 diff --git a/mm/util.c b/mm/util.c
71752 index 136ac4f..5117eef 100644
71753 --- a/mm/util.c
71754 +++ b/mm/util.c
71755 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71756 * allocated buffer. Use this if you don't want to free the buffer immediately
71757 * like, for example, with RCU.
71758 */
71759 +#undef __krealloc
71760 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71761 {
71762 void *ret;
71763 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71764 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71765 * %NULL pointer, the object pointed to is freed.
71766 */
71767 +#undef krealloc
71768 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71769 {
71770 void *ret;
71771 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
71772 void arch_pick_mmap_layout(struct mm_struct *mm)
71773 {
71774 mm->mmap_base = TASK_UNMAPPED_BASE;
71775 +
71776 +#ifdef CONFIG_PAX_RANDMMAP
71777 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71778 + mm->mmap_base += mm->delta_mmap;
71779 +#endif
71780 +
71781 mm->get_unmapped_area = arch_get_unmapped_area;
71782 mm->unmap_area = arch_unmap_area;
71783 }
71784 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
71785 index 27be2f0..0aef2c2 100644
71786 --- a/mm/vmalloc.c
71787 +++ b/mm/vmalloc.c
71788 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
71789
71790 pte = pte_offset_kernel(pmd, addr);
71791 do {
71792 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71793 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71794 +
71795 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71796 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71797 + BUG_ON(!pte_exec(*pte));
71798 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71799 + continue;
71800 + }
71801 +#endif
71802 +
71803 + {
71804 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71805 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71806 + }
71807 } while (pte++, addr += PAGE_SIZE, addr != end);
71808 }
71809
71810 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71811 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71812 {
71813 pte_t *pte;
71814 + int ret = -ENOMEM;
71815
71816 /*
71817 * nr is a running index into the array which helps higher level
71818 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71819 pte = pte_alloc_kernel(pmd, addr);
71820 if (!pte)
71821 return -ENOMEM;
71822 +
71823 + pax_open_kernel();
71824 do {
71825 struct page *page = pages[*nr];
71826
71827 - if (WARN_ON(!pte_none(*pte)))
71828 - return -EBUSY;
71829 - if (WARN_ON(!page))
71830 - return -ENOMEM;
71831 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71832 + if (pgprot_val(prot) & _PAGE_NX)
71833 +#endif
71834 +
71835 + if (WARN_ON(!pte_none(*pte))) {
71836 + ret = -EBUSY;
71837 + goto out;
71838 + }
71839 + if (WARN_ON(!page)) {
71840 + ret = -ENOMEM;
71841 + goto out;
71842 + }
71843 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71844 (*nr)++;
71845 } while (pte++, addr += PAGE_SIZE, addr != end);
71846 - return 0;
71847 + ret = 0;
71848 +out:
71849 + pax_close_kernel();
71850 + return ret;
71851 }
71852
71853 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71854 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
71855 * and fall back on vmalloc() if that fails. Others
71856 * just put it in the vmalloc space.
71857 */
71858 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71859 +#ifdef CONFIG_MODULES
71860 +#ifdef MODULES_VADDR
71861 unsigned long addr = (unsigned long)x;
71862 if (addr >= MODULES_VADDR && addr < MODULES_END)
71863 return 1;
71864 #endif
71865 +
71866 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71867 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71868 + return 1;
71869 +#endif
71870 +
71871 +#endif
71872 +
71873 return is_vmalloc_addr(x);
71874 }
71875
71876 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
71877
71878 if (!pgd_none(*pgd)) {
71879 pud_t *pud = pud_offset(pgd, addr);
71880 +#ifdef CONFIG_X86
71881 + if (!pud_large(*pud))
71882 +#endif
71883 if (!pud_none(*pud)) {
71884 pmd_t *pmd = pmd_offset(pud, addr);
71885 +#ifdef CONFIG_X86
71886 + if (!pmd_large(*pmd))
71887 +#endif
71888 if (!pmd_none(*pmd)) {
71889 pte_t *ptep, pte;
71890
71891 @@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
71892 struct vm_struct *area;
71893
71894 BUG_ON(in_interrupt());
71895 +
71896 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71897 + if (flags & VM_KERNEXEC) {
71898 + if (start != VMALLOC_START || end != VMALLOC_END)
71899 + return NULL;
71900 + start = (unsigned long)MODULES_EXEC_VADDR;
71901 + end = (unsigned long)MODULES_EXEC_END;
71902 + }
71903 +#endif
71904 +
71905 if (flags & VM_IOREMAP) {
71906 int bit = fls(size);
71907
71908 @@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
71909 if (count > totalram_pages)
71910 return NULL;
71911
71912 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71913 + if (!(pgprot_val(prot) & _PAGE_NX))
71914 + flags |= VM_KERNEXEC;
71915 +#endif
71916 +
71917 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71918 __builtin_return_address(0));
71919 if (!area)
71920 @@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
71921 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71922 goto fail;
71923
71924 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71925 + if (!(pgprot_val(prot) & _PAGE_NX))
71926 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71927 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71928 + else
71929 +#endif
71930 +
71931 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71932 start, end, node, gfp_mask, caller);
71933 if (!area)
71934 @@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
71935 gfp_mask, prot, node, caller);
71936 }
71937
71938 +#undef __vmalloc
71939 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71940 {
71941 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71942 @@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
71943 * For tight control over page level allocator and protection flags
71944 * use __vmalloc() instead.
71945 */
71946 +#undef vmalloc
71947 void *vmalloc(unsigned long size)
71948 {
71949 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71950 @@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
71951 * For tight control over page level allocator and protection flags
71952 * use __vmalloc() instead.
71953 */
71954 +#undef vzalloc
71955 void *vzalloc(unsigned long size)
71956 {
71957 return __vmalloc_node_flags(size, -1,
71958 @@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
71959 * The resulting memory area is zeroed so it can be mapped to userspace
71960 * without leaking data.
71961 */
71962 +#undef vmalloc_user
71963 void *vmalloc_user(unsigned long size)
71964 {
71965 struct vm_struct *area;
71966 @@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
71967 * For tight control over page level allocator and protection flags
71968 * use __vmalloc() instead.
71969 */
71970 +#undef vmalloc_node
71971 void *vmalloc_node(unsigned long size, int node)
71972 {
71973 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71974 @@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
71975 * For tight control over page level allocator and protection flags
71976 * use __vmalloc_node() instead.
71977 */
71978 +#undef vzalloc_node
71979 void *vzalloc_node(unsigned long size, int node)
71980 {
71981 return __vmalloc_node_flags(size, node,
71982 @@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
71983 * For tight control over page level allocator and protection flags
71984 * use __vmalloc() instead.
71985 */
71986 -
71987 +#undef vmalloc_exec
71988 void *vmalloc_exec(unsigned long size)
71989 {
71990 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71991 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71992 -1, __builtin_return_address(0));
71993 }
71994
71995 @@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
71996 * Allocate enough 32bit PA addressable pages to cover @size from the
71997 * page level allocator and map them into contiguous kernel virtual space.
71998 */
71999 +#undef vmalloc_32
72000 void *vmalloc_32(unsigned long size)
72001 {
72002 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
72003 @@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
72004 * The resulting memory area is 32bit addressable and zeroed so it can be
72005 * mapped to userspace without leaking data.
72006 */
72007 +#undef vmalloc_32_user
72008 void *vmalloc_32_user(unsigned long size)
72009 {
72010 struct vm_struct *area;
72011 @@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
72012 unsigned long uaddr = vma->vm_start;
72013 unsigned long usize = vma->vm_end - vma->vm_start;
72014
72015 + BUG_ON(vma->vm_mirror);
72016 +
72017 if ((PAGE_SIZE-1) & (unsigned long)addr)
72018 return -EINVAL;
72019
72020 diff --git a/mm/vmstat.c b/mm/vmstat.c
72021 index 8fd603b..cf0d930 100644
72022 --- a/mm/vmstat.c
72023 +++ b/mm/vmstat.c
72024 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
72025 *
72026 * vm_stat contains the global counters
72027 */
72028 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
72029 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
72030 EXPORT_SYMBOL(vm_stat);
72031
72032 #ifdef CONFIG_SMP
72033 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
72034 v = p->vm_stat_diff[i];
72035 p->vm_stat_diff[i] = 0;
72036 local_irq_restore(flags);
72037 - atomic_long_add(v, &zone->vm_stat[i]);
72038 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
72039 global_diff[i] += v;
72040 #ifdef CONFIG_NUMA
72041 /* 3 seconds idle till flush */
72042 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
72043
72044 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
72045 if (global_diff[i])
72046 - atomic_long_add(global_diff[i], &vm_stat[i]);
72047 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
72048 }
72049
72050 #endif
72051 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
72052 start_cpu_timer(cpu);
72053 #endif
72054 #ifdef CONFIG_PROC_FS
72055 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
72056 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
72057 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
72058 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
72059 + {
72060 + mode_t gr_mode = S_IRUGO;
72061 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
72062 + gr_mode = S_IRUSR;
72063 +#endif
72064 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
72065 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
72066 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72067 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
72068 +#else
72069 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
72070 +#endif
72071 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
72072 + }
72073 #endif
72074 return 0;
72075 }
72076 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
72077 index 5471628..cef8398 100644
72078 --- a/net/8021q/vlan.c
72079 +++ b/net/8021q/vlan.c
72080 @@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
72081 err = -EPERM;
72082 if (!capable(CAP_NET_ADMIN))
72083 break;
72084 - if ((args.u.name_type >= 0) &&
72085 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
72086 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
72087 struct vlan_net *vn;
72088
72089 vn = net_generic(net, vlan_net_id);
72090 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
72091 index fdfdb57..38d368c 100644
72092 --- a/net/9p/trans_fd.c
72093 +++ b/net/9p/trans_fd.c
72094 @@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
72095 oldfs = get_fs();
72096 set_fs(get_ds());
72097 /* The cast to a user pointer is valid due to the set_fs() */
72098 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
72099 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
72100 set_fs(oldfs);
72101
72102 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
72103 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
72104 index f41f026..fe76ea8 100644
72105 --- a/net/atm/atm_misc.c
72106 +++ b/net/atm/atm_misc.c
72107 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
72108 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
72109 return 1;
72110 atm_return(vcc, truesize);
72111 - atomic_inc(&vcc->stats->rx_drop);
72112 + atomic_inc_unchecked(&vcc->stats->rx_drop);
72113 return 0;
72114 }
72115 EXPORT_SYMBOL(atm_charge);
72116 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
72117 }
72118 }
72119 atm_return(vcc, guess);
72120 - atomic_inc(&vcc->stats->rx_drop);
72121 + atomic_inc_unchecked(&vcc->stats->rx_drop);
72122 return NULL;
72123 }
72124 EXPORT_SYMBOL(atm_alloc_charge);
72125 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
72126
72127 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
72128 {
72129 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
72130 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
72131 __SONET_ITEMS
72132 #undef __HANDLE_ITEM
72133 }
72134 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
72135
72136 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
72137 {
72138 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
72139 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
72140 __SONET_ITEMS
72141 #undef __HANDLE_ITEM
72142 }
72143 diff --git a/net/atm/lec.h b/net/atm/lec.h
72144 index dfc0719..47c5322 100644
72145 --- a/net/atm/lec.h
72146 +++ b/net/atm/lec.h
72147 @@ -48,7 +48,7 @@ struct lane2_ops {
72148 const u8 *tlvs, u32 sizeoftlvs);
72149 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
72150 const u8 *tlvs, u32 sizeoftlvs);
72151 -};
72152 +} __no_const;
72153
72154 /*
72155 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
72156 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
72157 index 0919a88..a23d54e 100644
72158 --- a/net/atm/mpc.h
72159 +++ b/net/atm/mpc.h
72160 @@ -33,7 +33,7 @@ struct mpoa_client {
72161 struct mpc_parameters parameters; /* parameters for this client */
72162
72163 const struct net_device_ops *old_ops;
72164 - struct net_device_ops new_ops;
72165 + net_device_ops_no_const new_ops;
72166 };
72167
72168
72169 diff --git a/net/atm/proc.c b/net/atm/proc.c
72170 index 0d020de..011c7bb 100644
72171 --- a/net/atm/proc.c
72172 +++ b/net/atm/proc.c
72173 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
72174 const struct k_atm_aal_stats *stats)
72175 {
72176 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
72177 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
72178 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
72179 - atomic_read(&stats->rx_drop));
72180 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
72181 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
72182 + atomic_read_unchecked(&stats->rx_drop));
72183 }
72184
72185 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
72186 diff --git a/net/atm/resources.c b/net/atm/resources.c
72187 index 23f45ce..c748f1a 100644
72188 --- a/net/atm/resources.c
72189 +++ b/net/atm/resources.c
72190 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
72191 static void copy_aal_stats(struct k_atm_aal_stats *from,
72192 struct atm_aal_stats *to)
72193 {
72194 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
72195 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
72196 __AAL_STAT_ITEMS
72197 #undef __HANDLE_ITEM
72198 }
72199 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
72200 static void subtract_aal_stats(struct k_atm_aal_stats *from,
72201 struct atm_aal_stats *to)
72202 {
72203 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
72204 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
72205 __AAL_STAT_ITEMS
72206 #undef __HANDLE_ITEM
72207 }
72208 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
72209 index 3512e25..2b33401 100644
72210 --- a/net/batman-adv/bat_iv_ogm.c
72211 +++ b/net/batman-adv/bat_iv_ogm.c
72212 @@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
72213
72214 /* change sequence number to network order */
72215 batman_ogm_packet->seqno =
72216 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
72217 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
72218
72219 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
72220 batman_ogm_packet->tt_crc = htons((uint16_t)
72221 @@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
72222 else
72223 batman_ogm_packet->gw_flags = NO_FLAGS;
72224
72225 - atomic_inc(&hard_iface->seqno);
72226 + atomic_inc_unchecked(&hard_iface->seqno);
72227
72228 slide_own_bcast_window(hard_iface);
72229 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
72230 @@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
72231 return;
72232
72233 /* could be changed by schedule_own_packet() */
72234 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
72235 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
72236
72237 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
72238
72239 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
72240 index 7704df4..beb4e16 100644
72241 --- a/net/batman-adv/hard-interface.c
72242 +++ b/net/batman-adv/hard-interface.c
72243 @@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
72244 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
72245 dev_add_pack(&hard_iface->batman_adv_ptype);
72246
72247 - atomic_set(&hard_iface->seqno, 1);
72248 - atomic_set(&hard_iface->frag_seqno, 1);
72249 + atomic_set_unchecked(&hard_iface->seqno, 1);
72250 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
72251 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
72252 hard_iface->net_dev->name);
72253
72254 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
72255 index f9cc957..efd9dae 100644
72256 --- a/net/batman-adv/soft-interface.c
72257 +++ b/net/batman-adv/soft-interface.c
72258 @@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
72259
72260 /* set broadcast sequence number */
72261 bcast_packet->seqno =
72262 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
72263 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
72264
72265 add_bcast_packet_to_list(bat_priv, skb, 1);
72266
72267 @@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
72268 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
72269
72270 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
72271 - atomic_set(&bat_priv->bcast_seqno, 1);
72272 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
72273 atomic_set(&bat_priv->ttvn, 0);
72274 atomic_set(&bat_priv->tt_local_changes, 0);
72275 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
72276 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
72277 index ab8d0fe..ceba3fd 100644
72278 --- a/net/batman-adv/types.h
72279 +++ b/net/batman-adv/types.h
72280 @@ -38,8 +38,8 @@ struct hard_iface {
72281 int16_t if_num;
72282 char if_status;
72283 struct net_device *net_dev;
72284 - atomic_t seqno;
72285 - atomic_t frag_seqno;
72286 + atomic_unchecked_t seqno;
72287 + atomic_unchecked_t frag_seqno;
72288 unsigned char *packet_buff;
72289 int packet_len;
72290 struct kobject *hardif_obj;
72291 @@ -154,7 +154,7 @@ struct bat_priv {
72292 atomic_t orig_interval; /* uint */
72293 atomic_t hop_penalty; /* uint */
72294 atomic_t log_level; /* uint */
72295 - atomic_t bcast_seqno;
72296 + atomic_unchecked_t bcast_seqno;
72297 atomic_t bcast_queue_left;
72298 atomic_t batman_queue_left;
72299 atomic_t ttvn; /* translation table version number */
72300 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
72301 index 07d1c1d..7e9bea9 100644
72302 --- a/net/batman-adv/unicast.c
72303 +++ b/net/batman-adv/unicast.c
72304 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
72305 frag1->flags = UNI_FRAG_HEAD | large_tail;
72306 frag2->flags = large_tail;
72307
72308 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
72309 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
72310 frag1->seqno = htons(seqno - 1);
72311 frag2->seqno = htons(seqno);
72312
72313 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
72314 index c1c597e..05ebb40 100644
72315 --- a/net/bluetooth/hci_conn.c
72316 +++ b/net/bluetooth/hci_conn.c
72317 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
72318 memset(&cp, 0, sizeof(cp));
72319
72320 cp.handle = cpu_to_le16(conn->handle);
72321 - memcpy(cp.ltk, ltk, sizeof(ltk));
72322 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
72323
72324 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
72325 }
72326 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
72327 index 17b5b1c..826d872 100644
72328 --- a/net/bluetooth/l2cap_core.c
72329 +++ b/net/bluetooth/l2cap_core.c
72330 @@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
72331 break;
72332
72333 case L2CAP_CONF_RFC:
72334 - if (olen == sizeof(rfc))
72335 - memcpy(&rfc, (void *)val, olen);
72336 + if (olen != sizeof(rfc))
72337 + break;
72338 +
72339 + memcpy(&rfc, (void *)val, olen);
72340
72341 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
72342 rfc.mode != chan->mode)
72343 @@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
72344
72345 switch (type) {
72346 case L2CAP_CONF_RFC:
72347 - if (olen == sizeof(rfc))
72348 - memcpy(&rfc, (void *)val, olen);
72349 + if (olen != sizeof(rfc))
72350 + break;
72351 +
72352 + memcpy(&rfc, (void *)val, olen);
72353 goto done;
72354 }
72355 }
72356 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
72357 index a5f4e57..910ee6d 100644
72358 --- a/net/bridge/br_multicast.c
72359 +++ b/net/bridge/br_multicast.c
72360 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
72361 nexthdr = ip6h->nexthdr;
72362 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
72363
72364 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
72365 + if (nexthdr != IPPROTO_ICMPV6)
72366 return 0;
72367
72368 /* Okay, we found ICMPv6 header */
72369 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
72370 index 5864cc4..121f3a3 100644
72371 --- a/net/bridge/netfilter/ebtables.c
72372 +++ b/net/bridge/netfilter/ebtables.c
72373 @@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
72374 tmp.valid_hooks = t->table->valid_hooks;
72375 }
72376 mutex_unlock(&ebt_mutex);
72377 - if (copy_to_user(user, &tmp, *len) != 0){
72378 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
72379 BUGPRINT("c2u Didn't work\n");
72380 ret = -EFAULT;
72381 break;
72382 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
72383 index a986280..13444a1 100644
72384 --- a/net/caif/caif_socket.c
72385 +++ b/net/caif/caif_socket.c
72386 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
72387 #ifdef CONFIG_DEBUG_FS
72388 struct debug_fs_counter {
72389 atomic_t caif_nr_socks;
72390 - atomic_t caif_sock_create;
72391 - atomic_t num_connect_req;
72392 - atomic_t num_connect_resp;
72393 - atomic_t num_connect_fail_resp;
72394 - atomic_t num_disconnect;
72395 - atomic_t num_remote_shutdown_ind;
72396 - atomic_t num_tx_flow_off_ind;
72397 - atomic_t num_tx_flow_on_ind;
72398 - atomic_t num_rx_flow_off;
72399 - atomic_t num_rx_flow_on;
72400 + atomic_unchecked_t caif_sock_create;
72401 + atomic_unchecked_t num_connect_req;
72402 + atomic_unchecked_t num_connect_resp;
72403 + atomic_unchecked_t num_connect_fail_resp;
72404 + atomic_unchecked_t num_disconnect;
72405 + atomic_unchecked_t num_remote_shutdown_ind;
72406 + atomic_unchecked_t num_tx_flow_off_ind;
72407 + atomic_unchecked_t num_tx_flow_on_ind;
72408 + atomic_unchecked_t num_rx_flow_off;
72409 + atomic_unchecked_t num_rx_flow_on;
72410 };
72411 static struct debug_fs_counter cnt;
72412 #define dbfs_atomic_inc(v) atomic_inc_return(v)
72413 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
72414 #define dbfs_atomic_dec(v) atomic_dec_return(v)
72415 #else
72416 #define dbfs_atomic_inc(v) 0
72417 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72418 atomic_read(&cf_sk->sk.sk_rmem_alloc),
72419 sk_rcvbuf_lowwater(cf_sk));
72420 set_rx_flow_off(cf_sk);
72421 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
72422 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72423 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72424 }
72425
72426 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72427 set_rx_flow_off(cf_sk);
72428 if (net_ratelimit())
72429 pr_debug("sending flow OFF due to rmem_schedule\n");
72430 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
72431 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72432 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72433 }
72434 skb->dev = NULL;
72435 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
72436 switch (flow) {
72437 case CAIF_CTRLCMD_FLOW_ON_IND:
72438 /* OK from modem to start sending again */
72439 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
72440 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
72441 set_tx_flow_on(cf_sk);
72442 cf_sk->sk.sk_state_change(&cf_sk->sk);
72443 break;
72444
72445 case CAIF_CTRLCMD_FLOW_OFF_IND:
72446 /* Modem asks us to shut up */
72447 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
72448 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
72449 set_tx_flow_off(cf_sk);
72450 cf_sk->sk.sk_state_change(&cf_sk->sk);
72451 break;
72452 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72453 /* We're now connected */
72454 caif_client_register_refcnt(&cf_sk->layer,
72455 cfsk_hold, cfsk_put);
72456 - dbfs_atomic_inc(&cnt.num_connect_resp);
72457 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
72458 cf_sk->sk.sk_state = CAIF_CONNECTED;
72459 set_tx_flow_on(cf_sk);
72460 cf_sk->sk.sk_state_change(&cf_sk->sk);
72461 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72462
72463 case CAIF_CTRLCMD_INIT_FAIL_RSP:
72464 /* Connect request failed */
72465 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
72466 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
72467 cf_sk->sk.sk_err = ECONNREFUSED;
72468 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
72469 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72470 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72471
72472 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
72473 /* Modem has closed this connection, or device is down. */
72474 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
72475 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
72476 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72477 cf_sk->sk.sk_err = ECONNRESET;
72478 set_rx_flow_on(cf_sk);
72479 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
72480 return;
72481
72482 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
72483 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
72484 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
72485 set_rx_flow_on(cf_sk);
72486 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
72487 }
72488 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
72489 /*ifindex = id of the interface.*/
72490 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
72491
72492 - dbfs_atomic_inc(&cnt.num_connect_req);
72493 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
72494 cf_sk->layer.receive = caif_sktrecv_cb;
72495
72496 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
72497 @@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
72498 spin_unlock_bh(&sk->sk_receive_queue.lock);
72499 sock->sk = NULL;
72500
72501 - dbfs_atomic_inc(&cnt.num_disconnect);
72502 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
72503
72504 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
72505 if (cf_sk->debugfs_socket_dir != NULL)
72506 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
72507 cf_sk->conn_req.protocol = protocol;
72508 /* Increase the number of sockets created. */
72509 dbfs_atomic_inc(&cnt.caif_nr_socks);
72510 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
72511 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
72512 #ifdef CONFIG_DEBUG_FS
72513 if (!IS_ERR(debugfsdir)) {
72514
72515 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
72516 index 5cf5222..6f704ad 100644
72517 --- a/net/caif/cfctrl.c
72518 +++ b/net/caif/cfctrl.c
72519 @@ -9,6 +9,7 @@
72520 #include <linux/stddef.h>
72521 #include <linux/spinlock.h>
72522 #include <linux/slab.h>
72523 +#include <linux/sched.h>
72524 #include <net/caif/caif_layer.h>
72525 #include <net/caif/cfpkt.h>
72526 #include <net/caif/cfctrl.h>
72527 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
72528 memset(&dev_info, 0, sizeof(dev_info));
72529 dev_info.id = 0xff;
72530 cfsrvl_init(&this->serv, 0, &dev_info, false);
72531 - atomic_set(&this->req_seq_no, 1);
72532 - atomic_set(&this->rsp_seq_no, 1);
72533 + atomic_set_unchecked(&this->req_seq_no, 1);
72534 + atomic_set_unchecked(&this->rsp_seq_no, 1);
72535 this->serv.layer.receive = cfctrl_recv;
72536 sprintf(this->serv.layer.name, "ctrl");
72537 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
72538 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
72539 struct cfctrl_request_info *req)
72540 {
72541 spin_lock_bh(&ctrl->info_list_lock);
72542 - atomic_inc(&ctrl->req_seq_no);
72543 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
72544 + atomic_inc_unchecked(&ctrl->req_seq_no);
72545 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
72546 list_add_tail(&req->list, &ctrl->list);
72547 spin_unlock_bh(&ctrl->info_list_lock);
72548 }
72549 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
72550 if (p != first)
72551 pr_warn("Requests are not received in order\n");
72552
72553 - atomic_set(&ctrl->rsp_seq_no,
72554 + atomic_set_unchecked(&ctrl->rsp_seq_no,
72555 p->sequence_no);
72556 list_del(&p->list);
72557 goto out;
72558 diff --git a/net/can/gw.c b/net/can/gw.c
72559 index 3d79b12..8de85fa 100644
72560 --- a/net/can/gw.c
72561 +++ b/net/can/gw.c
72562 @@ -96,7 +96,7 @@ struct cf_mod {
72563 struct {
72564 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
72565 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
72566 - } csumfunc;
72567 + } __no_const csumfunc;
72568 };
72569
72570
72571 diff --git a/net/compat.c b/net/compat.c
72572 index 6def90e..c6992fa 100644
72573 --- a/net/compat.c
72574 +++ b/net/compat.c
72575 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
72576 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72577 __get_user(kmsg->msg_flags, &umsg->msg_flags))
72578 return -EFAULT;
72579 - kmsg->msg_name = compat_ptr(tmp1);
72580 - kmsg->msg_iov = compat_ptr(tmp2);
72581 - kmsg->msg_control = compat_ptr(tmp3);
72582 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
72583 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
72584 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
72585 return 0;
72586 }
72587
72588 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72589
72590 if (kern_msg->msg_namelen) {
72591 if (mode == VERIFY_READ) {
72592 - int err = move_addr_to_kernel(kern_msg->msg_name,
72593 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
72594 kern_msg->msg_namelen,
72595 kern_address);
72596 if (err < 0)
72597 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72598 kern_msg->msg_name = NULL;
72599
72600 tot_len = iov_from_user_compat_to_kern(kern_iov,
72601 - (struct compat_iovec __user *)kern_msg->msg_iov,
72602 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
72603 kern_msg->msg_iovlen);
72604 if (tot_len >= 0)
72605 kern_msg->msg_iov = kern_iov;
72606 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72607
72608 #define CMSG_COMPAT_FIRSTHDR(msg) \
72609 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72610 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72611 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72612 (struct compat_cmsghdr __user *)NULL)
72613
72614 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72615 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72616 (ucmlen) <= (unsigned long) \
72617 ((mhdr)->msg_controllen - \
72618 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72619 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72620
72621 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72622 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72623 {
72624 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72625 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72626 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72627 msg->msg_controllen)
72628 return NULL;
72629 return (struct compat_cmsghdr __user *)ptr;
72630 @@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72631 {
72632 struct compat_timeval ctv;
72633 struct compat_timespec cts[3];
72634 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72635 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72636 struct compat_cmsghdr cmhdr;
72637 int cmlen;
72638
72639 @@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72640
72641 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72642 {
72643 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72644 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72645 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72646 int fdnum = scm->fp->count;
72647 struct file **fp = scm->fp->fp;
72648 @@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
72649 return -EFAULT;
72650 old_fs = get_fs();
72651 set_fs(KERNEL_DS);
72652 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72653 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72654 set_fs(old_fs);
72655
72656 return err;
72657 @@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
72658 len = sizeof(ktime);
72659 old_fs = get_fs();
72660 set_fs(KERNEL_DS);
72661 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72662 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72663 set_fs(old_fs);
72664
72665 if (!err) {
72666 @@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72667 case MCAST_JOIN_GROUP:
72668 case MCAST_LEAVE_GROUP:
72669 {
72670 - struct compat_group_req __user *gr32 = (void *)optval;
72671 + struct compat_group_req __user *gr32 = (void __user *)optval;
72672 struct group_req __user *kgr =
72673 compat_alloc_user_space(sizeof(struct group_req));
72674 u32 interface;
72675 @@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72676 case MCAST_BLOCK_SOURCE:
72677 case MCAST_UNBLOCK_SOURCE:
72678 {
72679 - struct compat_group_source_req __user *gsr32 = (void *)optval;
72680 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72681 struct group_source_req __user *kgsr = compat_alloc_user_space(
72682 sizeof(struct group_source_req));
72683 u32 interface;
72684 @@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72685 }
72686 case MCAST_MSFILTER:
72687 {
72688 - struct compat_group_filter __user *gf32 = (void *)optval;
72689 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72690 struct group_filter __user *kgf;
72691 u32 interface, fmode, numsrc;
72692
72693 @@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
72694 char __user *optval, int __user *optlen,
72695 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72696 {
72697 - struct compat_group_filter __user *gf32 = (void *)optval;
72698 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72699 struct group_filter __user *kgf;
72700 int __user *koptlen;
72701 u32 interface, fmode, numsrc;
72702 diff --git a/net/core/datagram.c b/net/core/datagram.c
72703 index 68bbf9f..5ef0d12 100644
72704 --- a/net/core/datagram.c
72705 +++ b/net/core/datagram.c
72706 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
72707 }
72708
72709 kfree_skb(skb);
72710 - atomic_inc(&sk->sk_drops);
72711 + atomic_inc_unchecked(&sk->sk_drops);
72712 sk_mem_reclaim_partial(sk);
72713
72714 return err;
72715 diff --git a/net/core/dev.c b/net/core/dev.c
72716 index c56cacf..b28e35f 100644
72717 --- a/net/core/dev.c
72718 +++ b/net/core/dev.c
72719 @@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
72720 if (no_module && capable(CAP_NET_ADMIN))
72721 no_module = request_module("netdev-%s", name);
72722 if (no_module && capable(CAP_SYS_MODULE)) {
72723 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72724 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
72725 +#else
72726 if (!request_module("%s", name))
72727 pr_err("Loading kernel module for a network device "
72728 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72729 "instead\n", name);
72730 +#endif
72731 }
72732 }
72733 EXPORT_SYMBOL(dev_load);
72734 @@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72735 {
72736 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
72737 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
72738 - atomic_long_inc(&dev->rx_dropped);
72739 + atomic_long_inc_unchecked(&dev->rx_dropped);
72740 kfree_skb(skb);
72741 return NET_RX_DROP;
72742 }
72743 @@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72744 nf_reset(skb);
72745
72746 if (unlikely(!is_skb_forwardable(dev, skb))) {
72747 - atomic_long_inc(&dev->rx_dropped);
72748 + atomic_long_inc_unchecked(&dev->rx_dropped);
72749 kfree_skb(skb);
72750 return NET_RX_DROP;
72751 }
72752 @@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
72753
72754 struct dev_gso_cb {
72755 void (*destructor)(struct sk_buff *skb);
72756 -};
72757 +} __no_const;
72758
72759 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72760
72761 @@ -2970,7 +2974,7 @@ enqueue:
72762
72763 local_irq_restore(flags);
72764
72765 - atomic_long_inc(&skb->dev->rx_dropped);
72766 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72767 kfree_skb(skb);
72768 return NET_RX_DROP;
72769 }
72770 @@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
72771 }
72772 EXPORT_SYMBOL(netif_rx_ni);
72773
72774 -static void net_tx_action(struct softirq_action *h)
72775 +static void net_tx_action(void)
72776 {
72777 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72778
72779 @@ -3333,7 +3337,7 @@ ncls:
72780 if (pt_prev) {
72781 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
72782 } else {
72783 - atomic_long_inc(&skb->dev->rx_dropped);
72784 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72785 kfree_skb(skb);
72786 /* Jamal, now you will not able to escape explaining
72787 * me how you were going to use this. :-)
72788 @@ -3897,7 +3901,7 @@ void netif_napi_del(struct napi_struct *napi)
72789 }
72790 EXPORT_SYMBOL(netif_napi_del);
72791
72792 -static void net_rx_action(struct softirq_action *h)
72793 +static void net_rx_action(void)
72794 {
72795 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72796 unsigned long time_limit = jiffies + 2;
72797 @@ -5955,7 +5959,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
72798 } else {
72799 netdev_stats_to_stats64(storage, &dev->stats);
72800 }
72801 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
72802 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
72803 return storage;
72804 }
72805 EXPORT_SYMBOL(dev_get_stats);
72806 diff --git a/net/core/flow.c b/net/core/flow.c
72807 index e318c7e..168b1d0 100644
72808 --- a/net/core/flow.c
72809 +++ b/net/core/flow.c
72810 @@ -61,7 +61,7 @@ struct flow_cache {
72811 struct timer_list rnd_timer;
72812 };
72813
72814 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
72815 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72816 EXPORT_SYMBOL(flow_cache_genid);
72817 static struct flow_cache flow_cache_global;
72818 static struct kmem_cache *flow_cachep __read_mostly;
72819 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
72820
72821 static int flow_entry_valid(struct flow_cache_entry *fle)
72822 {
72823 - if (atomic_read(&flow_cache_genid) != fle->genid)
72824 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72825 return 0;
72826 if (fle->object && !fle->object->ops->check(fle->object))
72827 return 0;
72828 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
72829 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72830 fcp->hash_count++;
72831 }
72832 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72833 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72834 flo = fle->object;
72835 if (!flo)
72836 goto ret_object;
72837 @@ -280,7 +280,7 @@ nocache:
72838 }
72839 flo = resolver(net, key, family, dir, flo, ctx);
72840 if (fle) {
72841 - fle->genid = atomic_read(&flow_cache_genid);
72842 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
72843 if (!IS_ERR(flo))
72844 fle->object = flo;
72845 else
72846 diff --git a/net/core/iovec.c b/net/core/iovec.c
72847 index c40f27e..7f49254 100644
72848 --- a/net/core/iovec.c
72849 +++ b/net/core/iovec.c
72850 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72851 if (m->msg_namelen) {
72852 if (mode == VERIFY_READ) {
72853 void __user *namep;
72854 - namep = (void __user __force *) m->msg_name;
72855 + namep = (void __force_user *) m->msg_name;
72856 err = move_addr_to_kernel(namep, m->msg_namelen,
72857 address);
72858 if (err < 0)
72859 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72860 }
72861
72862 size = m->msg_iovlen * sizeof(struct iovec);
72863 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72864 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72865 return -EFAULT;
72866
72867 m->msg_iov = iov;
72868 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
72869 index 9083e82..1673203 100644
72870 --- a/net/core/rtnetlink.c
72871 +++ b/net/core/rtnetlink.c
72872 @@ -57,7 +57,7 @@ struct rtnl_link {
72873 rtnl_doit_func doit;
72874 rtnl_dumpit_func dumpit;
72875 rtnl_calcit_func calcit;
72876 -};
72877 +} __no_const;
72878
72879 static DEFINE_MUTEX(rtnl_mutex);
72880 static u16 min_ifinfo_dump_size;
72881 diff --git a/net/core/scm.c b/net/core/scm.c
72882 index ff52ad0..aff1c0f 100644
72883 --- a/net/core/scm.c
72884 +++ b/net/core/scm.c
72885 @@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
72886 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72887 {
72888 struct cmsghdr __user *cm
72889 - = (__force struct cmsghdr __user *)msg->msg_control;
72890 + = (struct cmsghdr __force_user *)msg->msg_control;
72891 struct cmsghdr cmhdr;
72892 int cmlen = CMSG_LEN(len);
72893 int err;
72894 @@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72895 err = -EFAULT;
72896 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72897 goto out;
72898 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72899 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72900 goto out;
72901 cmlen = CMSG_SPACE(len);
72902 if (msg->msg_controllen < cmlen)
72903 @@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
72904 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72905 {
72906 struct cmsghdr __user *cm
72907 - = (__force struct cmsghdr __user*)msg->msg_control;
72908 + = (struct cmsghdr __force_user *)msg->msg_control;
72909
72910 int fdmax = 0;
72911 int fdnum = scm->fp->count;
72912 @@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72913 if (fdnum < fdmax)
72914 fdmax = fdnum;
72915
72916 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72917 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72918 i++, cmfptr++)
72919 {
72920 int new_fd;
72921 diff --git a/net/core/sock.c b/net/core/sock.c
72922 index b23f174..b9a0d26 100644
72923 --- a/net/core/sock.c
72924 +++ b/net/core/sock.c
72925 @@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72926 struct sk_buff_head *list = &sk->sk_receive_queue;
72927
72928 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
72929 - atomic_inc(&sk->sk_drops);
72930 + atomic_inc_unchecked(&sk->sk_drops);
72931 trace_sock_rcvqueue_full(sk, skb);
72932 return -ENOMEM;
72933 }
72934 @@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72935 return err;
72936
72937 if (!sk_rmem_schedule(sk, skb->truesize)) {
72938 - atomic_inc(&sk->sk_drops);
72939 + atomic_inc_unchecked(&sk->sk_drops);
72940 return -ENOBUFS;
72941 }
72942
72943 @@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72944 skb_dst_force(skb);
72945
72946 spin_lock_irqsave(&list->lock, flags);
72947 - skb->dropcount = atomic_read(&sk->sk_drops);
72948 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72949 __skb_queue_tail(list, skb);
72950 spin_unlock_irqrestore(&list->lock, flags);
72951
72952 @@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72953 skb->dev = NULL;
72954
72955 if (sk_rcvqueues_full(sk, skb)) {
72956 - atomic_inc(&sk->sk_drops);
72957 + atomic_inc_unchecked(&sk->sk_drops);
72958 goto discard_and_relse;
72959 }
72960 if (nested)
72961 @@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72962 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72963 } else if (sk_add_backlog(sk, skb)) {
72964 bh_unlock_sock(sk);
72965 - atomic_inc(&sk->sk_drops);
72966 + atomic_inc_unchecked(&sk->sk_drops);
72967 goto discard_and_relse;
72968 }
72969
72970 @@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72971 if (len > sizeof(peercred))
72972 len = sizeof(peercred);
72973 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72974 - if (copy_to_user(optval, &peercred, len))
72975 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72976 return -EFAULT;
72977 goto lenout;
72978 }
72979 @@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72980 return -ENOTCONN;
72981 if (lv < len)
72982 return -EINVAL;
72983 - if (copy_to_user(optval, address, len))
72984 + if (len > sizeof(address) || copy_to_user(optval, address, len))
72985 return -EFAULT;
72986 goto lenout;
72987 }
72988 @@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72989
72990 if (len > lv)
72991 len = lv;
72992 - if (copy_to_user(optval, &v, len))
72993 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
72994 return -EFAULT;
72995 lenout:
72996 if (put_user(len, optlen))
72997 @@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
72998 */
72999 smp_wmb();
73000 atomic_set(&sk->sk_refcnt, 1);
73001 - atomic_set(&sk->sk_drops, 0);
73002 + atomic_set_unchecked(&sk->sk_drops, 0);
73003 }
73004 EXPORT_SYMBOL(sock_init_data);
73005
73006 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
73007 index 02e75d1..9a57a7c 100644
73008 --- a/net/decnet/sysctl_net_decnet.c
73009 +++ b/net/decnet/sysctl_net_decnet.c
73010 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
73011
73012 if (len > *lenp) len = *lenp;
73013
73014 - if (copy_to_user(buffer, addr, len))
73015 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
73016 return -EFAULT;
73017
73018 *lenp = len;
73019 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
73020
73021 if (len > *lenp) len = *lenp;
73022
73023 - if (copy_to_user(buffer, devname, len))
73024 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
73025 return -EFAULT;
73026
73027 *lenp = len;
73028 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
73029 index 39a2d29..f39c0fe 100644
73030 --- a/net/econet/Kconfig
73031 +++ b/net/econet/Kconfig
73032 @@ -4,7 +4,7 @@
73033
73034 config ECONET
73035 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
73036 - depends on EXPERIMENTAL && INET
73037 + depends on EXPERIMENTAL && INET && BROKEN
73038 ---help---
73039 Econet is a fairly old and slow networking protocol mainly used by
73040 Acorn computers to access file and print servers. It uses native
73041 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
73042 index 92fc5f6..b790d91 100644
73043 --- a/net/ipv4/fib_frontend.c
73044 +++ b/net/ipv4/fib_frontend.c
73045 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
73046 #ifdef CONFIG_IP_ROUTE_MULTIPATH
73047 fib_sync_up(dev);
73048 #endif
73049 - atomic_inc(&net->ipv4.dev_addr_genid);
73050 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73051 rt_cache_flush(dev_net(dev), -1);
73052 break;
73053 case NETDEV_DOWN:
73054 fib_del_ifaddr(ifa, NULL);
73055 - atomic_inc(&net->ipv4.dev_addr_genid);
73056 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73057 if (ifa->ifa_dev->ifa_list == NULL) {
73058 /* Last address was deleted from this interface.
73059 * Disable IP.
73060 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
73061 #ifdef CONFIG_IP_ROUTE_MULTIPATH
73062 fib_sync_up(dev);
73063 #endif
73064 - atomic_inc(&net->ipv4.dev_addr_genid);
73065 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
73066 rt_cache_flush(dev_net(dev), -1);
73067 break;
73068 case NETDEV_DOWN:
73069 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
73070 index 80106d8..232e898 100644
73071 --- a/net/ipv4/fib_semantics.c
73072 +++ b/net/ipv4/fib_semantics.c
73073 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
73074 nh->nh_saddr = inet_select_addr(nh->nh_dev,
73075 nh->nh_gw,
73076 nh->nh_parent->fib_scope);
73077 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
73078 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
73079
73080 return nh->nh_saddr;
73081 }
73082 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
73083 index ccee270..db23c3c 100644
73084 --- a/net/ipv4/inet_diag.c
73085 +++ b/net/ipv4/inet_diag.c
73086 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
73087 r->idiag_retrans = 0;
73088
73089 r->id.idiag_if = sk->sk_bound_dev_if;
73090 +
73091 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73092 + r->id.idiag_cookie[0] = 0;
73093 + r->id.idiag_cookie[1] = 0;
73094 +#else
73095 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
73096 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
73097 +#endif
73098
73099 r->id.idiag_sport = inet->inet_sport;
73100 r->id.idiag_dport = inet->inet_dport;
73101 @@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
73102 r->idiag_family = tw->tw_family;
73103 r->idiag_retrans = 0;
73104 r->id.idiag_if = tw->tw_bound_dev_if;
73105 +
73106 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73107 + r->id.idiag_cookie[0] = 0;
73108 + r->id.idiag_cookie[1] = 0;
73109 +#else
73110 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
73111 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
73112 +#endif
73113 +
73114 r->id.idiag_sport = tw->tw_sport;
73115 r->id.idiag_dport = tw->tw_dport;
73116 r->id.idiag_src[0] = tw->tw_rcv_saddr;
73117 @@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
73118 if (sk == NULL)
73119 goto unlock;
73120
73121 +#ifndef CONFIG_GRKERNSEC_HIDESYM
73122 err = -ESTALE;
73123 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
73124 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
73125 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
73126 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
73127 goto out;
73128 +#endif
73129
73130 err = -ENOMEM;
73131 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
73132 @@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
73133 r->idiag_retrans = req->retrans;
73134
73135 r->id.idiag_if = sk->sk_bound_dev_if;
73136 +
73137 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73138 + r->id.idiag_cookie[0] = 0;
73139 + r->id.idiag_cookie[1] = 0;
73140 +#else
73141 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
73142 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
73143 +#endif
73144
73145 tmo = req->expires - jiffies;
73146 if (tmo < 0)
73147 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
73148 index 984ec65..97ac518 100644
73149 --- a/net/ipv4/inet_hashtables.c
73150 +++ b/net/ipv4/inet_hashtables.c
73151 @@ -18,12 +18,15 @@
73152 #include <linux/sched.h>
73153 #include <linux/slab.h>
73154 #include <linux/wait.h>
73155 +#include <linux/security.h>
73156
73157 #include <net/inet_connection_sock.h>
73158 #include <net/inet_hashtables.h>
73159 #include <net/secure_seq.h>
73160 #include <net/ip.h>
73161
73162 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
73163 +
73164 /*
73165 * Allocate and initialize a new local port bind bucket.
73166 * The bindhash mutex for snum's hash chain must be held here.
73167 @@ -530,6 +533,8 @@ ok:
73168 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
73169 spin_unlock(&head->lock);
73170
73171 + gr_update_task_in_ip_table(current, inet_sk(sk));
73172 +
73173 if (tw) {
73174 inet_twsk_deschedule(tw, death_row);
73175 while (twrefcnt) {
73176 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
73177 index 86f13c67..59a35b5 100644
73178 --- a/net/ipv4/inetpeer.c
73179 +++ b/net/ipv4/inetpeer.c
73180 @@ -436,8 +436,8 @@ relookup:
73181 if (p) {
73182 p->daddr = *daddr;
73183 atomic_set(&p->refcnt, 1);
73184 - atomic_set(&p->rid, 0);
73185 - atomic_set(&p->ip_id_count,
73186 + atomic_set_unchecked(&p->rid, 0);
73187 + atomic_set_unchecked(&p->ip_id_count,
73188 (daddr->family == AF_INET) ?
73189 secure_ip_id(daddr->addr.a4) :
73190 secure_ipv6_id(daddr->addr.a6));
73191 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
73192 index fdaabf2..0ec3205 100644
73193 --- a/net/ipv4/ip_fragment.c
73194 +++ b/net/ipv4/ip_fragment.c
73195 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
73196 return 0;
73197
73198 start = qp->rid;
73199 - end = atomic_inc_return(&peer->rid);
73200 + end = atomic_inc_return_unchecked(&peer->rid);
73201 qp->rid = end;
73202
73203 rc = qp->q.fragments && (end - start) > max;
73204 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
73205 index 09ff51b..d3968eb 100644
73206 --- a/net/ipv4/ip_sockglue.c
73207 +++ b/net/ipv4/ip_sockglue.c
73208 @@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
73209 len = min_t(unsigned int, len, opt->optlen);
73210 if (put_user(len, optlen))
73211 return -EFAULT;
73212 - if (copy_to_user(optval, opt->__data, len))
73213 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
73214 + copy_to_user(optval, opt->__data, len))
73215 return -EFAULT;
73216 return 0;
73217 }
73218 @@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
73219 if (sk->sk_type != SOCK_STREAM)
73220 return -ENOPROTOOPT;
73221
73222 - msg.msg_control = optval;
73223 + msg.msg_control = (void __force_kernel *)optval;
73224 msg.msg_controllen = len;
73225 msg.msg_flags = flags;
73226
73227 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
73228 index 99ec116..c5628fe 100644
73229 --- a/net/ipv4/ipconfig.c
73230 +++ b/net/ipv4/ipconfig.c
73231 @@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
73232
73233 mm_segment_t oldfs = get_fs();
73234 set_fs(get_ds());
73235 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
73236 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
73237 set_fs(oldfs);
73238 return res;
73239 }
73240 @@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
73241
73242 mm_segment_t oldfs = get_fs();
73243 set_fs(get_ds());
73244 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
73245 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
73246 set_fs(oldfs);
73247 return res;
73248 }
73249 @@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
73250
73251 mm_segment_t oldfs = get_fs();
73252 set_fs(get_ds());
73253 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
73254 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
73255 set_fs(oldfs);
73256 return res;
73257 }
73258 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
73259 index 2133c30..5c4b40b 100644
73260 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
73261 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
73262 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
73263
73264 *len = 0;
73265
73266 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
73267 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
73268 if (*octets == NULL)
73269 return 0;
73270
73271 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
73272 index 43d4c3b..1914409 100644
73273 --- a/net/ipv4/ping.c
73274 +++ b/net/ipv4/ping.c
73275 @@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
73276 sk_rmem_alloc_get(sp),
73277 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73278 atomic_read(&sp->sk_refcnt), sp,
73279 - atomic_read(&sp->sk_drops), len);
73280 + atomic_read_unchecked(&sp->sk_drops), len);
73281 }
73282
73283 static int ping_seq_show(struct seq_file *seq, void *v)
73284 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
73285 index 007e2eb..85a18a0 100644
73286 --- a/net/ipv4/raw.c
73287 +++ b/net/ipv4/raw.c
73288 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
73289 int raw_rcv(struct sock *sk, struct sk_buff *skb)
73290 {
73291 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
73292 - atomic_inc(&sk->sk_drops);
73293 + atomic_inc_unchecked(&sk->sk_drops);
73294 kfree_skb(skb);
73295 return NET_RX_DROP;
73296 }
73297 @@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
73298
73299 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
73300 {
73301 + struct icmp_filter filter;
73302 +
73303 if (optlen > sizeof(struct icmp_filter))
73304 optlen = sizeof(struct icmp_filter);
73305 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
73306 + if (copy_from_user(&filter, optval, optlen))
73307 return -EFAULT;
73308 + raw_sk(sk)->filter = filter;
73309 return 0;
73310 }
73311
73312 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
73313 {
73314 int len, ret = -EFAULT;
73315 + struct icmp_filter filter;
73316
73317 if (get_user(len, optlen))
73318 goto out;
73319 @@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
73320 if (len > sizeof(struct icmp_filter))
73321 len = sizeof(struct icmp_filter);
73322 ret = -EFAULT;
73323 - if (put_user(len, optlen) ||
73324 - copy_to_user(optval, &raw_sk(sk)->filter, len))
73325 + filter = raw_sk(sk)->filter;
73326 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
73327 goto out;
73328 ret = 0;
73329 out: return ret;
73330 @@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73331 sk_wmem_alloc_get(sp),
73332 sk_rmem_alloc_get(sp),
73333 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73334 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73335 + atomic_read(&sp->sk_refcnt),
73336 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73337 + NULL,
73338 +#else
73339 + sp,
73340 +#endif
73341 + atomic_read_unchecked(&sp->sk_drops));
73342 }
73343
73344 static int raw_seq_show(struct seq_file *seq, void *v)
73345 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
73346 index 94cdbc5..0cb0063 100644
73347 --- a/net/ipv4/route.c
73348 +++ b/net/ipv4/route.c
73349 @@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
73350
73351 static inline int rt_genid(struct net *net)
73352 {
73353 - return atomic_read(&net->ipv4.rt_genid);
73354 + return atomic_read_unchecked(&net->ipv4.rt_genid);
73355 }
73356
73357 #ifdef CONFIG_PROC_FS
73358 @@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
73359 unsigned char shuffle;
73360
73361 get_random_bytes(&shuffle, sizeof(shuffle));
73362 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
73363 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
73364 redirect_genid++;
73365 }
73366
73367 @@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
73368 error = rt->dst.error;
73369 if (peer) {
73370 inet_peer_refcheck(rt->peer);
73371 - id = atomic_read(&peer->ip_id_count) & 0xffff;
73372 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
73373 if (peer->tcp_ts_stamp) {
73374 ts = peer->tcp_ts;
73375 tsage = get_seconds() - peer->tcp_ts_stamp;
73376 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
73377 index eb90aa8..22bf114 100644
73378 --- a/net/ipv4/tcp_ipv4.c
73379 +++ b/net/ipv4/tcp_ipv4.c
73380 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
73381 int sysctl_tcp_low_latency __read_mostly;
73382 EXPORT_SYMBOL(sysctl_tcp_low_latency);
73383
73384 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73385 +extern int grsec_enable_blackhole;
73386 +#endif
73387
73388 #ifdef CONFIG_TCP_MD5SIG
73389 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
73390 @@ -1632,6 +1635,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
73391 return 0;
73392
73393 reset:
73394 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73395 + if (!grsec_enable_blackhole)
73396 +#endif
73397 tcp_v4_send_reset(rsk, skb);
73398 discard:
73399 kfree_skb(skb);
73400 @@ -1694,12 +1700,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
73401 TCP_SKB_CB(skb)->sacked = 0;
73402
73403 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73404 - if (!sk)
73405 + if (!sk) {
73406 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73407 + ret = 1;
73408 +#endif
73409 goto no_tcp_socket;
73410 -
73411 + }
73412 process:
73413 - if (sk->sk_state == TCP_TIME_WAIT)
73414 + if (sk->sk_state == TCP_TIME_WAIT) {
73415 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73416 + ret = 2;
73417 +#endif
73418 goto do_time_wait;
73419 + }
73420
73421 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
73422 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73423 @@ -1749,6 +1762,10 @@ no_tcp_socket:
73424 bad_packet:
73425 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73426 } else {
73427 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73428 + if (!grsec_enable_blackhole || (ret == 1 &&
73429 + (skb->dev->flags & IFF_LOOPBACK)))
73430 +#endif
73431 tcp_v4_send_reset(NULL, skb);
73432 }
73433
73434 @@ -2409,7 +2426,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
73435 0, /* non standard timer */
73436 0, /* open_requests have no inode */
73437 atomic_read(&sk->sk_refcnt),
73438 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73439 + NULL,
73440 +#else
73441 req,
73442 +#endif
73443 len);
73444 }
73445
73446 @@ -2459,7 +2480,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
73447 sock_i_uid(sk),
73448 icsk->icsk_probes_out,
73449 sock_i_ino(sk),
73450 - atomic_read(&sk->sk_refcnt), sk,
73451 + atomic_read(&sk->sk_refcnt),
73452 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73453 + NULL,
73454 +#else
73455 + sk,
73456 +#endif
73457 jiffies_to_clock_t(icsk->icsk_rto),
73458 jiffies_to_clock_t(icsk->icsk_ack.ato),
73459 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
73460 @@ -2487,7 +2513,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
73461 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
73462 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
73463 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73464 - atomic_read(&tw->tw_refcnt), tw, len);
73465 + atomic_read(&tw->tw_refcnt),
73466 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73467 + NULL,
73468 +#else
73469 + tw,
73470 +#endif
73471 + len);
73472 }
73473
73474 #define TMPSZ 150
73475 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
73476 index 66363b6..b0654a3 100644
73477 --- a/net/ipv4/tcp_minisocks.c
73478 +++ b/net/ipv4/tcp_minisocks.c
73479 @@ -27,6 +27,10 @@
73480 #include <net/inet_common.h>
73481 #include <net/xfrm.h>
73482
73483 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73484 +extern int grsec_enable_blackhole;
73485 +#endif
73486 +
73487 int sysctl_tcp_syncookies __read_mostly = 1;
73488 EXPORT_SYMBOL(sysctl_tcp_syncookies);
73489
73490 @@ -751,6 +755,10 @@ listen_overflow:
73491
73492 embryonic_reset:
73493 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
73494 +
73495 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73496 + if (!grsec_enable_blackhole)
73497 +#endif
73498 if (!(flg & TCP_FLAG_RST))
73499 req->rsk_ops->send_reset(sk, skb);
73500
73501 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
73502 index 85ee7eb..53277ab 100644
73503 --- a/net/ipv4/tcp_probe.c
73504 +++ b/net/ipv4/tcp_probe.c
73505 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
73506 if (cnt + width >= len)
73507 break;
73508
73509 - if (copy_to_user(buf + cnt, tbuf, width))
73510 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
73511 return -EFAULT;
73512 cnt += width;
73513 }
73514 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
73515 index 2e0f0af..e2948bf 100644
73516 --- a/net/ipv4/tcp_timer.c
73517 +++ b/net/ipv4/tcp_timer.c
73518 @@ -22,6 +22,10 @@
73519 #include <linux/gfp.h>
73520 #include <net/tcp.h>
73521
73522 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73523 +extern int grsec_lastack_retries;
73524 +#endif
73525 +
73526 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
73527 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
73528 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
73529 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
73530 }
73531 }
73532
73533 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73534 + if ((sk->sk_state == TCP_LAST_ACK) &&
73535 + (grsec_lastack_retries > 0) &&
73536 + (grsec_lastack_retries < retry_until))
73537 + retry_until = grsec_lastack_retries;
73538 +#endif
73539 +
73540 if (retransmits_timed_out(sk, retry_until,
73541 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
73542 /* Has it gone just too far? */
73543 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
73544 index 5a65eea..bd913a1 100644
73545 --- a/net/ipv4/udp.c
73546 +++ b/net/ipv4/udp.c
73547 @@ -86,6 +86,7 @@
73548 #include <linux/types.h>
73549 #include <linux/fcntl.h>
73550 #include <linux/module.h>
73551 +#include <linux/security.h>
73552 #include <linux/socket.h>
73553 #include <linux/sockios.h>
73554 #include <linux/igmp.h>
73555 @@ -108,6 +109,10 @@
73556 #include <trace/events/udp.h>
73557 #include "udp_impl.h"
73558
73559 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73560 +extern int grsec_enable_blackhole;
73561 +#endif
73562 +
73563 struct udp_table udp_table __read_mostly;
73564 EXPORT_SYMBOL(udp_table);
73565
73566 @@ -565,6 +570,9 @@ found:
73567 return s;
73568 }
73569
73570 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
73571 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
73572 +
73573 /*
73574 * This routine is called by the ICMP module when it gets some
73575 * sort of error condition. If err < 0 then the socket should
73576 @@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
73577 dport = usin->sin_port;
73578 if (dport == 0)
73579 return -EINVAL;
73580 +
73581 + err = gr_search_udp_sendmsg(sk, usin);
73582 + if (err)
73583 + return err;
73584 } else {
73585 if (sk->sk_state != TCP_ESTABLISHED)
73586 return -EDESTADDRREQ;
73587 +
73588 + err = gr_search_udp_sendmsg(sk, NULL);
73589 + if (err)
73590 + return err;
73591 +
73592 daddr = inet->inet_daddr;
73593 dport = inet->inet_dport;
73594 /* Open fast path for connected socket.
73595 @@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
73596 udp_lib_checksum_complete(skb)) {
73597 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73598 IS_UDPLITE(sk));
73599 - atomic_inc(&sk->sk_drops);
73600 + atomic_inc_unchecked(&sk->sk_drops);
73601 __skb_unlink(skb, rcvq);
73602 __skb_queue_tail(&list_kill, skb);
73603 }
73604 @@ -1185,6 +1202,10 @@ try_again:
73605 if (!skb)
73606 goto out;
73607
73608 + err = gr_search_udp_recvmsg(sk, skb);
73609 + if (err)
73610 + goto out_free;
73611 +
73612 ulen = skb->len - sizeof(struct udphdr);
73613 copied = len;
73614 if (copied > ulen)
73615 @@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73616
73617 drop:
73618 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73619 - atomic_inc(&sk->sk_drops);
73620 + atomic_inc_unchecked(&sk->sk_drops);
73621 kfree_skb(skb);
73622 return -1;
73623 }
73624 @@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73625 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73626
73627 if (!skb1) {
73628 - atomic_inc(&sk->sk_drops);
73629 + atomic_inc_unchecked(&sk->sk_drops);
73630 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73631 IS_UDPLITE(sk));
73632 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73633 @@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73634 goto csum_error;
73635
73636 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73637 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73638 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73639 +#endif
73640 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73641
73642 /*
73643 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
73644 sk_wmem_alloc_get(sp),
73645 sk_rmem_alloc_get(sp),
73646 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73647 - atomic_read(&sp->sk_refcnt), sp,
73648 - atomic_read(&sp->sk_drops), len);
73649 + atomic_read(&sp->sk_refcnt),
73650 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73651 + NULL,
73652 +#else
73653 + sp,
73654 +#endif
73655 + atomic_read_unchecked(&sp->sk_drops), len);
73656 }
73657
73658 int udp4_seq_show(struct seq_file *seq, void *v)
73659 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
73660 index 836c4ea..cbb74dc 100644
73661 --- a/net/ipv6/addrconf.c
73662 +++ b/net/ipv6/addrconf.c
73663 @@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
73664 p.iph.ihl = 5;
73665 p.iph.protocol = IPPROTO_IPV6;
73666 p.iph.ttl = 64;
73667 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73668 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73669
73670 if (ops->ndo_do_ioctl) {
73671 mm_segment_t oldfs = get_fs();
73672 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
73673 index 1567fb1..29af910 100644
73674 --- a/net/ipv6/inet6_connection_sock.c
73675 +++ b/net/ipv6/inet6_connection_sock.c
73676 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
73677 #ifdef CONFIG_XFRM
73678 {
73679 struct rt6_info *rt = (struct rt6_info *)dst;
73680 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73681 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73682 }
73683 #endif
73684 }
73685 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
73686 #ifdef CONFIG_XFRM
73687 if (dst) {
73688 struct rt6_info *rt = (struct rt6_info *)dst;
73689 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73690 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73691 __sk_dst_reset(sk);
73692 dst = NULL;
73693 }
73694 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
73695 index 26cb08c..8af9877 100644
73696 --- a/net/ipv6/ipv6_sockglue.c
73697 +++ b/net/ipv6/ipv6_sockglue.c
73698 @@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
73699 if (sk->sk_type != SOCK_STREAM)
73700 return -ENOPROTOOPT;
73701
73702 - msg.msg_control = optval;
73703 + msg.msg_control = (void __force_kernel *)optval;
73704 msg.msg_controllen = len;
73705 msg.msg_flags = flags;
73706
73707 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
73708 index 361ebf3..d5628fb 100644
73709 --- a/net/ipv6/raw.c
73710 +++ b/net/ipv6/raw.c
73711 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
73712 {
73713 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
73714 skb_checksum_complete(skb)) {
73715 - atomic_inc(&sk->sk_drops);
73716 + atomic_inc_unchecked(&sk->sk_drops);
73717 kfree_skb(skb);
73718 return NET_RX_DROP;
73719 }
73720 @@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73721 struct raw6_sock *rp = raw6_sk(sk);
73722
73723 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73724 - atomic_inc(&sk->sk_drops);
73725 + atomic_inc_unchecked(&sk->sk_drops);
73726 kfree_skb(skb);
73727 return NET_RX_DROP;
73728 }
73729 @@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73730
73731 if (inet->hdrincl) {
73732 if (skb_checksum_complete(skb)) {
73733 - atomic_inc(&sk->sk_drops);
73734 + atomic_inc_unchecked(&sk->sk_drops);
73735 kfree_skb(skb);
73736 return NET_RX_DROP;
73737 }
73738 @@ -601,7 +601,7 @@ out:
73739 return err;
73740 }
73741
73742 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73743 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73744 struct flowi6 *fl6, struct dst_entry **dstp,
73745 unsigned int flags)
73746 {
73747 @@ -909,12 +909,15 @@ do_confirm:
73748 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73749 char __user *optval, int optlen)
73750 {
73751 + struct icmp6_filter filter;
73752 +
73753 switch (optname) {
73754 case ICMPV6_FILTER:
73755 if (optlen > sizeof(struct icmp6_filter))
73756 optlen = sizeof(struct icmp6_filter);
73757 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73758 + if (copy_from_user(&filter, optval, optlen))
73759 return -EFAULT;
73760 + raw6_sk(sk)->filter = filter;
73761 return 0;
73762 default:
73763 return -ENOPROTOOPT;
73764 @@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73765 char __user *optval, int __user *optlen)
73766 {
73767 int len;
73768 + struct icmp6_filter filter;
73769
73770 switch (optname) {
73771 case ICMPV6_FILTER:
73772 @@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73773 len = sizeof(struct icmp6_filter);
73774 if (put_user(len, optlen))
73775 return -EFAULT;
73776 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73777 + filter = raw6_sk(sk)->filter;
73778 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
73779 return -EFAULT;
73780 return 0;
73781 default:
73782 @@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73783 0, 0L, 0,
73784 sock_i_uid(sp), 0,
73785 sock_i_ino(sp),
73786 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73787 + atomic_read(&sp->sk_refcnt),
73788 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73789 + NULL,
73790 +#else
73791 + sp,
73792 +#endif
73793 + atomic_read_unchecked(&sp->sk_drops));
73794 }
73795
73796 static int raw6_seq_show(struct seq_file *seq, void *v)
73797 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
73798 index b859e4a..f9d1589 100644
73799 --- a/net/ipv6/tcp_ipv6.c
73800 +++ b/net/ipv6/tcp_ipv6.c
73801 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
73802 }
73803 #endif
73804
73805 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73806 +extern int grsec_enable_blackhole;
73807 +#endif
73808 +
73809 static void tcp_v6_hash(struct sock *sk)
73810 {
73811 if (sk->sk_state != TCP_CLOSE) {
73812 @@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
73813 return 0;
73814
73815 reset:
73816 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73817 + if (!grsec_enable_blackhole)
73818 +#endif
73819 tcp_v6_send_reset(sk, skb);
73820 discard:
73821 if (opt_skb)
73822 @@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
73823 TCP_SKB_CB(skb)->sacked = 0;
73824
73825 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73826 - if (!sk)
73827 + if (!sk) {
73828 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73829 + ret = 1;
73830 +#endif
73831 goto no_tcp_socket;
73832 + }
73833
73834 process:
73835 - if (sk->sk_state == TCP_TIME_WAIT)
73836 + if (sk->sk_state == TCP_TIME_WAIT) {
73837 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73838 + ret = 2;
73839 +#endif
73840 goto do_time_wait;
73841 + }
73842
73843 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73844 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73845 @@ -1783,6 +1798,10 @@ no_tcp_socket:
73846 bad_packet:
73847 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73848 } else {
73849 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73850 + if (!grsec_enable_blackhole || (ret == 1 &&
73851 + (skb->dev->flags & IFF_LOOPBACK)))
73852 +#endif
73853 tcp_v6_send_reset(NULL, skb);
73854 }
73855
73856 @@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
73857 uid,
73858 0, /* non standard timer */
73859 0, /* open_requests have no inode */
73860 - 0, req);
73861 + 0,
73862 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73863 + NULL
73864 +#else
73865 + req
73866 +#endif
73867 + );
73868 }
73869
73870 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73871 @@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73872 sock_i_uid(sp),
73873 icsk->icsk_probes_out,
73874 sock_i_ino(sp),
73875 - atomic_read(&sp->sk_refcnt), sp,
73876 + atomic_read(&sp->sk_refcnt),
73877 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73878 + NULL,
73879 +#else
73880 + sp,
73881 +#endif
73882 jiffies_to_clock_t(icsk->icsk_rto),
73883 jiffies_to_clock_t(icsk->icsk_ack.ato),
73884 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73885 @@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
73886 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73887 tw->tw_substate, 0, 0,
73888 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73889 - atomic_read(&tw->tw_refcnt), tw);
73890 + atomic_read(&tw->tw_refcnt),
73891 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73892 + NULL
73893 +#else
73894 + tw
73895 +#endif
73896 + );
73897 }
73898
73899 static int tcp6_seq_show(struct seq_file *seq, void *v)
73900 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
73901 index 8c25419..47a51ae 100644
73902 --- a/net/ipv6/udp.c
73903 +++ b/net/ipv6/udp.c
73904 @@ -50,6 +50,10 @@
73905 #include <linux/seq_file.h>
73906 #include "udp_impl.h"
73907
73908 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73909 +extern int grsec_enable_blackhole;
73910 +#endif
73911 +
73912 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73913 {
73914 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73915 @@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
73916
73917 return 0;
73918 drop:
73919 - atomic_inc(&sk->sk_drops);
73920 + atomic_inc_unchecked(&sk->sk_drops);
73921 drop_no_sk_drops_inc:
73922 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73923 kfree_skb(skb);
73924 @@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73925 continue;
73926 }
73927 drop:
73928 - atomic_inc(&sk->sk_drops);
73929 + atomic_inc_unchecked(&sk->sk_drops);
73930 UDP6_INC_STATS_BH(sock_net(sk),
73931 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73932 UDP6_INC_STATS_BH(sock_net(sk),
73933 @@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73934 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73935 proto == IPPROTO_UDPLITE);
73936
73937 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73938 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73939 +#endif
73940 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73941
73942 kfree_skb(skb);
73943 @@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73944 if (!sock_owned_by_user(sk))
73945 udpv6_queue_rcv_skb(sk, skb);
73946 else if (sk_add_backlog(sk, skb)) {
73947 - atomic_inc(&sk->sk_drops);
73948 + atomic_inc_unchecked(&sk->sk_drops);
73949 bh_unlock_sock(sk);
73950 sock_put(sk);
73951 goto discard;
73952 @@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
73953 0, 0L, 0,
73954 sock_i_uid(sp), 0,
73955 sock_i_ino(sp),
73956 - atomic_read(&sp->sk_refcnt), sp,
73957 - atomic_read(&sp->sk_drops));
73958 + atomic_read(&sp->sk_refcnt),
73959 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73960 + NULL,
73961 +#else
73962 + sp,
73963 +#endif
73964 + atomic_read_unchecked(&sp->sk_drops));
73965 }
73966
73967 int udp6_seq_show(struct seq_file *seq, void *v)
73968 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
73969 index 253695d..9481ce8 100644
73970 --- a/net/irda/ircomm/ircomm_tty.c
73971 +++ b/net/irda/ircomm/ircomm_tty.c
73972 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73973 add_wait_queue(&self->open_wait, &wait);
73974
73975 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73976 - __FILE__,__LINE__, tty->driver->name, self->open_count );
73977 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73978
73979 /* As far as I can see, we protect open_count - Jean II */
73980 spin_lock_irqsave(&self->spinlock, flags);
73981 if (!tty_hung_up_p(filp)) {
73982 extra_count = 1;
73983 - self->open_count--;
73984 + local_dec(&self->open_count);
73985 }
73986 spin_unlock_irqrestore(&self->spinlock, flags);
73987 - self->blocked_open++;
73988 + local_inc(&self->blocked_open);
73989
73990 while (1) {
73991 if (tty->termios->c_cflag & CBAUD) {
73992 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73993 }
73994
73995 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73996 - __FILE__,__LINE__, tty->driver->name, self->open_count );
73997 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73998
73999 schedule();
74000 }
74001 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
74002 if (extra_count) {
74003 /* ++ is not atomic, so this should be protected - Jean II */
74004 spin_lock_irqsave(&self->spinlock, flags);
74005 - self->open_count++;
74006 + local_inc(&self->open_count);
74007 spin_unlock_irqrestore(&self->spinlock, flags);
74008 }
74009 - self->blocked_open--;
74010 + local_dec(&self->blocked_open);
74011
74012 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
74013 - __FILE__,__LINE__, tty->driver->name, self->open_count);
74014 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
74015
74016 if (!retval)
74017 self->flags |= ASYNC_NORMAL_ACTIVE;
74018 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
74019 }
74020 /* ++ is not atomic, so this should be protected - Jean II */
74021 spin_lock_irqsave(&self->spinlock, flags);
74022 - self->open_count++;
74023 + local_inc(&self->open_count);
74024
74025 tty->driver_data = self;
74026 self->tty = tty;
74027 spin_unlock_irqrestore(&self->spinlock, flags);
74028
74029 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
74030 - self->line, self->open_count);
74031 + self->line, local_read(&self->open_count));
74032
74033 /* Not really used by us, but lets do it anyway */
74034 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
74035 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74036 return;
74037 }
74038
74039 - if ((tty->count == 1) && (self->open_count != 1)) {
74040 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
74041 /*
74042 * Uh, oh. tty->count is 1, which means that the tty
74043 * structure will be freed. state->count should always
74044 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74045 */
74046 IRDA_DEBUG(0, "%s(), bad serial port count; "
74047 "tty->count is 1, state->count is %d\n", __func__ ,
74048 - self->open_count);
74049 - self->open_count = 1;
74050 + local_read(&self->open_count));
74051 + local_set(&self->open_count, 1);
74052 }
74053
74054 - if (--self->open_count < 0) {
74055 + if (local_dec_return(&self->open_count) < 0) {
74056 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
74057 - __func__, self->line, self->open_count);
74058 - self->open_count = 0;
74059 + __func__, self->line, local_read(&self->open_count));
74060 + local_set(&self->open_count, 0);
74061 }
74062 - if (self->open_count) {
74063 + if (local_read(&self->open_count)) {
74064 spin_unlock_irqrestore(&self->spinlock, flags);
74065
74066 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
74067 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
74068 tty->closing = 0;
74069 self->tty = NULL;
74070
74071 - if (self->blocked_open) {
74072 + if (local_read(&self->blocked_open)) {
74073 if (self->close_delay)
74074 schedule_timeout_interruptible(self->close_delay);
74075 wake_up_interruptible(&self->open_wait);
74076 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
74077 spin_lock_irqsave(&self->spinlock, flags);
74078 self->flags &= ~ASYNC_NORMAL_ACTIVE;
74079 self->tty = NULL;
74080 - self->open_count = 0;
74081 + local_set(&self->open_count, 0);
74082 spin_unlock_irqrestore(&self->spinlock, flags);
74083
74084 wake_up_interruptible(&self->open_wait);
74085 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
74086 seq_putc(m, '\n');
74087
74088 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
74089 - seq_printf(m, "Open count: %d\n", self->open_count);
74090 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
74091 seq_printf(m, "Max data size: %d\n", self->max_data_size);
74092 seq_printf(m, "Max header size: %d\n", self->max_header_size);
74093
74094 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
74095 index 274d150..656a144 100644
74096 --- a/net/iucv/af_iucv.c
74097 +++ b/net/iucv/af_iucv.c
74098 @@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
74099
74100 write_lock_bh(&iucv_sk_list.lock);
74101
74102 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
74103 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
74104 while (__iucv_get_sock_by_name(name)) {
74105 sprintf(name, "%08x",
74106 - atomic_inc_return(&iucv_sk_list.autobind_name));
74107 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
74108 }
74109
74110 write_unlock_bh(&iucv_sk_list.lock);
74111 diff --git a/net/key/af_key.c b/net/key/af_key.c
74112 index 1e733e9..3d73c9f 100644
74113 --- a/net/key/af_key.c
74114 +++ b/net/key/af_key.c
74115 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
74116 static u32 get_acqseq(void)
74117 {
74118 u32 res;
74119 - static atomic_t acqseq;
74120 + static atomic_unchecked_t acqseq;
74121
74122 do {
74123 - res = atomic_inc_return(&acqseq);
74124 + res = atomic_inc_return_unchecked(&acqseq);
74125 } while (!res);
74126 return res;
74127 }
74128 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
74129 index 73495f1..ad51356 100644
74130 --- a/net/mac80211/ieee80211_i.h
74131 +++ b/net/mac80211/ieee80211_i.h
74132 @@ -27,6 +27,7 @@
74133 #include <net/ieee80211_radiotap.h>
74134 #include <net/cfg80211.h>
74135 #include <net/mac80211.h>
74136 +#include <asm/local.h>
74137 #include "key.h"
74138 #include "sta_info.h"
74139
74140 @@ -764,7 +765,7 @@ struct ieee80211_local {
74141 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
74142 spinlock_t queue_stop_reason_lock;
74143
74144 - int open_count;
74145 + local_t open_count;
74146 int monitors, cooked_mntrs;
74147 /* number of interfaces with corresponding FIF_ flags */
74148 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
74149 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
74150 index 30d7355..e260095 100644
74151 --- a/net/mac80211/iface.c
74152 +++ b/net/mac80211/iface.c
74153 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74154 break;
74155 }
74156
74157 - if (local->open_count == 0) {
74158 + if (local_read(&local->open_count) == 0) {
74159 res = drv_start(local);
74160 if (res)
74161 goto err_del_bss;
74162 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74163 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
74164
74165 if (!is_valid_ether_addr(dev->dev_addr)) {
74166 - if (!local->open_count)
74167 + if (!local_read(&local->open_count))
74168 drv_stop(local);
74169 return -EADDRNOTAVAIL;
74170 }
74171 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74172 mutex_unlock(&local->mtx);
74173
74174 if (coming_up)
74175 - local->open_count++;
74176 + local_inc(&local->open_count);
74177
74178 if (hw_reconf_flags) {
74179 ieee80211_hw_config(local, hw_reconf_flags);
74180 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
74181 err_del_interface:
74182 drv_remove_interface(local, &sdata->vif);
74183 err_stop:
74184 - if (!local->open_count)
74185 + if (!local_read(&local->open_count))
74186 drv_stop(local);
74187 err_del_bss:
74188 sdata->bss = NULL;
74189 @@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
74190 }
74191
74192 if (going_down)
74193 - local->open_count--;
74194 + local_dec(&local->open_count);
74195
74196 switch (sdata->vif.type) {
74197 case NL80211_IFTYPE_AP_VLAN:
74198 @@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
74199
74200 ieee80211_recalc_ps(local, -1);
74201
74202 - if (local->open_count == 0) {
74203 + if (local_read(&local->open_count) == 0) {
74204 if (local->ops->napi_poll)
74205 napi_disable(&local->napi);
74206 ieee80211_clear_tx_pending(local);
74207 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
74208 index 7d9b21d..0687004 100644
74209 --- a/net/mac80211/main.c
74210 +++ b/net/mac80211/main.c
74211 @@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
74212 local->hw.conf.power_level = power;
74213 }
74214
74215 - if (changed && local->open_count) {
74216 + if (changed && local_read(&local->open_count)) {
74217 ret = drv_config(local, changed);
74218 /*
74219 * Goal:
74220 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
74221 index 9ee7164..56c5061 100644
74222 --- a/net/mac80211/pm.c
74223 +++ b/net/mac80211/pm.c
74224 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74225 struct ieee80211_sub_if_data *sdata;
74226 struct sta_info *sta;
74227
74228 - if (!local->open_count)
74229 + if (!local_read(&local->open_count))
74230 goto suspend;
74231
74232 ieee80211_scan_cancel(local);
74233 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74234 cancel_work_sync(&local->dynamic_ps_enable_work);
74235 del_timer_sync(&local->dynamic_ps_timer);
74236
74237 - local->wowlan = wowlan && local->open_count;
74238 + local->wowlan = wowlan && local_read(&local->open_count);
74239 if (local->wowlan) {
74240 int err = drv_suspend(local, wowlan);
74241 if (err < 0) {
74242 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
74243 }
74244
74245 /* stop hardware - this must stop RX */
74246 - if (local->open_count)
74247 + if (local_read(&local->open_count))
74248 ieee80211_stop_device(local);
74249
74250 suspend:
74251 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
74252 index 5a5a776..9600b11 100644
74253 --- a/net/mac80211/rate.c
74254 +++ b/net/mac80211/rate.c
74255 @@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
74256
74257 ASSERT_RTNL();
74258
74259 - if (local->open_count)
74260 + if (local_read(&local->open_count))
74261 return -EBUSY;
74262
74263 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
74264 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
74265 index c97a065..ff61928 100644
74266 --- a/net/mac80211/rc80211_pid_debugfs.c
74267 +++ b/net/mac80211/rc80211_pid_debugfs.c
74268 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
74269
74270 spin_unlock_irqrestore(&events->lock, status);
74271
74272 - if (copy_to_user(buf, pb, p))
74273 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
74274 return -EFAULT;
74275
74276 return p;
74277 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
74278 index d5230ec..c604b21 100644
74279 --- a/net/mac80211/util.c
74280 +++ b/net/mac80211/util.c
74281 @@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
74282 drv_set_coverage_class(local, hw->wiphy->coverage_class);
74283
74284 /* everything else happens only if HW was up & running */
74285 - if (!local->open_count)
74286 + if (!local_read(&local->open_count))
74287 goto wake_up;
74288
74289 /*
74290 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
74291 index d5597b7..ab6d39c 100644
74292 --- a/net/netfilter/Kconfig
74293 +++ b/net/netfilter/Kconfig
74294 @@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
74295
74296 To compile it as a module, choose M here. If unsure, say N.
74297
74298 +config NETFILTER_XT_MATCH_GRADM
74299 + tristate '"gradm" match support'
74300 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
74301 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
74302 + ---help---
74303 + The gradm match allows to match on grsecurity RBAC being enabled.
74304 + It is useful when iptables rules are applied early on bootup to
74305 + prevent connections to the machine (except from a trusted host)
74306 + while the RBAC system is disabled.
74307 +
74308 config NETFILTER_XT_MATCH_HASHLIMIT
74309 tristate '"hashlimit" match support'
74310 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
74311 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
74312 index 1a02853..5d8c22e 100644
74313 --- a/net/netfilter/Makefile
74314 +++ b/net/netfilter/Makefile
74315 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
74316 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
74317 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
74318 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
74319 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
74320 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
74321 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
74322 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
74323 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
74324 index 29fa5ba..8debc79 100644
74325 --- a/net/netfilter/ipvs/ip_vs_conn.c
74326 +++ b/net/netfilter/ipvs/ip_vs_conn.c
74327 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
74328 /* Increase the refcnt counter of the dest */
74329 atomic_inc(&dest->refcnt);
74330
74331 - conn_flags = atomic_read(&dest->conn_flags);
74332 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
74333 if (cp->protocol != IPPROTO_UDP)
74334 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
74335 /* Bind with the destination and its corresponding transmitter */
74336 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
74337 atomic_set(&cp->refcnt, 1);
74338
74339 atomic_set(&cp->n_control, 0);
74340 - atomic_set(&cp->in_pkts, 0);
74341 + atomic_set_unchecked(&cp->in_pkts, 0);
74342
74343 atomic_inc(&ipvs->conn_count);
74344 if (flags & IP_VS_CONN_F_NO_CPORT)
74345 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
74346
74347 /* Don't drop the entry if its number of incoming packets is not
74348 located in [0, 8] */
74349 - i = atomic_read(&cp->in_pkts);
74350 + i = atomic_read_unchecked(&cp->in_pkts);
74351 if (i > 8 || i < 0) return 0;
74352
74353 if (!todrop_rate[i]) return 0;
74354 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
74355 index 6dc7d7d..e45913a 100644
74356 --- a/net/netfilter/ipvs/ip_vs_core.c
74357 +++ b/net/netfilter/ipvs/ip_vs_core.c
74358 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
74359 ret = cp->packet_xmit(skb, cp, pd->pp);
74360 /* do not touch skb anymore */
74361
74362 - atomic_inc(&cp->in_pkts);
74363 + atomic_inc_unchecked(&cp->in_pkts);
74364 ip_vs_conn_put(cp);
74365 return ret;
74366 }
74367 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
74368 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
74369 pkts = sysctl_sync_threshold(ipvs);
74370 else
74371 - pkts = atomic_add_return(1, &cp->in_pkts);
74372 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74373
74374 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
74375 cp->protocol == IPPROTO_SCTP) {
74376 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
74377 index e1a66cf..0910076 100644
74378 --- a/net/netfilter/ipvs/ip_vs_ctl.c
74379 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
74380 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
74381 ip_vs_rs_hash(ipvs, dest);
74382 write_unlock_bh(&ipvs->rs_lock);
74383 }
74384 - atomic_set(&dest->conn_flags, conn_flags);
74385 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
74386
74387 /* bind the service */
74388 if (!dest->svc) {
74389 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74390 " %-7s %-6d %-10d %-10d\n",
74391 &dest->addr.in6,
74392 ntohs(dest->port),
74393 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74394 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74395 atomic_read(&dest->weight),
74396 atomic_read(&dest->activeconns),
74397 atomic_read(&dest->inactconns));
74398 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74399 "%-7s %-6d %-10d %-10d\n",
74400 ntohl(dest->addr.ip),
74401 ntohs(dest->port),
74402 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74403 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74404 atomic_read(&dest->weight),
74405 atomic_read(&dest->activeconns),
74406 atomic_read(&dest->inactconns));
74407 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
74408
74409 entry.addr = dest->addr.ip;
74410 entry.port = dest->port;
74411 - entry.conn_flags = atomic_read(&dest->conn_flags);
74412 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
74413 entry.weight = atomic_read(&dest->weight);
74414 entry.u_threshold = dest->u_threshold;
74415 entry.l_threshold = dest->l_threshold;
74416 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
74417 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
74418
74419 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
74420 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74421 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74422 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
74423 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
74424 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
74425 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
74426 index 2b6678c0..aaa41fc 100644
74427 --- a/net/netfilter/ipvs/ip_vs_sync.c
74428 +++ b/net/netfilter/ipvs/ip_vs_sync.c
74429 @@ -649,7 +649,7 @@ control:
74430 * i.e only increment in_pkts for Templates.
74431 */
74432 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
74433 - int pkts = atomic_add_return(1, &cp->in_pkts);
74434 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74435
74436 if (pkts % sysctl_sync_period(ipvs) != 1)
74437 return;
74438 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
74439
74440 if (opt)
74441 memcpy(&cp->in_seq, opt, sizeof(*opt));
74442 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74443 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74444 cp->state = state;
74445 cp->old_state = cp->state;
74446 /*
74447 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
74448 index aa2d720..d8aa111 100644
74449 --- a/net/netfilter/ipvs/ip_vs_xmit.c
74450 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
74451 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
74452 else
74453 rc = NF_ACCEPT;
74454 /* do not touch skb anymore */
74455 - atomic_inc(&cp->in_pkts);
74456 + atomic_inc_unchecked(&cp->in_pkts);
74457 goto out;
74458 }
74459
74460 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
74461 else
74462 rc = NF_ACCEPT;
74463 /* do not touch skb anymore */
74464 - atomic_inc(&cp->in_pkts);
74465 + atomic_inc_unchecked(&cp->in_pkts);
74466 goto out;
74467 }
74468
74469 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
74470 index 66b2c54..c7884e3 100644
74471 --- a/net/netfilter/nfnetlink_log.c
74472 +++ b/net/netfilter/nfnetlink_log.c
74473 @@ -70,7 +70,7 @@ struct nfulnl_instance {
74474 };
74475
74476 static DEFINE_SPINLOCK(instances_lock);
74477 -static atomic_t global_seq;
74478 +static atomic_unchecked_t global_seq;
74479
74480 #define INSTANCE_BUCKETS 16
74481 static struct hlist_head instance_table[INSTANCE_BUCKETS];
74482 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
74483 /* global sequence number */
74484 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
74485 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
74486 - htonl(atomic_inc_return(&global_seq)));
74487 + htonl(atomic_inc_return_unchecked(&global_seq)));
74488
74489 if (data_len) {
74490 struct nlattr *nla;
74491 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
74492 new file mode 100644
74493 index 0000000..6905327
74494 --- /dev/null
74495 +++ b/net/netfilter/xt_gradm.c
74496 @@ -0,0 +1,51 @@
74497 +/*
74498 + * gradm match for netfilter
74499 + * Copyright © Zbigniew Krzystolik, 2010
74500 + *
74501 + * This program is free software; you can redistribute it and/or modify
74502 + * it under the terms of the GNU General Public License; either version
74503 + * 2 or 3 as published by the Free Software Foundation.
74504 + */
74505 +#include <linux/module.h>
74506 +#include <linux/moduleparam.h>
74507 +#include <linux/skbuff.h>
74508 +#include <linux/netfilter/x_tables.h>
74509 +#include <linux/grsecurity.h>
74510 +#include <linux/netfilter/xt_gradm.h>
74511 +
74512 +static bool
74513 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
74514 +{
74515 + const struct xt_gradm_mtinfo *info = par->matchinfo;
74516 + bool retval = false;
74517 + if (gr_acl_is_enabled())
74518 + retval = true;
74519 + return retval ^ info->invflags;
74520 +}
74521 +
74522 +static struct xt_match gradm_mt_reg __read_mostly = {
74523 + .name = "gradm",
74524 + .revision = 0,
74525 + .family = NFPROTO_UNSPEC,
74526 + .match = gradm_mt,
74527 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
74528 + .me = THIS_MODULE,
74529 +};
74530 +
74531 +static int __init gradm_mt_init(void)
74532 +{
74533 + return xt_register_match(&gradm_mt_reg);
74534 +}
74535 +
74536 +static void __exit gradm_mt_exit(void)
74537 +{
74538 + xt_unregister_match(&gradm_mt_reg);
74539 +}
74540 +
74541 +module_init(gradm_mt_init);
74542 +module_exit(gradm_mt_exit);
74543 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
74544 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
74545 +MODULE_LICENSE("GPL");
74546 +MODULE_ALIAS("ipt_gradm");
74547 +MODULE_ALIAS("ip6t_gradm");
74548 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
74549 index 4fe4fb4..87a89e5 100644
74550 --- a/net/netfilter/xt_statistic.c
74551 +++ b/net/netfilter/xt_statistic.c
74552 @@ -19,7 +19,7 @@
74553 #include <linux/module.h>
74554
74555 struct xt_statistic_priv {
74556 - atomic_t count;
74557 + atomic_unchecked_t count;
74558 } ____cacheline_aligned_in_smp;
74559
74560 MODULE_LICENSE("GPL");
74561 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
74562 break;
74563 case XT_STATISTIC_MODE_NTH:
74564 do {
74565 - oval = atomic_read(&info->master->count);
74566 + oval = atomic_read_unchecked(&info->master->count);
74567 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
74568 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
74569 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
74570 if (nval == 0)
74571 ret = !ret;
74572 break;
74573 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
74574 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
74575 if (info->master == NULL)
74576 return -ENOMEM;
74577 - atomic_set(&info->master->count, info->u.nth.count);
74578 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
74579
74580 return 0;
74581 }
74582 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
74583 index 1201b6d..bcff8c6 100644
74584 --- a/net/netlink/af_netlink.c
74585 +++ b/net/netlink/af_netlink.c
74586 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk)
74587 sk->sk_error_report(sk);
74588 }
74589 }
74590 - atomic_inc(&sk->sk_drops);
74591 + atomic_inc_unchecked(&sk->sk_drops);
74592 }
74593
74594 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
74595 @@ -1999,7 +1999,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
74596 sk_wmem_alloc_get(s),
74597 nlk->cb,
74598 atomic_read(&s->sk_refcnt),
74599 - atomic_read(&s->sk_drops),
74600 + atomic_read_unchecked(&s->sk_drops),
74601 sock_i_ino(s)
74602 );
74603
74604 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
74605 index 732152f..60bb09e 100644
74606 --- a/net/netrom/af_netrom.c
74607 +++ b/net/netrom/af_netrom.c
74608 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
74609 struct sock *sk = sock->sk;
74610 struct nr_sock *nr = nr_sk(sk);
74611
74612 + memset(sax, 0, sizeof(*sax));
74613 lock_sock(sk);
74614 if (peer != 0) {
74615 if (sk->sk_state != TCP_ESTABLISHED) {
74616 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
74617 *uaddr_len = sizeof(struct full_sockaddr_ax25);
74618 } else {
74619 sax->fsa_ax25.sax25_family = AF_NETROM;
74620 - sax->fsa_ax25.sax25_ndigis = 0;
74621 sax->fsa_ax25.sax25_call = nr->source_addr;
74622 *uaddr_len = sizeof(struct sockaddr_ax25);
74623 }
74624 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
74625 index d9d4970..d5a6a68 100644
74626 --- a/net/packet/af_packet.c
74627 +++ b/net/packet/af_packet.c
74628 @@ -1675,7 +1675,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
74629
74630 spin_lock(&sk->sk_receive_queue.lock);
74631 po->stats.tp_packets++;
74632 - skb->dropcount = atomic_read(&sk->sk_drops);
74633 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74634 __skb_queue_tail(&sk->sk_receive_queue, skb);
74635 spin_unlock(&sk->sk_receive_queue.lock);
74636 sk->sk_data_ready(sk, skb->len);
74637 @@ -1684,7 +1684,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
74638 drop_n_acct:
74639 spin_lock(&sk->sk_receive_queue.lock);
74640 po->stats.tp_drops++;
74641 - atomic_inc(&sk->sk_drops);
74642 + atomic_inc_unchecked(&sk->sk_drops);
74643 spin_unlock(&sk->sk_receive_queue.lock);
74644
74645 drop_n_restore:
74646 @@ -3266,7 +3266,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
74647 case PACKET_HDRLEN:
74648 if (len > sizeof(int))
74649 len = sizeof(int);
74650 - if (copy_from_user(&val, optval, len))
74651 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
74652 return -EFAULT;
74653 switch (val) {
74654 case TPACKET_V1:
74655 @@ -3316,7 +3316,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
74656
74657 if (put_user(len, optlen))
74658 return -EFAULT;
74659 - if (copy_to_user(optval, data, len))
74660 + if (len > sizeof(st) || copy_to_user(optval, data, len))
74661 return -EFAULT;
74662 return 0;
74663 }
74664 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
74665 index d65f699..05aa6ce 100644
74666 --- a/net/phonet/af_phonet.c
74667 +++ b/net/phonet/af_phonet.c
74668 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
74669 {
74670 struct phonet_protocol *pp;
74671
74672 - if (protocol >= PHONET_NPROTO)
74673 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74674 return NULL;
74675
74676 rcu_read_lock();
74677 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
74678 {
74679 int err = 0;
74680
74681 - if (protocol >= PHONET_NPROTO)
74682 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74683 return -EINVAL;
74684
74685 err = proto_register(pp->prot, 1);
74686 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
74687 index 2ba6e9f..409573f 100644
74688 --- a/net/phonet/pep.c
74689 +++ b/net/phonet/pep.c
74690 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
74691
74692 case PNS_PEP_CTRL_REQ:
74693 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
74694 - atomic_inc(&sk->sk_drops);
74695 + atomic_inc_unchecked(&sk->sk_drops);
74696 break;
74697 }
74698 __skb_pull(skb, 4);
74699 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
74700 }
74701
74702 if (pn->rx_credits == 0) {
74703 - atomic_inc(&sk->sk_drops);
74704 + atomic_inc_unchecked(&sk->sk_drops);
74705 err = -ENOBUFS;
74706 break;
74707 }
74708 @@ -557,7 +557,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
74709 }
74710
74711 if (pn->rx_credits == 0) {
74712 - atomic_inc(&sk->sk_drops);
74713 + atomic_inc_unchecked(&sk->sk_drops);
74714 err = NET_RX_DROP;
74715 break;
74716 }
74717 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
74718 index 4c7eff3..59c727f 100644
74719 --- a/net/phonet/socket.c
74720 +++ b/net/phonet/socket.c
74721 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
74722 pn->resource, sk->sk_state,
74723 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
74724 sock_i_uid(sk), sock_i_ino(sk),
74725 - atomic_read(&sk->sk_refcnt), sk,
74726 - atomic_read(&sk->sk_drops), &len);
74727 + atomic_read(&sk->sk_refcnt),
74728 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74729 + NULL,
74730 +#else
74731 + sk,
74732 +#endif
74733 + atomic_read_unchecked(&sk->sk_drops), &len);
74734 }
74735 seq_printf(seq, "%*s\n", 127 - len, "");
74736 return 0;
74737 diff --git a/net/rds/cong.c b/net/rds/cong.c
74738 index e5b65ac..f3b6fb7 100644
74739 --- a/net/rds/cong.c
74740 +++ b/net/rds/cong.c
74741 @@ -78,7 +78,7 @@
74742 * finds that the saved generation number is smaller than the global generation
74743 * number, it wakes up the process.
74744 */
74745 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
74746 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
74747
74748 /*
74749 * Congestion monitoring
74750 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
74751 rdsdebug("waking map %p for %pI4\n",
74752 map, &map->m_addr);
74753 rds_stats_inc(s_cong_update_received);
74754 - atomic_inc(&rds_cong_generation);
74755 + atomic_inc_unchecked(&rds_cong_generation);
74756 if (waitqueue_active(&map->m_waitq))
74757 wake_up(&map->m_waitq);
74758 if (waitqueue_active(&rds_poll_waitq))
74759 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
74760
74761 int rds_cong_updated_since(unsigned long *recent)
74762 {
74763 - unsigned long gen = atomic_read(&rds_cong_generation);
74764 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
74765
74766 if (likely(*recent == gen))
74767 return 0;
74768 diff --git a/net/rds/ib.h b/net/rds/ib.h
74769 index edfaaaf..8c89879 100644
74770 --- a/net/rds/ib.h
74771 +++ b/net/rds/ib.h
74772 @@ -128,7 +128,7 @@ struct rds_ib_connection {
74773 /* sending acks */
74774 unsigned long i_ack_flags;
74775 #ifdef KERNEL_HAS_ATOMIC64
74776 - atomic64_t i_ack_next; /* next ACK to send */
74777 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74778 #else
74779 spinlock_t i_ack_lock; /* protect i_ack_next */
74780 u64 i_ack_next; /* next ACK to send */
74781 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
74782 index 51c8689..36c555f 100644
74783 --- a/net/rds/ib_cm.c
74784 +++ b/net/rds/ib_cm.c
74785 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
74786 /* Clear the ACK state */
74787 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74788 #ifdef KERNEL_HAS_ATOMIC64
74789 - atomic64_set(&ic->i_ack_next, 0);
74790 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74791 #else
74792 ic->i_ack_next = 0;
74793 #endif
74794 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
74795 index e29e0ca..fa3a6a3 100644
74796 --- a/net/rds/ib_recv.c
74797 +++ b/net/rds/ib_recv.c
74798 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
74799 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
74800 int ack_required)
74801 {
74802 - atomic64_set(&ic->i_ack_next, seq);
74803 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74804 if (ack_required) {
74805 smp_mb__before_clear_bit();
74806 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74807 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
74808 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74809 smp_mb__after_clear_bit();
74810
74811 - return atomic64_read(&ic->i_ack_next);
74812 + return atomic64_read_unchecked(&ic->i_ack_next);
74813 }
74814 #endif
74815
74816 diff --git a/net/rds/iw.h b/net/rds/iw.h
74817 index 04ce3b1..48119a6 100644
74818 --- a/net/rds/iw.h
74819 +++ b/net/rds/iw.h
74820 @@ -134,7 +134,7 @@ struct rds_iw_connection {
74821 /* sending acks */
74822 unsigned long i_ack_flags;
74823 #ifdef KERNEL_HAS_ATOMIC64
74824 - atomic64_t i_ack_next; /* next ACK to send */
74825 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74826 #else
74827 spinlock_t i_ack_lock; /* protect i_ack_next */
74828 u64 i_ack_next; /* next ACK to send */
74829 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
74830 index 9556d28..f046d0e 100644
74831 --- a/net/rds/iw_cm.c
74832 +++ b/net/rds/iw_cm.c
74833 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
74834 /* Clear the ACK state */
74835 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74836 #ifdef KERNEL_HAS_ATOMIC64
74837 - atomic64_set(&ic->i_ack_next, 0);
74838 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74839 #else
74840 ic->i_ack_next = 0;
74841 #endif
74842 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
74843 index 5e57347..3916042 100644
74844 --- a/net/rds/iw_recv.c
74845 +++ b/net/rds/iw_recv.c
74846 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
74847 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
74848 int ack_required)
74849 {
74850 - atomic64_set(&ic->i_ack_next, seq);
74851 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74852 if (ack_required) {
74853 smp_mb__before_clear_bit();
74854 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74855 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
74856 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74857 smp_mb__after_clear_bit();
74858
74859 - return atomic64_read(&ic->i_ack_next);
74860 + return atomic64_read_unchecked(&ic->i_ack_next);
74861 }
74862 #endif
74863
74864 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
74865 index edac9ef..16bcb98 100644
74866 --- a/net/rds/tcp.c
74867 +++ b/net/rds/tcp.c
74868 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
74869 int val = 1;
74870
74871 set_fs(KERNEL_DS);
74872 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
74873 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
74874 sizeof(val));
74875 set_fs(oldfs);
74876 }
74877 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
74878 index 1b4fd68..2234175 100644
74879 --- a/net/rds/tcp_send.c
74880 +++ b/net/rds/tcp_send.c
74881 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
74882
74883 oldfs = get_fs();
74884 set_fs(KERNEL_DS);
74885 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
74886 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
74887 sizeof(val));
74888 set_fs(oldfs);
74889 }
74890 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
74891 index 74c064c..fdec26f 100644
74892 --- a/net/rxrpc/af_rxrpc.c
74893 +++ b/net/rxrpc/af_rxrpc.c
74894 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
74895 __be32 rxrpc_epoch;
74896
74897 /* current debugging ID */
74898 -atomic_t rxrpc_debug_id;
74899 +atomic_unchecked_t rxrpc_debug_id;
74900
74901 /* count of skbs currently in use */
74902 atomic_t rxrpc_n_skbs;
74903 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
74904 index f99cfce..cc529dd 100644
74905 --- a/net/rxrpc/ar-ack.c
74906 +++ b/net/rxrpc/ar-ack.c
74907 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
74908
74909 _enter("{%d,%d,%d,%d},",
74910 call->acks_hard, call->acks_unacked,
74911 - atomic_read(&call->sequence),
74912 + atomic_read_unchecked(&call->sequence),
74913 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
74914
74915 stop = 0;
74916 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
74917
74918 /* each Tx packet has a new serial number */
74919 sp->hdr.serial =
74920 - htonl(atomic_inc_return(&call->conn->serial));
74921 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
74922
74923 hdr = (struct rxrpc_header *) txb->head;
74924 hdr->serial = sp->hdr.serial;
74925 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
74926 */
74927 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
74928 {
74929 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
74930 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
74931 }
74932
74933 /*
74934 @@ -629,7 +629,7 @@ process_further:
74935
74936 latest = ntohl(sp->hdr.serial);
74937 hard = ntohl(ack.firstPacket);
74938 - tx = atomic_read(&call->sequence);
74939 + tx = atomic_read_unchecked(&call->sequence);
74940
74941 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74942 latest,
74943 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
74944 goto maybe_reschedule;
74945
74946 send_ACK_with_skew:
74947 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
74948 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
74949 ntohl(ack.serial));
74950 send_ACK:
74951 mtu = call->conn->trans->peer->if_mtu;
74952 @@ -1173,7 +1173,7 @@ send_ACK:
74953 ackinfo.rxMTU = htonl(5692);
74954 ackinfo.jumbo_max = htonl(4);
74955
74956 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74957 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74958 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74959 ntohl(hdr.serial),
74960 ntohs(ack.maxSkew),
74961 @@ -1191,7 +1191,7 @@ send_ACK:
74962 send_message:
74963 _debug("send message");
74964
74965 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74966 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74967 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
74968 send_message_2:
74969
74970 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
74971 index bf656c2..48f9d27 100644
74972 --- a/net/rxrpc/ar-call.c
74973 +++ b/net/rxrpc/ar-call.c
74974 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
74975 spin_lock_init(&call->lock);
74976 rwlock_init(&call->state_lock);
74977 atomic_set(&call->usage, 1);
74978 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
74979 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74980 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
74981
74982 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
74983 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
74984 index 4106ca9..a338d7a 100644
74985 --- a/net/rxrpc/ar-connection.c
74986 +++ b/net/rxrpc/ar-connection.c
74987 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
74988 rwlock_init(&conn->lock);
74989 spin_lock_init(&conn->state_lock);
74990 atomic_set(&conn->usage, 1);
74991 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
74992 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74993 conn->avail_calls = RXRPC_MAXCALLS;
74994 conn->size_align = 4;
74995 conn->header_size = sizeof(struct rxrpc_header);
74996 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
74997 index e7ed43a..6afa140 100644
74998 --- a/net/rxrpc/ar-connevent.c
74999 +++ b/net/rxrpc/ar-connevent.c
75000 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
75001
75002 len = iov[0].iov_len + iov[1].iov_len;
75003
75004 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
75005 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
75006 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
75007
75008 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
75009 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
75010 index 1a2b0633..e8d1382 100644
75011 --- a/net/rxrpc/ar-input.c
75012 +++ b/net/rxrpc/ar-input.c
75013 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
75014 /* track the latest serial number on this connection for ACK packet
75015 * information */
75016 serial = ntohl(sp->hdr.serial);
75017 - hi_serial = atomic_read(&call->conn->hi_serial);
75018 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
75019 while (serial > hi_serial)
75020 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
75021 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
75022 serial);
75023
75024 /* request ACK generation for any ACK or DATA packet that requests
75025 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
75026 index 8e22bd3..f66d1c0 100644
75027 --- a/net/rxrpc/ar-internal.h
75028 +++ b/net/rxrpc/ar-internal.h
75029 @@ -272,8 +272,8 @@ struct rxrpc_connection {
75030 int error; /* error code for local abort */
75031 int debug_id; /* debug ID for printks */
75032 unsigned call_counter; /* call ID counter */
75033 - atomic_t serial; /* packet serial number counter */
75034 - atomic_t hi_serial; /* highest serial number received */
75035 + atomic_unchecked_t serial; /* packet serial number counter */
75036 + atomic_unchecked_t hi_serial; /* highest serial number received */
75037 u8 avail_calls; /* number of calls available */
75038 u8 size_align; /* data size alignment (for security) */
75039 u8 header_size; /* rxrpc + security header size */
75040 @@ -346,7 +346,7 @@ struct rxrpc_call {
75041 spinlock_t lock;
75042 rwlock_t state_lock; /* lock for state transition */
75043 atomic_t usage;
75044 - atomic_t sequence; /* Tx data packet sequence counter */
75045 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
75046 u32 abort_code; /* local/remote abort code */
75047 enum { /* current state of call */
75048 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
75049 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
75050 */
75051 extern atomic_t rxrpc_n_skbs;
75052 extern __be32 rxrpc_epoch;
75053 -extern atomic_t rxrpc_debug_id;
75054 +extern atomic_unchecked_t rxrpc_debug_id;
75055 extern struct workqueue_struct *rxrpc_workqueue;
75056
75057 /*
75058 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
75059 index 87f7135..74d3703 100644
75060 --- a/net/rxrpc/ar-local.c
75061 +++ b/net/rxrpc/ar-local.c
75062 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
75063 spin_lock_init(&local->lock);
75064 rwlock_init(&local->services_lock);
75065 atomic_set(&local->usage, 1);
75066 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
75067 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75068 memcpy(&local->srx, srx, sizeof(*srx));
75069 }
75070
75071 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
75072 index 338d793..47391d0 100644
75073 --- a/net/rxrpc/ar-output.c
75074 +++ b/net/rxrpc/ar-output.c
75075 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
75076 sp->hdr.cid = call->cid;
75077 sp->hdr.callNumber = call->call_id;
75078 sp->hdr.seq =
75079 - htonl(atomic_inc_return(&call->sequence));
75080 + htonl(atomic_inc_return_unchecked(&call->sequence));
75081 sp->hdr.serial =
75082 - htonl(atomic_inc_return(&conn->serial));
75083 + htonl(atomic_inc_return_unchecked(&conn->serial));
75084 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
75085 sp->hdr.userStatus = 0;
75086 sp->hdr.securityIndex = conn->security_ix;
75087 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
75088 index 2754f09..b20e38f 100644
75089 --- a/net/rxrpc/ar-peer.c
75090 +++ b/net/rxrpc/ar-peer.c
75091 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
75092 INIT_LIST_HEAD(&peer->error_targets);
75093 spin_lock_init(&peer->lock);
75094 atomic_set(&peer->usage, 1);
75095 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
75096 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75097 memcpy(&peer->srx, srx, sizeof(*srx));
75098
75099 rxrpc_assess_MTU_size(peer);
75100 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
75101 index 38047f7..9f48511 100644
75102 --- a/net/rxrpc/ar-proc.c
75103 +++ b/net/rxrpc/ar-proc.c
75104 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
75105 atomic_read(&conn->usage),
75106 rxrpc_conn_states[conn->state],
75107 key_serial(conn->key),
75108 - atomic_read(&conn->serial),
75109 - atomic_read(&conn->hi_serial));
75110 + atomic_read_unchecked(&conn->serial),
75111 + atomic_read_unchecked(&conn->hi_serial));
75112
75113 return 0;
75114 }
75115 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
75116 index 92df566..87ec1bf 100644
75117 --- a/net/rxrpc/ar-transport.c
75118 +++ b/net/rxrpc/ar-transport.c
75119 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
75120 spin_lock_init(&trans->client_lock);
75121 rwlock_init(&trans->conn_lock);
75122 atomic_set(&trans->usage, 1);
75123 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
75124 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
75125
75126 if (peer->srx.transport.family == AF_INET) {
75127 switch (peer->srx.transport_type) {
75128 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
75129 index 7635107..4670276 100644
75130 --- a/net/rxrpc/rxkad.c
75131 +++ b/net/rxrpc/rxkad.c
75132 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
75133
75134 len = iov[0].iov_len + iov[1].iov_len;
75135
75136 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
75137 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
75138 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
75139
75140 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
75141 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
75142
75143 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
75144
75145 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
75146 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
75147 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
75148
75149 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
75150 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
75151 index 1e2eee8..ce3967e 100644
75152 --- a/net/sctp/proc.c
75153 +++ b/net/sctp/proc.c
75154 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
75155 seq_printf(seq,
75156 "%8pK %8pK %-3d %-3d %-2d %-4d "
75157 "%4d %8d %8d %7d %5lu %-5d %5d ",
75158 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
75159 + assoc, sk,
75160 + sctp_sk(sk)->type, sk->sk_state,
75161 assoc->state, hash,
75162 assoc->assoc_id,
75163 assoc->sndbuf_used,
75164 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
75165 index 54a7cd2..944edae 100644
75166 --- a/net/sctp/socket.c
75167 +++ b/net/sctp/socket.c
75168 @@ -4574,7 +4574,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
75169 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
75170 if (space_left < addrlen)
75171 return -ENOMEM;
75172 - if (copy_to_user(to, &temp, addrlen))
75173 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
75174 return -EFAULT;
75175 to += addrlen;
75176 cnt++;
75177 diff --git a/net/socket.c b/net/socket.c
75178 index 2dce67a..1e91168 100644
75179 --- a/net/socket.c
75180 +++ b/net/socket.c
75181 @@ -88,6 +88,7 @@
75182 #include <linux/nsproxy.h>
75183 #include <linux/magic.h>
75184 #include <linux/slab.h>
75185 +#include <linux/in.h>
75186
75187 #include <asm/uaccess.h>
75188 #include <asm/unistd.h>
75189 @@ -105,6 +106,8 @@
75190 #include <linux/sockios.h>
75191 #include <linux/atalk.h>
75192
75193 +#include <linux/grsock.h>
75194 +
75195 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
75196 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
75197 unsigned long nr_segs, loff_t pos);
75198 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
75199 &sockfs_dentry_operations, SOCKFS_MAGIC);
75200 }
75201
75202 -static struct vfsmount *sock_mnt __read_mostly;
75203 +struct vfsmount *sock_mnt __read_mostly;
75204
75205 static struct file_system_type sock_fs_type = {
75206 .name = "sockfs",
75207 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
75208 return -EAFNOSUPPORT;
75209 if (type < 0 || type >= SOCK_MAX)
75210 return -EINVAL;
75211 + if (protocol < 0)
75212 + return -EINVAL;
75213
75214 /* Compatibility.
75215
75216 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
75217 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
75218 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
75219
75220 + if(!gr_search_socket(family, type, protocol)) {
75221 + retval = -EACCES;
75222 + goto out;
75223 + }
75224 +
75225 + if (gr_handle_sock_all(family, type, protocol)) {
75226 + retval = -EACCES;
75227 + goto out;
75228 + }
75229 +
75230 retval = sock_create(family, type, protocol, &sock);
75231 if (retval < 0)
75232 goto out;
75233 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
75234 if (sock) {
75235 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
75236 if (err >= 0) {
75237 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
75238 + err = -EACCES;
75239 + goto error;
75240 + }
75241 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
75242 + if (err)
75243 + goto error;
75244 +
75245 err = security_socket_bind(sock,
75246 (struct sockaddr *)&address,
75247 addrlen);
75248 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
75249 (struct sockaddr *)
75250 &address, addrlen);
75251 }
75252 +error:
75253 fput_light(sock->file, fput_needed);
75254 }
75255 return err;
75256 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
75257 if ((unsigned)backlog > somaxconn)
75258 backlog = somaxconn;
75259
75260 + if (gr_handle_sock_server_other(sock->sk)) {
75261 + err = -EPERM;
75262 + goto error;
75263 + }
75264 +
75265 + err = gr_search_listen(sock);
75266 + if (err)
75267 + goto error;
75268 +
75269 err = security_socket_listen(sock, backlog);
75270 if (!err)
75271 err = sock->ops->listen(sock, backlog);
75272
75273 +error:
75274 fput_light(sock->file, fput_needed);
75275 }
75276 return err;
75277 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
75278 newsock->type = sock->type;
75279 newsock->ops = sock->ops;
75280
75281 + if (gr_handle_sock_server_other(sock->sk)) {
75282 + err = -EPERM;
75283 + sock_release(newsock);
75284 + goto out_put;
75285 + }
75286 +
75287 + err = gr_search_accept(sock);
75288 + if (err) {
75289 + sock_release(newsock);
75290 + goto out_put;
75291 + }
75292 +
75293 /*
75294 * We don't need try_module_get here, as the listening socket (sock)
75295 * has the protocol module (sock->ops->owner) held.
75296 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
75297 fd_install(newfd, newfile);
75298 err = newfd;
75299
75300 + gr_attach_curr_ip(newsock->sk);
75301 +
75302 out_put:
75303 fput_light(sock->file, fput_needed);
75304 out:
75305 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
75306 int, addrlen)
75307 {
75308 struct socket *sock;
75309 + struct sockaddr *sck;
75310 struct sockaddr_storage address;
75311 int err, fput_needed;
75312
75313 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
75314 if (err < 0)
75315 goto out_put;
75316
75317 + sck = (struct sockaddr *)&address;
75318 +
75319 + if (gr_handle_sock_client(sck)) {
75320 + err = -EACCES;
75321 + goto out_put;
75322 + }
75323 +
75324 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
75325 + if (err)
75326 + goto out_put;
75327 +
75328 err =
75329 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
75330 if (err)
75331 @@ -1950,7 +2010,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
75332 * checking falls down on this.
75333 */
75334 if (copy_from_user(ctl_buf,
75335 - (void __user __force *)msg_sys->msg_control,
75336 + (void __force_user *)msg_sys->msg_control,
75337 ctl_len))
75338 goto out_freectl;
75339 msg_sys->msg_control = ctl_buf;
75340 @@ -2120,7 +2180,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
75341 * kernel msghdr to use the kernel address space)
75342 */
75343
75344 - uaddr = (__force void __user *)msg_sys->msg_name;
75345 + uaddr = (void __force_user *)msg_sys->msg_name;
75346 uaddr_len = COMPAT_NAMELEN(msg);
75347 if (MSG_CMSG_COMPAT & flags) {
75348 err = verify_compat_iovec(msg_sys, iov,
75349 @@ -2748,7 +2808,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75350 }
75351
75352 ifr = compat_alloc_user_space(buf_size);
75353 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
75354 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
75355
75356 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
75357 return -EFAULT;
75358 @@ -2772,12 +2832,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75359 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
75360
75361 if (copy_in_user(rxnfc, compat_rxnfc,
75362 - (void *)(&rxnfc->fs.m_ext + 1) -
75363 - (void *)rxnfc) ||
75364 + (void __user *)(&rxnfc->fs.m_ext + 1) -
75365 + (void __user *)rxnfc) ||
75366 copy_in_user(&rxnfc->fs.ring_cookie,
75367 &compat_rxnfc->fs.ring_cookie,
75368 - (void *)(&rxnfc->fs.location + 1) -
75369 - (void *)&rxnfc->fs.ring_cookie) ||
75370 + (void __user *)(&rxnfc->fs.location + 1) -
75371 + (void __user *)&rxnfc->fs.ring_cookie) ||
75372 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
75373 sizeof(rxnfc->rule_cnt)))
75374 return -EFAULT;
75375 @@ -2789,12 +2849,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75376
75377 if (convert_out) {
75378 if (copy_in_user(compat_rxnfc, rxnfc,
75379 - (const void *)(&rxnfc->fs.m_ext + 1) -
75380 - (const void *)rxnfc) ||
75381 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
75382 + (const void __user *)rxnfc) ||
75383 copy_in_user(&compat_rxnfc->fs.ring_cookie,
75384 &rxnfc->fs.ring_cookie,
75385 - (const void *)(&rxnfc->fs.location + 1) -
75386 - (const void *)&rxnfc->fs.ring_cookie) ||
75387 + (const void __user *)(&rxnfc->fs.location + 1) -
75388 + (const void __user *)&rxnfc->fs.ring_cookie) ||
75389 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
75390 sizeof(rxnfc->rule_cnt)))
75391 return -EFAULT;
75392 @@ -2864,7 +2924,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
75393 old_fs = get_fs();
75394 set_fs(KERNEL_DS);
75395 err = dev_ioctl(net, cmd,
75396 - (struct ifreq __user __force *) &kifr);
75397 + (struct ifreq __force_user *) &kifr);
75398 set_fs(old_fs);
75399
75400 return err;
75401 @@ -2973,7 +3033,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
75402
75403 old_fs = get_fs();
75404 set_fs(KERNEL_DS);
75405 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
75406 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
75407 set_fs(old_fs);
75408
75409 if (cmd == SIOCGIFMAP && !err) {
75410 @@ -3078,7 +3138,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
75411 ret |= __get_user(rtdev, &(ur4->rt_dev));
75412 if (rtdev) {
75413 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
75414 - r4.rt_dev = (char __user __force *)devname;
75415 + r4.rt_dev = (char __force_user *)devname;
75416 devname[15] = 0;
75417 } else
75418 r4.rt_dev = NULL;
75419 @@ -3318,8 +3378,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
75420 int __user *uoptlen;
75421 int err;
75422
75423 - uoptval = (char __user __force *) optval;
75424 - uoptlen = (int __user __force *) optlen;
75425 + uoptval = (char __force_user *) optval;
75426 + uoptlen = (int __force_user *) optlen;
75427
75428 set_fs(KERNEL_DS);
75429 if (level == SOL_SOCKET)
75430 @@ -3339,7 +3399,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
75431 char __user *uoptval;
75432 int err;
75433
75434 - uoptval = (char __user __force *) optval;
75435 + uoptval = (char __force_user *) optval;
75436
75437 set_fs(KERNEL_DS);
75438 if (level == SOL_SOCKET)
75439 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
75440 index 00a1a2a..6a0138a 100644
75441 --- a/net/sunrpc/sched.c
75442 +++ b/net/sunrpc/sched.c
75443 @@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word)
75444 #ifdef RPC_DEBUG
75445 static void rpc_task_set_debuginfo(struct rpc_task *task)
75446 {
75447 - static atomic_t rpc_pid;
75448 + static atomic_unchecked_t rpc_pid;
75449
75450 - task->tk_pid = atomic_inc_return(&rpc_pid);
75451 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
75452 }
75453 #else
75454 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
75455 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
75456 index 71bed1c..5dff36d 100644
75457 --- a/net/sunrpc/svcsock.c
75458 +++ b/net/sunrpc/svcsock.c
75459 @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
75460 int buflen, unsigned int base)
75461 {
75462 size_t save_iovlen;
75463 - void __user *save_iovbase;
75464 + void *save_iovbase;
75465 unsigned int i;
75466 int ret;
75467
75468 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
75469 index 09af4fa..77110a9 100644
75470 --- a/net/sunrpc/xprtrdma/svc_rdma.c
75471 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
75472 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
75473 static unsigned int min_max_inline = 4096;
75474 static unsigned int max_max_inline = 65536;
75475
75476 -atomic_t rdma_stat_recv;
75477 -atomic_t rdma_stat_read;
75478 -atomic_t rdma_stat_write;
75479 -atomic_t rdma_stat_sq_starve;
75480 -atomic_t rdma_stat_rq_starve;
75481 -atomic_t rdma_stat_rq_poll;
75482 -atomic_t rdma_stat_rq_prod;
75483 -atomic_t rdma_stat_sq_poll;
75484 -atomic_t rdma_stat_sq_prod;
75485 +atomic_unchecked_t rdma_stat_recv;
75486 +atomic_unchecked_t rdma_stat_read;
75487 +atomic_unchecked_t rdma_stat_write;
75488 +atomic_unchecked_t rdma_stat_sq_starve;
75489 +atomic_unchecked_t rdma_stat_rq_starve;
75490 +atomic_unchecked_t rdma_stat_rq_poll;
75491 +atomic_unchecked_t rdma_stat_rq_prod;
75492 +atomic_unchecked_t rdma_stat_sq_poll;
75493 +atomic_unchecked_t rdma_stat_sq_prod;
75494
75495 /* Temporary NFS request map and context caches */
75496 struct kmem_cache *svc_rdma_map_cachep;
75497 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
75498 len -= *ppos;
75499 if (len > *lenp)
75500 len = *lenp;
75501 - if (len && copy_to_user(buffer, str_buf, len))
75502 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
75503 return -EFAULT;
75504 *lenp = len;
75505 *ppos += len;
75506 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
75507 {
75508 .procname = "rdma_stat_read",
75509 .data = &rdma_stat_read,
75510 - .maxlen = sizeof(atomic_t),
75511 + .maxlen = sizeof(atomic_unchecked_t),
75512 .mode = 0644,
75513 .proc_handler = read_reset_stat,
75514 },
75515 {
75516 .procname = "rdma_stat_recv",
75517 .data = &rdma_stat_recv,
75518 - .maxlen = sizeof(atomic_t),
75519 + .maxlen = sizeof(atomic_unchecked_t),
75520 .mode = 0644,
75521 .proc_handler = read_reset_stat,
75522 },
75523 {
75524 .procname = "rdma_stat_write",
75525 .data = &rdma_stat_write,
75526 - .maxlen = sizeof(atomic_t),
75527 + .maxlen = sizeof(atomic_unchecked_t),
75528 .mode = 0644,
75529 .proc_handler = read_reset_stat,
75530 },
75531 {
75532 .procname = "rdma_stat_sq_starve",
75533 .data = &rdma_stat_sq_starve,
75534 - .maxlen = sizeof(atomic_t),
75535 + .maxlen = sizeof(atomic_unchecked_t),
75536 .mode = 0644,
75537 .proc_handler = read_reset_stat,
75538 },
75539 {
75540 .procname = "rdma_stat_rq_starve",
75541 .data = &rdma_stat_rq_starve,
75542 - .maxlen = sizeof(atomic_t),
75543 + .maxlen = sizeof(atomic_unchecked_t),
75544 .mode = 0644,
75545 .proc_handler = read_reset_stat,
75546 },
75547 {
75548 .procname = "rdma_stat_rq_poll",
75549 .data = &rdma_stat_rq_poll,
75550 - .maxlen = sizeof(atomic_t),
75551 + .maxlen = sizeof(atomic_unchecked_t),
75552 .mode = 0644,
75553 .proc_handler = read_reset_stat,
75554 },
75555 {
75556 .procname = "rdma_stat_rq_prod",
75557 .data = &rdma_stat_rq_prod,
75558 - .maxlen = sizeof(atomic_t),
75559 + .maxlen = sizeof(atomic_unchecked_t),
75560 .mode = 0644,
75561 .proc_handler = read_reset_stat,
75562 },
75563 {
75564 .procname = "rdma_stat_sq_poll",
75565 .data = &rdma_stat_sq_poll,
75566 - .maxlen = sizeof(atomic_t),
75567 + .maxlen = sizeof(atomic_unchecked_t),
75568 .mode = 0644,
75569 .proc_handler = read_reset_stat,
75570 },
75571 {
75572 .procname = "rdma_stat_sq_prod",
75573 .data = &rdma_stat_sq_prod,
75574 - .maxlen = sizeof(atomic_t),
75575 + .maxlen = sizeof(atomic_unchecked_t),
75576 .mode = 0644,
75577 .proc_handler = read_reset_stat,
75578 },
75579 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75580 index df67211..c354b13 100644
75581 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75582 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75583 @@ -499,7 +499,7 @@ next_sge:
75584 svc_rdma_put_context(ctxt, 0);
75585 goto out;
75586 }
75587 - atomic_inc(&rdma_stat_read);
75588 + atomic_inc_unchecked(&rdma_stat_read);
75589
75590 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
75591 chl_map->ch[ch_no].count -= read_wr.num_sge;
75592 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
75593 dto_q);
75594 list_del_init(&ctxt->dto_q);
75595 } else {
75596 - atomic_inc(&rdma_stat_rq_starve);
75597 + atomic_inc_unchecked(&rdma_stat_rq_starve);
75598 clear_bit(XPT_DATA, &xprt->xpt_flags);
75599 ctxt = NULL;
75600 }
75601 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
75602 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
75603 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
75604 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
75605 - atomic_inc(&rdma_stat_recv);
75606 + atomic_inc_unchecked(&rdma_stat_recv);
75607
75608 /* Build up the XDR from the receive buffers. */
75609 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
75610 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75611 index 249a835..fb2794b 100644
75612 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75613 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75614 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
75615 write_wr.wr.rdma.remote_addr = to;
75616
75617 /* Post It */
75618 - atomic_inc(&rdma_stat_write);
75619 + atomic_inc_unchecked(&rdma_stat_write);
75620 if (svc_rdma_send(xprt, &write_wr))
75621 goto err;
75622 return 0;
75623 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
75624 index ba1296d..0fec1a5 100644
75625 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
75626 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
75627 @@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
75628 return;
75629
75630 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
75631 - atomic_inc(&rdma_stat_rq_poll);
75632 + atomic_inc_unchecked(&rdma_stat_rq_poll);
75633
75634 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
75635 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
75636 @@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
75637 }
75638
75639 if (ctxt)
75640 - atomic_inc(&rdma_stat_rq_prod);
75641 + atomic_inc_unchecked(&rdma_stat_rq_prod);
75642
75643 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
75644 /*
75645 @@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
75646 return;
75647
75648 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
75649 - atomic_inc(&rdma_stat_sq_poll);
75650 + atomic_inc_unchecked(&rdma_stat_sq_poll);
75651 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
75652 if (wc.status != IB_WC_SUCCESS)
75653 /* Close the transport */
75654 @@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
75655 }
75656
75657 if (ctxt)
75658 - atomic_inc(&rdma_stat_sq_prod);
75659 + atomic_inc_unchecked(&rdma_stat_sq_prod);
75660 }
75661
75662 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
75663 @@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
75664 spin_lock_bh(&xprt->sc_lock);
75665 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
75666 spin_unlock_bh(&xprt->sc_lock);
75667 - atomic_inc(&rdma_stat_sq_starve);
75668 + atomic_inc_unchecked(&rdma_stat_sq_starve);
75669
75670 /* See if we can opportunistically reap SQ WR to make room */
75671 sq_cq_reap(xprt);
75672 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
75673 index e758139..d29ea47 100644
75674 --- a/net/sysctl_net.c
75675 +++ b/net/sysctl_net.c
75676 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
75677 struct ctl_table *table)
75678 {
75679 /* Allow network administrator to have same access as root. */
75680 - if (capable(CAP_NET_ADMIN)) {
75681 + if (capable_nolog(CAP_NET_ADMIN)) {
75682 int mode = (table->mode >> 6) & 7;
75683 return (mode << 6) | (mode << 3) | mode;
75684 }
75685 diff --git a/net/tipc/link.c b/net/tipc/link.c
75686 index ae98a72..7bb6056 100644
75687 --- a/net/tipc/link.c
75688 +++ b/net/tipc/link.c
75689 @@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
75690 struct tipc_msg fragm_hdr;
75691 struct sk_buff *buf, *buf_chain, *prev;
75692 u32 fragm_crs, fragm_rest, hsz, sect_rest;
75693 - const unchar *sect_crs;
75694 + const unchar __user *sect_crs;
75695 int curr_sect;
75696 u32 fragm_no;
75697
75698 @@ -1247,7 +1247,7 @@ again:
75699
75700 if (!sect_rest) {
75701 sect_rest = msg_sect[++curr_sect].iov_len;
75702 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
75703 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
75704 }
75705
75706 if (sect_rest < fragm_rest)
75707 @@ -1266,7 +1266,7 @@ error:
75708 }
75709 } else
75710 skb_copy_to_linear_data_offset(buf, fragm_crs,
75711 - sect_crs, sz);
75712 + (const void __force_kernel *)sect_crs, sz);
75713 sect_crs += sz;
75714 sect_rest -= sz;
75715 fragm_crs += sz;
75716 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
75717 index 83d5096..dcba497 100644
75718 --- a/net/tipc/msg.c
75719 +++ b/net/tipc/msg.c
75720 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
75721 msg_sect[cnt].iov_len);
75722 else
75723 skb_copy_to_linear_data_offset(*buf, pos,
75724 - msg_sect[cnt].iov_base,
75725 + (const void __force_kernel *)msg_sect[cnt].iov_base,
75726 msg_sect[cnt].iov_len);
75727 pos += msg_sect[cnt].iov_len;
75728 }
75729 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
75730 index 1983717..4d6102c 100644
75731 --- a/net/tipc/subscr.c
75732 +++ b/net/tipc/subscr.c
75733 @@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub,
75734 {
75735 struct iovec msg_sect;
75736
75737 - msg_sect.iov_base = (void *)&sub->evt;
75738 + msg_sect.iov_base = (void __force_user *)&sub->evt;
75739 msg_sect.iov_len = sizeof(struct tipc_event);
75740
75741 sub->evt.event = htohl(event, sub->swap);
75742 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
75743 index d99678a..3514a21 100644
75744 --- a/net/unix/af_unix.c
75745 +++ b/net/unix/af_unix.c
75746 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(struct net *net,
75747 err = -ECONNREFUSED;
75748 if (!S_ISSOCK(inode->i_mode))
75749 goto put_fail;
75750 +
75751 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
75752 + err = -EACCES;
75753 + goto put_fail;
75754 + }
75755 +
75756 u = unix_find_socket_byinode(inode);
75757 if (!u)
75758 goto put_fail;
75759 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(struct net *net,
75760 if (u) {
75761 struct dentry *dentry;
75762 dentry = unix_sk(u)->dentry;
75763 +
75764 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
75765 + err = -EPERM;
75766 + sock_put(u);
75767 + goto fail;
75768 + }
75769 +
75770 if (dentry)
75771 touch_atime(unix_sk(u)->mnt, dentry);
75772 } else
75773 @@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
75774 err = security_path_mknod(&path, dentry, mode, 0);
75775 if (err)
75776 goto out_mknod_drop_write;
75777 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
75778 + err = -EACCES;
75779 + goto out_mknod_drop_write;
75780 + }
75781 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
75782 out_mknod_drop_write:
75783 mnt_drop_write(path.mnt);
75784 if (err)
75785 goto out_mknod_dput;
75786 +
75787 + gr_handle_create(dentry, path.mnt);
75788 +
75789 mutex_unlock(&path.dentry->d_inode->i_mutex);
75790 dput(path.dentry);
75791 path.dentry = dentry;
75792 diff --git a/net/wireless/core.h b/net/wireless/core.h
75793 index b9ec306..b4a563e 100644
75794 --- a/net/wireless/core.h
75795 +++ b/net/wireless/core.h
75796 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
75797 struct mutex mtx;
75798
75799 /* rfkill support */
75800 - struct rfkill_ops rfkill_ops;
75801 + rfkill_ops_no_const rfkill_ops;
75802 struct rfkill *rfkill;
75803 struct work_struct rfkill_sync;
75804
75805 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
75806 index 0af7f54..c916d2f 100644
75807 --- a/net/wireless/wext-core.c
75808 +++ b/net/wireless/wext-core.c
75809 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
75810 */
75811
75812 /* Support for very large requests */
75813 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
75814 - (user_length > descr->max_tokens)) {
75815 + if (user_length > descr->max_tokens) {
75816 /* Allow userspace to GET more than max so
75817 * we can support any size GET requests.
75818 * There is still a limit : -ENOMEM.
75819 @@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
75820 }
75821 }
75822
75823 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
75824 - /*
75825 - * If this is a GET, but not NOMAX, it means that the extra
75826 - * data is not bounded by userspace, but by max_tokens. Thus
75827 - * set the length to max_tokens. This matches the extra data
75828 - * allocation.
75829 - * The driver should fill it with the number of tokens it
75830 - * provided, and it may check iwp->length rather than having
75831 - * knowledge of max_tokens. If the driver doesn't change the
75832 - * iwp->length, this ioctl just copies back max_token tokens
75833 - * filled with zeroes. Hopefully the driver isn't claiming
75834 - * them to be valid data.
75835 - */
75836 - iwp->length = descr->max_tokens;
75837 - }
75838 -
75839 err = handler(dev, info, (union iwreq_data *) iwp, extra);
75840
75841 iwp->length += essid_compat;
75842 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
75843 index 9049a5c..cfa6f5c 100644
75844 --- a/net/xfrm/xfrm_policy.c
75845 +++ b/net/xfrm/xfrm_policy.c
75846 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
75847 {
75848 policy->walk.dead = 1;
75849
75850 - atomic_inc(&policy->genid);
75851 + atomic_inc_unchecked(&policy->genid);
75852
75853 if (del_timer(&policy->timer))
75854 xfrm_pol_put(policy);
75855 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
75856 hlist_add_head(&policy->bydst, chain);
75857 xfrm_pol_hold(policy);
75858 net->xfrm.policy_count[dir]++;
75859 - atomic_inc(&flow_cache_genid);
75860 + atomic_inc_unchecked(&flow_cache_genid);
75861 if (delpol)
75862 __xfrm_policy_unlink(delpol, dir);
75863 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
75864 @@ -1530,7 +1530,7 @@ free_dst:
75865 goto out;
75866 }
75867
75868 -static int inline
75869 +static inline int
75870 xfrm_dst_alloc_copy(void **target, const void *src, int size)
75871 {
75872 if (!*target) {
75873 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
75874 return 0;
75875 }
75876
75877 -static int inline
75878 +static inline int
75879 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75880 {
75881 #ifdef CONFIG_XFRM_SUB_POLICY
75882 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75883 #endif
75884 }
75885
75886 -static int inline
75887 +static inline int
75888 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
75889 {
75890 #ifdef CONFIG_XFRM_SUB_POLICY
75891 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
75892
75893 xdst->num_pols = num_pols;
75894 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
75895 - xdst->policy_genid = atomic_read(&pols[0]->genid);
75896 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
75897
75898 return xdst;
75899 }
75900 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
75901 if (xdst->xfrm_genid != dst->xfrm->genid)
75902 return 0;
75903 if (xdst->num_pols > 0 &&
75904 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
75905 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
75906 return 0;
75907
75908 mtu = dst_mtu(dst->child);
75909 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
75910 sizeof(pol->xfrm_vec[i].saddr));
75911 pol->xfrm_vec[i].encap_family = mp->new_family;
75912 /* flush bundles */
75913 - atomic_inc(&pol->genid);
75914 + atomic_inc_unchecked(&pol->genid);
75915 }
75916 }
75917
75918 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
75919 index d2b366c..51ff91ebc 100644
75920 --- a/scripts/Makefile.build
75921 +++ b/scripts/Makefile.build
75922 @@ -109,7 +109,7 @@ endif
75923 endif
75924
75925 # Do not include host rules unless needed
75926 -ifneq ($(hostprogs-y)$(hostprogs-m),)
75927 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
75928 include scripts/Makefile.host
75929 endif
75930
75931 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
75932 index 686cb0d..9d653bf 100644
75933 --- a/scripts/Makefile.clean
75934 +++ b/scripts/Makefile.clean
75935 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
75936 __clean-files := $(extra-y) $(always) \
75937 $(targets) $(clean-files) \
75938 $(host-progs) \
75939 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
75940 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
75941 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
75942
75943 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
75944
75945 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
75946 index 1ac414f..a1c1451 100644
75947 --- a/scripts/Makefile.host
75948 +++ b/scripts/Makefile.host
75949 @@ -31,6 +31,7 @@
75950 # Note: Shared libraries consisting of C++ files are not supported
75951
75952 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
75953 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
75954
75955 # C code
75956 # Executables compiled from a single .c file
75957 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
75958 # Shared libaries (only .c supported)
75959 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
75960 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
75961 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
75962 # Remove .so files from "xxx-objs"
75963 host-cobjs := $(filter-out %.so,$(host-cobjs))
75964
75965 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
75966 index cb1f50c..cef2a7c 100644
75967 --- a/scripts/basic/fixdep.c
75968 +++ b/scripts/basic/fixdep.c
75969 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
75970 /*
75971 * Lookup a value in the configuration string.
75972 */
75973 -static int is_defined_config(const char *name, int len, unsigned int hash)
75974 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
75975 {
75976 struct item *aux;
75977
75978 @@ -211,10 +211,10 @@ static void clear_config(void)
75979 /*
75980 * Record the use of a CONFIG_* word.
75981 */
75982 -static void use_config(const char *m, int slen)
75983 +static void use_config(const char *m, unsigned int slen)
75984 {
75985 unsigned int hash = strhash(m, slen);
75986 - int c, i;
75987 + unsigned int c, i;
75988
75989 if (is_defined_config(m, slen, hash))
75990 return;
75991 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
75992
75993 static void parse_config_file(const char *map, size_t len)
75994 {
75995 - const int *end = (const int *) (map + len);
75996 + const unsigned int *end = (const unsigned int *) (map + len);
75997 /* start at +1, so that p can never be < map */
75998 - const int *m = (const int *) map + 1;
75999 + const unsigned int *m = (const unsigned int *) map + 1;
76000 const char *p, *q;
76001
76002 for (; m < end; m++) {
76003 @@ -406,7 +406,7 @@ static void print_deps(void)
76004 static void traps(void)
76005 {
76006 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
76007 - int *p = (int *)test;
76008 + unsigned int *p = (unsigned int *)test;
76009
76010 if (*p != INT_CONF) {
76011 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
76012 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
76013 new file mode 100644
76014 index 0000000..8729101
76015 --- /dev/null
76016 +++ b/scripts/gcc-plugin.sh
76017 @@ -0,0 +1,2 @@
76018 +#!/bin/sh
76019 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
76020 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
76021 index f936d1f..a66d95f 100644
76022 --- a/scripts/mod/file2alias.c
76023 +++ b/scripts/mod/file2alias.c
76024 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
76025 unsigned long size, unsigned long id_size,
76026 void *symval)
76027 {
76028 - int i;
76029 + unsigned int i;
76030
76031 if (size % id_size || size < id_size) {
76032 if (cross_build != 0)
76033 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
76034 /* USB is special because the bcdDevice can be matched against a numeric range */
76035 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
76036 static void do_usb_entry(struct usb_device_id *id,
76037 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
76038 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
76039 unsigned char range_lo, unsigned char range_hi,
76040 unsigned char max, struct module *mod)
76041 {
76042 @@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
76043 {
76044 unsigned int devlo, devhi;
76045 unsigned char chi, clo, max;
76046 - int ndigits;
76047 + unsigned int ndigits;
76048
76049 id->match_flags = TO_NATIVE(id->match_flags);
76050 id->idVendor = TO_NATIVE(id->idVendor);
76051 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
76052 for (i = 0; i < count; i++) {
76053 const char *id = (char *)devs[i].id;
76054 char acpi_id[sizeof(devs[0].id)];
76055 - int j;
76056 + unsigned int j;
76057
76058 buf_printf(&mod->dev_table_buf,
76059 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
76060 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
76061
76062 for (j = 0; j < PNP_MAX_DEVICES; j++) {
76063 const char *id = (char *)card->devs[j].id;
76064 - int i2, j2;
76065 + unsigned int i2, j2;
76066 int dup = 0;
76067
76068 if (!id[0])
76069 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
76070 /* add an individual alias for every device entry */
76071 if (!dup) {
76072 char acpi_id[sizeof(card->devs[0].id)];
76073 - int k;
76074 + unsigned int k;
76075
76076 buf_printf(&mod->dev_table_buf,
76077 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
76078 @@ -807,7 +807,7 @@ static void dmi_ascii_filter(char *d, const char *s)
76079 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
76080 char *alias)
76081 {
76082 - int i, j;
76083 + unsigned int i, j;
76084
76085 sprintf(alias, "dmi*");
76086
76087 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
76088 index 2bd594e..d43245e 100644
76089 --- a/scripts/mod/modpost.c
76090 +++ b/scripts/mod/modpost.c
76091 @@ -919,6 +919,7 @@ enum mismatch {
76092 ANY_INIT_TO_ANY_EXIT,
76093 ANY_EXIT_TO_ANY_INIT,
76094 EXPORT_TO_INIT_EXIT,
76095 + DATA_TO_TEXT
76096 };
76097
76098 struct sectioncheck {
76099 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
76100 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
76101 .mismatch = EXPORT_TO_INIT_EXIT,
76102 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
76103 +},
76104 +/* Do not reference code from writable data */
76105 +{
76106 + .fromsec = { DATA_SECTIONS, NULL },
76107 + .tosec = { TEXT_SECTIONS, NULL },
76108 + .mismatch = DATA_TO_TEXT
76109 }
76110 };
76111
76112 @@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
76113 continue;
76114 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
76115 continue;
76116 - if (sym->st_value == addr)
76117 - return sym;
76118 /* Find a symbol nearby - addr are maybe negative */
76119 d = sym->st_value - addr;
76120 + if (d == 0)
76121 + return sym;
76122 if (d < 0)
76123 d = addr - sym->st_value;
76124 if (d < distance) {
76125 @@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname,
76126 tosym, prl_to, prl_to, tosym);
76127 free(prl_to);
76128 break;
76129 + case DATA_TO_TEXT:
76130 +/*
76131 + fprintf(stderr,
76132 + "The variable %s references\n"
76133 + "the %s %s%s%s\n",
76134 + fromsym, to, sec2annotation(tosec), tosym, to_p);
76135 +*/
76136 + break;
76137 }
76138 fprintf(stderr, "\n");
76139 }
76140 @@ -1656,7 +1671,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
76141 static void check_sec_ref(struct module *mod, const char *modname,
76142 struct elf_info *elf)
76143 {
76144 - int i;
76145 + unsigned int i;
76146 Elf_Shdr *sechdrs = elf->sechdrs;
76147
76148 /* Walk through all sections */
76149 @@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
76150 va_end(ap);
76151 }
76152
76153 -void buf_write(struct buffer *buf, const char *s, int len)
76154 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
76155 {
76156 if (buf->size - buf->pos < len) {
76157 buf->size += len + SZ;
76158 @@ -1972,7 +1987,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
76159 if (fstat(fileno(file), &st) < 0)
76160 goto close_write;
76161
76162 - if (st.st_size != b->pos)
76163 + if (st.st_size != (off_t)b->pos)
76164 goto close_write;
76165
76166 tmp = NOFAIL(malloc(b->pos));
76167 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
76168 index 2031119..b5433af 100644
76169 --- a/scripts/mod/modpost.h
76170 +++ b/scripts/mod/modpost.h
76171 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
76172
76173 struct buffer {
76174 char *p;
76175 - int pos;
76176 - int size;
76177 + unsigned int pos;
76178 + unsigned int size;
76179 };
76180
76181 void __attribute__((format(printf, 2, 3)))
76182 buf_printf(struct buffer *buf, const char *fmt, ...);
76183
76184 void
76185 -buf_write(struct buffer *buf, const char *s, int len);
76186 +buf_write(struct buffer *buf, const char *s, unsigned int len);
76187
76188 struct module {
76189 struct module *next;
76190 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
76191 index 9dfcd6d..099068e 100644
76192 --- a/scripts/mod/sumversion.c
76193 +++ b/scripts/mod/sumversion.c
76194 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
76195 goto out;
76196 }
76197
76198 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
76199 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
76200 warn("writing sum in %s failed: %s\n",
76201 filename, strerror(errno));
76202 goto out;
76203 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
76204 index 5c11312..72742b5 100644
76205 --- a/scripts/pnmtologo.c
76206 +++ b/scripts/pnmtologo.c
76207 @@ -237,14 +237,14 @@ static void write_header(void)
76208 fprintf(out, " * Linux logo %s\n", logoname);
76209 fputs(" */\n\n", out);
76210 fputs("#include <linux/linux_logo.h>\n\n", out);
76211 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
76212 + fprintf(out, "static unsigned char %s_data[] = {\n",
76213 logoname);
76214 }
76215
76216 static void write_footer(void)
76217 {
76218 fputs("\n};\n\n", out);
76219 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
76220 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
76221 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
76222 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
76223 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
76224 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
76225 fputs("\n};\n\n", out);
76226
76227 /* write logo clut */
76228 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
76229 + fprintf(out, "static unsigned char %s_clut[] = {\n",
76230 logoname);
76231 write_hex_cnt = 0;
76232 for (i = 0; i < logo_clutsize; i++) {
76233 diff --git a/security/Kconfig b/security/Kconfig
76234 index 51bd5a0..eeabc9f 100644
76235 --- a/security/Kconfig
76236 +++ b/security/Kconfig
76237 @@ -4,6 +4,627 @@
76238
76239 menu "Security options"
76240
76241 +source grsecurity/Kconfig
76242 +
76243 +menu "PaX"
76244 +
76245 + config ARCH_TRACK_EXEC_LIMIT
76246 + bool
76247 +
76248 + config PAX_KERNEXEC_PLUGIN
76249 + bool
76250 +
76251 + config PAX_PER_CPU_PGD
76252 + bool
76253 +
76254 + config TASK_SIZE_MAX_SHIFT
76255 + int
76256 + depends on X86_64
76257 + default 47 if !PAX_PER_CPU_PGD
76258 + default 42 if PAX_PER_CPU_PGD
76259 +
76260 + config PAX_ENABLE_PAE
76261 + bool
76262 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
76263 +
76264 +config PAX
76265 + bool "Enable various PaX features"
76266 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
76267 + help
76268 + This allows you to enable various PaX features. PaX adds
76269 + intrusion prevention mechanisms to the kernel that reduce
76270 + the risks posed by exploitable memory corruption bugs.
76271 +
76272 +menu "PaX Control"
76273 + depends on PAX
76274 +
76275 +config PAX_SOFTMODE
76276 + bool 'Support soft mode'
76277 + help
76278 + Enabling this option will allow you to run PaX in soft mode, that
76279 + is, PaX features will not be enforced by default, only on executables
76280 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
76281 + support as they are the only way to mark executables for soft mode use.
76282 +
76283 + Soft mode can be activated by using the "pax_softmode=1" kernel command
76284 + line option on boot. Furthermore you can control various PaX features
76285 + at runtime via the entries in /proc/sys/kernel/pax.
76286 +
76287 +config PAX_EI_PAX
76288 + bool 'Use legacy ELF header marking'
76289 + help
76290 + Enabling this option will allow you to control PaX features on
76291 + a per executable basis via the 'chpax' utility available at
76292 + http://pax.grsecurity.net/. The control flags will be read from
76293 + an otherwise reserved part of the ELF header. This marking has
76294 + numerous drawbacks (no support for soft-mode, toolchain does not
76295 + know about the non-standard use of the ELF header) therefore it
76296 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
76297 + support.
76298 +
76299 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76300 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
76301 + option otherwise they will not get any protection.
76302 +
76303 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
76304 + support as well, they will override the legacy EI_PAX marks.
76305 +
76306 +config PAX_PT_PAX_FLAGS
76307 + bool 'Use ELF program header marking'
76308 + help
76309 + Enabling this option will allow you to control PaX features on
76310 + a per executable basis via the 'paxctl' utility available at
76311 + http://pax.grsecurity.net/. The control flags will be read from
76312 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
76313 + has the benefits of supporting both soft mode and being fully
76314 + integrated into the toolchain (the binutils patch is available
76315 + from http://pax.grsecurity.net).
76316 +
76317 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76318 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
76319 + support otherwise they will not get any protection.
76320 +
76321 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
76322 + must make sure that the marks are the same if a binary has both marks.
76323 +
76324 + Note that if you enable the legacy EI_PAX marking support as well,
76325 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
76326 +
76327 +config PAX_XATTR_PAX_FLAGS
76328 + bool 'Use filesystem extended attributes marking'
76329 + depends on EXPERT
76330 + select CIFS_XATTR if CIFS
76331 + select EXT2_FS_XATTR if EXT2_FS
76332 + select EXT3_FS_XATTR if EXT3_FS
76333 + select EXT4_FS_XATTR if EXT4_FS
76334 + select JFFS2_FS_XATTR if JFFS2_FS
76335 + select REISERFS_FS_XATTR if REISERFS_FS
76336 + select SQUASHFS_XATTR if SQUASHFS
76337 + select TMPFS_XATTR if TMPFS
76338 + select UBIFS_FS_XATTR if UBIFS_FS
76339 + help
76340 + Enabling this option will allow you to control PaX features on
76341 + a per executable basis via the 'setfattr' utility. The control
76342 + flags will be read from the user.pax.flags extended attribute of
76343 + the file. This marking has the benefit of supporting binary-only
76344 + applications that self-check themselves (e.g., skype) and would
76345 + not tolerate chpax/paxctl changes. The main drawback is that
76346 + extended attributes are not supported by some filesystems (e.g.,
76347 + isofs, udf, vfat) so copying files through such filesystems will
76348 + lose the extended attributes and these PaX markings.
76349 +
76350 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76351 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
76352 + support otherwise they will not get any protection.
76353 +
76354 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
76355 + must make sure that the marks are the same if a binary has both marks.
76356 +
76357 + Note that if you enable the legacy EI_PAX marking support as well,
76358 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
76359 +
76360 +choice
76361 + prompt 'MAC system integration'
76362 + default PAX_HAVE_ACL_FLAGS
76363 + help
76364 + Mandatory Access Control systems have the option of controlling
76365 + PaX flags on a per executable basis, choose the method supported
76366 + by your particular system.
76367 +
76368 + - "none": if your MAC system does not interact with PaX,
76369 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
76370 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
76371 +
76372 + NOTE: this option is for developers/integrators only.
76373 +
76374 + config PAX_NO_ACL_FLAGS
76375 + bool 'none'
76376 +
76377 + config PAX_HAVE_ACL_FLAGS
76378 + bool 'direct'
76379 +
76380 + config PAX_HOOK_ACL_FLAGS
76381 + bool 'hook'
76382 +endchoice
76383 +
76384 +endmenu
76385 +
76386 +menu "Non-executable pages"
76387 + depends on PAX
76388 +
76389 +config PAX_NOEXEC
76390 + bool "Enforce non-executable pages"
76391 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
76392 + help
76393 + By design some architectures do not allow for protecting memory
76394 + pages against execution or even if they do, Linux does not make
76395 + use of this feature. In practice this means that if a page is
76396 + readable (such as the stack or heap) it is also executable.
76397 +
76398 + There is a well known exploit technique that makes use of this
76399 + fact and a common programming mistake where an attacker can
76400 + introduce code of his choice somewhere in the attacked program's
76401 + memory (typically the stack or the heap) and then execute it.
76402 +
76403 + If the attacked program was running with different (typically
76404 + higher) privileges than that of the attacker, then he can elevate
76405 + his own privilege level (e.g. get a root shell, write to files for
76406 + which he does not have write access to, etc).
76407 +
76408 + Enabling this option will let you choose from various features
76409 + that prevent the injection and execution of 'foreign' code in
76410 + a program.
76411 +
76412 + This will also break programs that rely on the old behaviour and
76413 + expect that dynamically allocated memory via the malloc() family
76414 + of functions is executable (which it is not). Notable examples
76415 + are the XFree86 4.x server, the java runtime and wine.
76416 +
76417 +config PAX_PAGEEXEC
76418 + bool "Paging based non-executable pages"
76419 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
76420 + select S390_SWITCH_AMODE if S390
76421 + select S390_EXEC_PROTECT if S390
76422 + select ARCH_TRACK_EXEC_LIMIT if X86_32
76423 + help
76424 + This implementation is based on the paging feature of the CPU.
76425 + On i386 without hardware non-executable bit support there is a
76426 + variable but usually low performance impact, however on Intel's
76427 + P4 core based CPUs it is very high so you should not enable this
76428 + for kernels meant to be used on such CPUs.
76429 +
76430 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
76431 + with hardware non-executable bit support there is no performance
76432 + impact, on ppc the impact is negligible.
76433 +
76434 + Note that several architectures require various emulations due to
76435 + badly designed userland ABIs, this will cause a performance impact
76436 + but will disappear as soon as userland is fixed. For example, ppc
76437 + userland MUST have been built with secure-plt by a recent toolchain.
76438 +
76439 +config PAX_SEGMEXEC
76440 + bool "Segmentation based non-executable pages"
76441 + depends on PAX_NOEXEC && X86_32
76442 + help
76443 + This implementation is based on the segmentation feature of the
76444 + CPU and has a very small performance impact, however applications
76445 + will be limited to a 1.5 GB address space instead of the normal
76446 + 3 GB.
76447 +
76448 +config PAX_EMUTRAMP
76449 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
76450 + default y if PARISC
76451 + help
76452 + There are some programs and libraries that for one reason or
76453 + another attempt to execute special small code snippets from
76454 + non-executable memory pages. Most notable examples are the
76455 + signal handler return code generated by the kernel itself and
76456 + the GCC trampolines.
76457 +
76458 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
76459 + such programs will no longer work under your kernel.
76460 +
76461 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
76462 + utilities to enable trampoline emulation for the affected programs
76463 + yet still have the protection provided by the non-executable pages.
76464 +
76465 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
76466 + your system will not even boot.
76467 +
76468 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
76469 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
76470 + for the affected files.
76471 +
76472 + NOTE: enabling this feature *may* open up a loophole in the
76473 + protection provided by non-executable pages that an attacker
76474 + could abuse. Therefore the best solution is to not have any
76475 + files on your system that would require this option. This can
76476 + be achieved by not using libc5 (which relies on the kernel
76477 + signal handler return code) and not using or rewriting programs
76478 + that make use of the nested function implementation of GCC.
76479 + Skilled users can just fix GCC itself so that it implements
76480 + nested function calls in a way that does not interfere with PaX.
76481 +
76482 +config PAX_EMUSIGRT
76483 + bool "Automatically emulate sigreturn trampolines"
76484 + depends on PAX_EMUTRAMP && PARISC
76485 + default y
76486 + help
76487 + Enabling this option will have the kernel automatically detect
76488 + and emulate signal return trampolines executing on the stack
76489 + that would otherwise lead to task termination.
76490 +
76491 + This solution is intended as a temporary one for users with
76492 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
76493 + Modula-3 runtime, etc) or executables linked to such, basically
76494 + everything that does not specify its own SA_RESTORER function in
76495 + normal executable memory like glibc 2.1+ does.
76496 +
76497 + On parisc you MUST enable this option, otherwise your system will
76498 + not even boot.
76499 +
76500 + NOTE: this feature cannot be disabled on a per executable basis
76501 + and since it *does* open up a loophole in the protection provided
76502 + by non-executable pages, the best solution is to not have any
76503 + files on your system that would require this option.
76504 +
76505 +config PAX_MPROTECT
76506 + bool "Restrict mprotect()"
76507 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
76508 + help
76509 + Enabling this option will prevent programs from
76510 + - changing the executable status of memory pages that were
76511 + not originally created as executable,
76512 + - making read-only executable pages writable again,
76513 + - creating executable pages from anonymous memory,
76514 + - making read-only-after-relocations (RELRO) data pages writable again.
76515 +
76516 + You should say Y here to complete the protection provided by
76517 + the enforcement of non-executable pages.
76518 +
76519 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76520 + this feature on a per file basis.
76521 +
76522 +config PAX_MPROTECT_COMPAT
76523 + bool "Use legacy/compat protection demoting (read help)"
76524 + depends on PAX_MPROTECT
76525 + default n
76526 + help
76527 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
76528 + by sending the proper error code to the application. For some broken
76529 + userland, this can cause problems with Python or other applications. The
76530 + current implementation however allows for applications like clamav to
76531 + detect if JIT compilation/execution is allowed and to fall back gracefully
76532 + to an interpreter-based mode if it does not. While we encourage everyone
76533 + to use the current implementation as-is and push upstream to fix broken
76534 + userland (note that the RWX logging option can assist with this), in some
76535 + environments this may not be possible. Having to disable MPROTECT
76536 + completely on certain binaries reduces the security benefit of PaX,
76537 + so this option is provided for those environments to revert to the old
76538 + behavior.
76539 +
76540 +config PAX_ELFRELOCS
76541 + bool "Allow ELF text relocations (read help)"
76542 + depends on PAX_MPROTECT
76543 + default n
76544 + help
76545 + Non-executable pages and mprotect() restrictions are effective
76546 + in preventing the introduction of new executable code into an
76547 + attacked task's address space. There remain only two venues
76548 + for this kind of attack: if the attacker can execute already
76549 + existing code in the attacked task then he can either have it
76550 + create and mmap() a file containing his code or have it mmap()
76551 + an already existing ELF library that does not have position
76552 + independent code in it and use mprotect() on it to make it
76553 + writable and copy his code there. While protecting against
76554 + the former approach is beyond PaX, the latter can be prevented
76555 + by having only PIC ELF libraries on one's system (which do not
76556 + need to relocate their code). If you are sure this is your case,
76557 + as is the case with all modern Linux distributions, then leave
76558 + this option disabled. You should say 'n' here.
76559 +
76560 +config PAX_ETEXECRELOCS
76561 + bool "Allow ELF ET_EXEC text relocations"
76562 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
76563 + select PAX_ELFRELOCS
76564 + default y
76565 + help
76566 + On some architectures there are incorrectly created applications
76567 + that require text relocations and would not work without enabling
76568 + this option. If you are an alpha, ia64 or parisc user, you should
76569 + enable this option and disable it once you have made sure that
76570 + none of your applications need it.
76571 +
76572 +config PAX_EMUPLT
76573 + bool "Automatically emulate ELF PLT"
76574 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
76575 + default y
76576 + help
76577 + Enabling this option will have the kernel automatically detect
76578 + and emulate the Procedure Linkage Table entries in ELF files.
76579 + On some architectures such entries are in writable memory, and
76580 + become non-executable leading to task termination. Therefore
76581 + it is mandatory that you enable this option on alpha, parisc,
76582 + sparc and sparc64, otherwise your system would not even boot.
76583 +
76584 + NOTE: this feature *does* open up a loophole in the protection
76585 + provided by the non-executable pages, therefore the proper
76586 + solution is to modify the toolchain to produce a PLT that does
76587 + not need to be writable.
76588 +
76589 +config PAX_DLRESOLVE
76590 + bool 'Emulate old glibc resolver stub'
76591 + depends on PAX_EMUPLT && SPARC
76592 + default n
76593 + help
76594 + This option is needed if userland has an old glibc (before 2.4)
76595 + that puts a 'save' instruction into the runtime generated resolver
76596 + stub that needs special emulation.
76597 +
76598 +config PAX_KERNEXEC
76599 + bool "Enforce non-executable kernel pages"
76600 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
76601 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
76602 + select PAX_KERNEXEC_PLUGIN if X86_64
76603 + help
76604 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
76605 + that is, enabling this option will make it harder to inject
76606 + and execute 'foreign' code in kernel memory itself.
76607 +
76608 + Note that on x86_64 kernels there is a known regression when
76609 + this feature and KVM/VMX are both enabled in the host kernel.
76610 +
76611 +choice
76612 + prompt "Return Address Instrumentation Method"
76613 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
76614 + depends on PAX_KERNEXEC_PLUGIN
76615 + help
76616 + Select the method used to instrument function pointer dereferences.
76617 + Note that binary modules cannot be instrumented by this approach.
76618 +
76619 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
76620 + bool "bts"
76621 + help
76622 + This method is compatible with binary only modules but has
76623 + a higher runtime overhead.
76624 +
76625 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
76626 + bool "or"
76627 + depends on !PARAVIRT
76628 + help
76629 + This method is incompatible with binary only modules but has
76630 + a lower runtime overhead.
76631 +endchoice
76632 +
76633 +config PAX_KERNEXEC_PLUGIN_METHOD
76634 + string
76635 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
76636 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
76637 + default ""
76638 +
76639 +config PAX_KERNEXEC_MODULE_TEXT
76640 + int "Minimum amount of memory reserved for module code"
76641 + default "4"
76642 + depends on PAX_KERNEXEC && X86_32 && MODULES
76643 + help
76644 + Due to implementation details the kernel must reserve a fixed
76645 + amount of memory for module code at compile time that cannot be
76646 + changed at runtime. Here you can specify the minimum amount
76647 + in MB that will be reserved. Due to the same implementation
76648 + details this size will always be rounded up to the next 2/4 MB
76649 + boundary (depends on PAE) so the actually available memory for
76650 + module code will usually be more than this minimum.
76651 +
76652 + The default 4 MB should be enough for most users but if you have
76653 + an excessive number of modules (e.g., most distribution configs
76654 + compile many drivers as modules) or use huge modules such as
76655 + nvidia's kernel driver, you will need to adjust this amount.
76656 + A good rule of thumb is to look at your currently loaded kernel
76657 + modules and add up their sizes.
76658 +
76659 +endmenu
76660 +
76661 +menu "Address Space Layout Randomization"
76662 + depends on PAX
76663 +
76664 +config PAX_ASLR
76665 + bool "Address Space Layout Randomization"
76666 + help
76667 + Many if not most exploit techniques rely on the knowledge of
76668 + certain addresses in the attacked program. The following options
76669 + will allow the kernel to apply a certain amount of randomization
76670 + to specific parts of the program thereby forcing an attacker to
76671 + guess them in most cases. Any failed guess will most likely crash
76672 + the attacked program which allows the kernel to detect such attempts
76673 + and react on them. PaX itself provides no reaction mechanisms,
76674 + instead it is strongly encouraged that you make use of Nergal's
76675 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
76676 + (http://www.grsecurity.net/) built-in crash detection features or
76677 + develop one yourself.
76678 +
76679 + By saying Y here you can choose to randomize the following areas:
76680 + - top of the task's kernel stack
76681 + - top of the task's userland stack
76682 + - base address for mmap() requests that do not specify one
76683 + (this includes all libraries)
76684 + - base address of the main executable
76685 +
76686 + It is strongly recommended to say Y here as address space layout
76687 + randomization has negligible impact on performance yet it provides
76688 + a very effective protection.
76689 +
76690 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76691 + this feature on a per file basis.
76692 +
76693 +config PAX_RANDKSTACK
76694 + bool "Randomize kernel stack base"
76695 + depends on X86_TSC && X86
76696 + help
76697 + By saying Y here the kernel will randomize every task's kernel
76698 + stack on every system call. This will not only force an attacker
76699 + to guess it but also prevent him from making use of possible
76700 + leaked information about it.
76701 +
76702 + Since the kernel stack is a rather scarce resource, randomization
76703 + may cause unexpected stack overflows, therefore you should very
76704 + carefully test your system. Note that once enabled in the kernel
76705 + configuration, this feature cannot be disabled on a per file basis.
76706 +
76707 +config PAX_RANDUSTACK
76708 + bool "Randomize user stack base"
76709 + depends on PAX_ASLR
76710 + help
76711 + By saying Y here the kernel will randomize every task's userland
76712 + stack. The randomization is done in two steps where the second
76713 + one may apply a big amount of shift to the top of the stack and
76714 + cause problems for programs that want to use lots of memory (more
76715 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
76716 + For this reason the second step can be controlled by 'chpax' or
76717 + 'paxctl' on a per file basis.
76718 +
76719 +config PAX_RANDMMAP
76720 + bool "Randomize mmap() base"
76721 + depends on PAX_ASLR
76722 + help
76723 + By saying Y here the kernel will use a randomized base address for
76724 + mmap() requests that do not specify one themselves. As a result
76725 + all dynamically loaded libraries will appear at random addresses
76726 + and therefore be harder to exploit by a technique where an attacker
76727 + attempts to execute library code for his purposes (e.g. spawn a
76728 + shell from an exploited program that is running at an elevated
76729 + privilege level).
76730 +
76731 + Furthermore, if a program is relinked as a dynamic ELF file, its
76732 + base address will be randomized as well, completing the full
76733 + randomization of the address space layout. Attacking such programs
76734 + becomes a guess game. You can find an example of doing this at
76735 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
76736 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
76737 +
76738 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
76739 + feature on a per file basis.
76740 +
76741 +endmenu
76742 +
76743 +menu "Miscellaneous hardening features"
76744 +
76745 +config PAX_MEMORY_SANITIZE
76746 + bool "Sanitize all freed memory"
76747 + depends on !HIBERNATION
76748 + help
76749 + By saying Y here the kernel will erase memory pages as soon as they
76750 + are freed. This in turn reduces the lifetime of data stored in the
76751 + pages, making it less likely that sensitive information such as
76752 + passwords, cryptographic secrets, etc stay in memory for too long.
76753 +
76754 + This is especially useful for programs whose runtime is short, long
76755 + lived processes and the kernel itself benefit from this as long as
76756 + they operate on whole memory pages and ensure timely freeing of pages
76757 + that may hold sensitive information.
76758 +
76759 + The tradeoff is performance impact, on a single CPU system kernel
76760 + compilation sees a 3% slowdown, other systems and workloads may vary
76761 + and you are advised to test this feature on your expected workload
76762 + before deploying it.
76763 +
76764 + Note that this feature does not protect data stored in live pages,
76765 + e.g., process memory swapped to disk may stay there for a long time.
76766 +
76767 +config PAX_MEMORY_STACKLEAK
76768 + bool "Sanitize kernel stack"
76769 + depends on X86
76770 + help
76771 + By saying Y here the kernel will erase the kernel stack before it
76772 + returns from a system call. This in turn reduces the information
76773 + that a kernel stack leak bug can reveal.
76774 +
76775 + Note that such a bug can still leak information that was put on
76776 + the stack by the current system call (the one eventually triggering
76777 + the bug) but traces of earlier system calls on the kernel stack
76778 + cannot leak anymore.
76779 +
76780 + The tradeoff is performance impact: on a single CPU system kernel
76781 + compilation sees a 1% slowdown, other systems and workloads may vary
76782 + and you are advised to test this feature on your expected workload
76783 + before deploying it.
76784 +
76785 + Note: full support for this feature requires gcc with plugin support
76786 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
76787 + versions means that functions with large enough stack frames may
76788 + leave uninitialized memory behind that may be exposed to a later
76789 + syscall leaking the stack.
76790 +
76791 +config PAX_MEMORY_UDEREF
76792 + bool "Prevent invalid userland pointer dereference"
76793 + depends on X86 && !UML_X86 && !XEN
76794 + select PAX_PER_CPU_PGD if X86_64
76795 + help
76796 + By saying Y here the kernel will be prevented from dereferencing
76797 + userland pointers in contexts where the kernel expects only kernel
76798 + pointers. This is both a useful runtime debugging feature and a
76799 + security measure that prevents exploiting a class of kernel bugs.
76800 +
76801 + The tradeoff is that some virtualization solutions may experience
76802 + a huge slowdown and therefore you should not enable this feature
76803 + for kernels meant to run in such environments. Whether a given VM
76804 + solution is affected or not is best determined by simply trying it
76805 + out, the performance impact will be obvious right on boot as this
76806 + mechanism engages from very early on. A good rule of thumb is that
76807 + VMs running on CPUs without hardware virtualization support (i.e.,
76808 + the majority of IA-32 CPUs) will likely experience the slowdown.
76809 +
76810 +config PAX_REFCOUNT
76811 + bool "Prevent various kernel object reference counter overflows"
76812 + depends on GRKERNSEC && (X86 || SPARC64)
76813 + help
76814 + By saying Y here the kernel will detect and prevent overflowing
76815 + various (but not all) kinds of object reference counters. Such
76816 + overflows can normally occur due to bugs only and are often, if
76817 + not always, exploitable.
76818 +
76819 + The tradeoff is that data structures protected by an overflowed
76820 + refcount will never be freed and therefore will leak memory. Note
76821 + that this leak also happens even without this protection but in
76822 + that case the overflow can eventually trigger the freeing of the
76823 + data structure while it is still being used elsewhere, resulting
76824 + in the exploitable situation that this feature prevents.
76825 +
76826 + Since this has a negligible performance impact, you should enable
76827 + this feature.
76828 +
76829 +config PAX_USERCOPY
76830 + bool "Harden heap object copies between kernel and userland"
76831 + depends on X86 || PPC || SPARC || ARM
76832 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
76833 + help
76834 + By saying Y here the kernel will enforce the size of heap objects
76835 + when they are copied in either direction between the kernel and
76836 + userland, even if only a part of the heap object is copied.
76837 +
76838 + Specifically, this checking prevents information leaking from the
76839 + kernel heap during kernel to userland copies (if the kernel heap
76840 + object is otherwise fully initialized) and prevents kernel heap
76841 + overflows during userland to kernel copies.
76842 +
76843 + Note that the current implementation provides the strictest bounds
76844 + checks for the SLUB allocator.
76845 +
76846 + Enabling this option also enables per-slab cache protection against
76847 + data in a given cache being copied into/out of via userland
76848 + accessors. Though the whitelist of regions will be reduced over
76849 + time, it notably protects important data structures like task structs.
76850 +
76851 + If frame pointers are enabled on x86, this option will also restrict
76852 + copies into and out of the kernel stack to local variables within a
76853 + single frame.
76854 +
76855 + Since this has a negligible performance impact, you should enable
76856 + this feature.
76857 +
76858 +endmenu
76859 +
76860 +endmenu
76861 +
76862 config KEYS
76863 bool "Enable access key retention support"
76864 help
76865 @@ -169,7 +790,7 @@ config INTEL_TXT
76866 config LSM_MMAP_MIN_ADDR
76867 int "Low address space for LSM to protect from user allocation"
76868 depends on SECURITY && SECURITY_SELINUX
76869 - default 32768 if ARM
76870 + default 32768 if ALPHA || ARM || PARISC || SPARC32
76871 default 65536
76872 help
76873 This is the portion of low virtual memory which should be protected
76874 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
76875 index 3783202..1852837 100644
76876 --- a/security/apparmor/lsm.c
76877 +++ b/security/apparmor/lsm.c
76878 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
76879 return error;
76880 }
76881
76882 -static struct security_operations apparmor_ops = {
76883 +static struct security_operations apparmor_ops __read_only = {
76884 .name = "apparmor",
76885
76886 .ptrace_access_check = apparmor_ptrace_access_check,
76887 diff --git a/security/commoncap.c b/security/commoncap.c
76888 index ee4f848..a320c64 100644
76889 --- a/security/commoncap.c
76890 +++ b/security/commoncap.c
76891 @@ -28,6 +28,7 @@
76892 #include <linux/prctl.h>
76893 #include <linux/securebits.h>
76894 #include <linux/user_namespace.h>
76895 +#include <net/sock.h>
76896
76897 /*
76898 * If a non-root user executes a setuid-root binary in
76899 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
76900
76901 int cap_netlink_recv(struct sk_buff *skb, int cap)
76902 {
76903 - if (!cap_raised(current_cap(), cap))
76904 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
76905 return -EPERM;
76906 return 0;
76907 }
76908 @@ -579,6 +580,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
76909 {
76910 const struct cred *cred = current_cred();
76911
76912 + if (gr_acl_enable_at_secure())
76913 + return 1;
76914 +
76915 if (cred->uid != 0) {
76916 if (bprm->cap_effective)
76917 return 1;
76918 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
76919 index 3ccf7ac..d73ad64 100644
76920 --- a/security/integrity/ima/ima.h
76921 +++ b/security/integrity/ima/ima.h
76922 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
76923 extern spinlock_t ima_queue_lock;
76924
76925 struct ima_h_table {
76926 - atomic_long_t len; /* number of stored measurements in the list */
76927 - atomic_long_t violations;
76928 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
76929 + atomic_long_unchecked_t violations;
76930 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
76931 };
76932 extern struct ima_h_table ima_htable;
76933 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
76934 index 88a2788..581ab92 100644
76935 --- a/security/integrity/ima/ima_api.c
76936 +++ b/security/integrity/ima/ima_api.c
76937 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
76938 int result;
76939
76940 /* can overflow, only indicator */
76941 - atomic_long_inc(&ima_htable.violations);
76942 + atomic_long_inc_unchecked(&ima_htable.violations);
76943
76944 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
76945 if (!entry) {
76946 diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/ima/ima_audit.c
76947 index c5c5a72..2ad942f 100644
76948 --- a/security/integrity/ima/ima_audit.c
76949 +++ b/security/integrity/ima/ima_audit.c
76950 @@ -56,9 +56,11 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
76951 audit_log_format(ab, " name=");
76952 audit_log_untrustedstring(ab, fname);
76953 }
76954 - if (inode)
76955 - audit_log_format(ab, " dev=%s ino=%lu",
76956 - inode->i_sb->s_id, inode->i_ino);
76957 + if (inode) {
76958 + audit_log_format(ab, " dev=");
76959 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76960 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76961 + }
76962 audit_log_format(ab, " res=%d", !result ? 0 : 1);
76963 audit_log_end(ab);
76964 }
76965 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
76966 index e1aa2b4..52027bf 100644
76967 --- a/security/integrity/ima/ima_fs.c
76968 +++ b/security/integrity/ima/ima_fs.c
76969 @@ -28,12 +28,12 @@
76970 static int valid_policy = 1;
76971 #define TMPBUFLEN 12
76972 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
76973 - loff_t *ppos, atomic_long_t *val)
76974 + loff_t *ppos, atomic_long_unchecked_t *val)
76975 {
76976 char tmpbuf[TMPBUFLEN];
76977 ssize_t len;
76978
76979 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
76980 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
76981 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
76982 }
76983
76984 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
76985 index 55a6271..ad829c3 100644
76986 --- a/security/integrity/ima/ima_queue.c
76987 +++ b/security/integrity/ima/ima_queue.c
76988 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
76989 INIT_LIST_HEAD(&qe->later);
76990 list_add_tail_rcu(&qe->later, &ima_measurements);
76991
76992 - atomic_long_inc(&ima_htable.len);
76993 + atomic_long_inc_unchecked(&ima_htable.len);
76994 key = ima_hash_key(entry->digest);
76995 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
76996 return 0;
76997 diff --git a/security/keys/compat.c b/security/keys/compat.c
76998 index 4c48e13..7abdac9 100644
76999 --- a/security/keys/compat.c
77000 +++ b/security/keys/compat.c
77001 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
77002 if (ret == 0)
77003 goto no_payload_free;
77004
77005 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
77006 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
77007
77008 if (iov != iovstack)
77009 kfree(iov);
77010 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
77011 index 0b3f5d7..892c8a6 100644
77012 --- a/security/keys/keyctl.c
77013 +++ b/security/keys/keyctl.c
77014 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
77015 /*
77016 * Copy the iovec data from userspace
77017 */
77018 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
77019 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
77020 unsigned ioc)
77021 {
77022 for (; ioc > 0; ioc--) {
77023 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
77024 * If successful, 0 will be returned.
77025 */
77026 long keyctl_instantiate_key_common(key_serial_t id,
77027 - const struct iovec *payload_iov,
77028 + const struct iovec __user *payload_iov,
77029 unsigned ioc,
77030 size_t plen,
77031 key_serial_t ringid)
77032 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
77033 [0].iov_len = plen
77034 };
77035
77036 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
77037 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
77038 }
77039
77040 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
77041 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
77042 if (ret == 0)
77043 goto no_payload_free;
77044
77045 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
77046 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
77047
77048 if (iov != iovstack)
77049 kfree(iov);
77050 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
77051 index 37a7f3b..86dc19f 100644
77052 --- a/security/keys/keyring.c
77053 +++ b/security/keys/keyring.c
77054 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
77055 ret = -EFAULT;
77056
77057 for (loop = 0; loop < klist->nkeys; loop++) {
77058 + key_serial_t serial;
77059 key = klist->keys[loop];
77060 + serial = key->serial;
77061
77062 tmp = sizeof(key_serial_t);
77063 if (tmp > buflen)
77064 tmp = buflen;
77065
77066 - if (copy_to_user(buffer,
77067 - &key->serial,
77068 - tmp) != 0)
77069 + if (copy_to_user(buffer, &serial, tmp))
77070 goto error;
77071
77072 buflen -= tmp;
77073 diff --git a/security/lsm_audit.c b/security/lsm_audit.c
77074 index 893af8a..ba9237c 100644
77075 --- a/security/lsm_audit.c
77076 +++ b/security/lsm_audit.c
77077 @@ -234,10 +234,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
77078 audit_log_d_path(ab, "path=", &a->u.path);
77079
77080 inode = a->u.path.dentry->d_inode;
77081 - if (inode)
77082 - audit_log_format(ab, " dev=%s ino=%lu",
77083 - inode->i_sb->s_id,
77084 - inode->i_ino);
77085 + if (inode) {
77086 + audit_log_format(ab, " dev=");
77087 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
77088 + audit_log_format(ab, " ino=%lu", inode->i_ino);
77089 + }
77090 break;
77091 }
77092 case LSM_AUDIT_DATA_DENTRY: {
77093 @@ -247,10 +248,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
77094 audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
77095
77096 inode = a->u.dentry->d_inode;
77097 - if (inode)
77098 - audit_log_format(ab, " dev=%s ino=%lu",
77099 - inode->i_sb->s_id,
77100 - inode->i_ino);
77101 + if (inode) {
77102 + audit_log_format(ab, " dev=");
77103 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
77104 + audit_log_format(ab, " ino=%lu", inode->i_ino);
77105 + }
77106 break;
77107 }
77108 case LSM_AUDIT_DATA_INODE: {
77109 @@ -265,8 +267,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
77110 dentry->d_name.name);
77111 dput(dentry);
77112 }
77113 - audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id,
77114 - inode->i_ino);
77115 + audit_log_format(ab, " dev=");
77116 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
77117 + audit_log_format(ab, " ino=%lu", inode->i_ino);
77118 break;
77119 }
77120 case LSM_AUDIT_DATA_TASK:
77121 diff --git a/security/min_addr.c b/security/min_addr.c
77122 index f728728..6457a0c 100644
77123 --- a/security/min_addr.c
77124 +++ b/security/min_addr.c
77125 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
77126 */
77127 static void update_mmap_min_addr(void)
77128 {
77129 +#ifndef SPARC
77130 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
77131 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
77132 mmap_min_addr = dac_mmap_min_addr;
77133 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
77134 #else
77135 mmap_min_addr = dac_mmap_min_addr;
77136 #endif
77137 +#endif
77138 }
77139
77140 /*
77141 diff --git a/security/security.c b/security/security.c
77142 index e2f684a..8d62ef5 100644
77143 --- a/security/security.c
77144 +++ b/security/security.c
77145 @@ -26,8 +26,8 @@
77146 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
77147 CONFIG_DEFAULT_SECURITY;
77148
77149 -static struct security_operations *security_ops;
77150 -static struct security_operations default_security_ops = {
77151 +static struct security_operations *security_ops __read_only;
77152 +static struct security_operations default_security_ops __read_only = {
77153 .name = "default",
77154 };
77155
77156 @@ -68,7 +68,9 @@ int __init security_init(void)
77157
77158 void reset_security_ops(void)
77159 {
77160 + pax_open_kernel();
77161 security_ops = &default_security_ops;
77162 + pax_close_kernel();
77163 }
77164
77165 /* Save user chosen LSM */
77166 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
77167 index 1126c10..effb32b 100644
77168 --- a/security/selinux/hooks.c
77169 +++ b/security/selinux/hooks.c
77170 @@ -94,8 +94,6 @@
77171
77172 #define NUM_SEL_MNT_OPTS 5
77173
77174 -extern struct security_operations *security_ops;
77175 -
77176 /* SECMARK reference count */
77177 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
77178
77179 @@ -5449,7 +5447,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
77180
77181 #endif
77182
77183 -static struct security_operations selinux_ops = {
77184 +static struct security_operations selinux_ops __read_only = {
77185 .name = "selinux",
77186
77187 .ptrace_access_check = selinux_ptrace_access_check,
77188 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
77189 index b43813c..74be837 100644
77190 --- a/security/selinux/include/xfrm.h
77191 +++ b/security/selinux/include/xfrm.h
77192 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
77193
77194 static inline void selinux_xfrm_notify_policyload(void)
77195 {
77196 - atomic_inc(&flow_cache_genid);
77197 + atomic_inc_unchecked(&flow_cache_genid);
77198 }
77199 #else
77200 static inline int selinux_xfrm_enabled(void)
77201 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
77202 index 7db62b4..ee4d949 100644
77203 --- a/security/smack/smack_lsm.c
77204 +++ b/security/smack/smack_lsm.c
77205 @@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
77206 return 0;
77207 }
77208
77209 -struct security_operations smack_ops = {
77210 +struct security_operations smack_ops __read_only = {
77211 .name = "smack",
77212
77213 .ptrace_access_check = smack_ptrace_access_check,
77214 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
77215 index 4b327b6..646c57a 100644
77216 --- a/security/tomoyo/tomoyo.c
77217 +++ b/security/tomoyo/tomoyo.c
77218 @@ -504,7 +504,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
77219 * tomoyo_security_ops is a "struct security_operations" which is used for
77220 * registering TOMOYO.
77221 */
77222 -static struct security_operations tomoyo_security_ops = {
77223 +static struct security_operations tomoyo_security_ops __read_only = {
77224 .name = "tomoyo",
77225 .cred_alloc_blank = tomoyo_cred_alloc_blank,
77226 .cred_prepare = tomoyo_cred_prepare,
77227 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
77228 index 762af68..7103453 100644
77229 --- a/sound/aoa/codecs/onyx.c
77230 +++ b/sound/aoa/codecs/onyx.c
77231 @@ -54,7 +54,7 @@ struct onyx {
77232 spdif_locked:1,
77233 analog_locked:1,
77234 original_mute:2;
77235 - int open_count;
77236 + local_t open_count;
77237 struct codec_info *codec_info;
77238
77239 /* mutex serializes concurrent access to the device
77240 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
77241 struct onyx *onyx = cii->codec_data;
77242
77243 mutex_lock(&onyx->mutex);
77244 - onyx->open_count++;
77245 + local_inc(&onyx->open_count);
77246 mutex_unlock(&onyx->mutex);
77247
77248 return 0;
77249 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
77250 struct onyx *onyx = cii->codec_data;
77251
77252 mutex_lock(&onyx->mutex);
77253 - onyx->open_count--;
77254 - if (!onyx->open_count)
77255 + if (local_dec_and_test(&onyx->open_count))
77256 onyx->spdif_locked = onyx->analog_locked = 0;
77257 mutex_unlock(&onyx->mutex);
77258
77259 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
77260 index ffd2025..df062c9 100644
77261 --- a/sound/aoa/codecs/onyx.h
77262 +++ b/sound/aoa/codecs/onyx.h
77263 @@ -11,6 +11,7 @@
77264 #include <linux/i2c.h>
77265 #include <asm/pmac_low_i2c.h>
77266 #include <asm/prom.h>
77267 +#include <asm/local.h>
77268
77269 /* PCM3052 register definitions */
77270
77271 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
77272 index 3cc4b86..af0a951 100644
77273 --- a/sound/core/oss/pcm_oss.c
77274 +++ b/sound/core/oss/pcm_oss.c
77275 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
77276 if (in_kernel) {
77277 mm_segment_t fs;
77278 fs = snd_enter_user();
77279 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
77280 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
77281 snd_leave_user(fs);
77282 } else {
77283 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
77284 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
77285 }
77286 if (ret != -EPIPE && ret != -ESTRPIPE)
77287 break;
77288 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
77289 if (in_kernel) {
77290 mm_segment_t fs;
77291 fs = snd_enter_user();
77292 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
77293 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
77294 snd_leave_user(fs);
77295 } else {
77296 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
77297 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
77298 }
77299 if (ret == -EPIPE) {
77300 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
77301 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
77302 struct snd_pcm_plugin_channel *channels;
77303 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
77304 if (!in_kernel) {
77305 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
77306 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
77307 return -EFAULT;
77308 buf = runtime->oss.buffer;
77309 }
77310 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
77311 }
77312 } else {
77313 tmp = snd_pcm_oss_write2(substream,
77314 - (const char __force *)buf,
77315 + (const char __force_kernel *)buf,
77316 runtime->oss.period_bytes, 0);
77317 if (tmp <= 0)
77318 goto err;
77319 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
77320 struct snd_pcm_runtime *runtime = substream->runtime;
77321 snd_pcm_sframes_t frames, frames1;
77322 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
77323 - char __user *final_dst = (char __force __user *)buf;
77324 + char __user *final_dst = (char __force_user *)buf;
77325 if (runtime->oss.plugin_first) {
77326 struct snd_pcm_plugin_channel *channels;
77327 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
77328 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
77329 xfer += tmp;
77330 runtime->oss.buffer_used -= tmp;
77331 } else {
77332 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
77333 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
77334 runtime->oss.period_bytes, 0);
77335 if (tmp <= 0)
77336 goto err;
77337 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
77338 size1);
77339 size1 /= runtime->channels; /* frames */
77340 fs = snd_enter_user();
77341 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
77342 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
77343 snd_leave_user(fs);
77344 }
77345 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
77346 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
77347 index 91cdf94..4085161 100644
77348 --- a/sound/core/pcm_compat.c
77349 +++ b/sound/core/pcm_compat.c
77350 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
77351 int err;
77352
77353 fs = snd_enter_user();
77354 - err = snd_pcm_delay(substream, &delay);
77355 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
77356 snd_leave_user(fs);
77357 if (err < 0)
77358 return err;
77359 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
77360 index 25ed9fe..24c46e9 100644
77361 --- a/sound/core/pcm_native.c
77362 +++ b/sound/core/pcm_native.c
77363 @@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
77364 switch (substream->stream) {
77365 case SNDRV_PCM_STREAM_PLAYBACK:
77366 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
77367 - (void __user *)arg);
77368 + (void __force_user *)arg);
77369 break;
77370 case SNDRV_PCM_STREAM_CAPTURE:
77371 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
77372 - (void __user *)arg);
77373 + (void __force_user *)arg);
77374 break;
77375 default:
77376 result = -EINVAL;
77377 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
77378 index 5cf8d65..912a79c 100644
77379 --- a/sound/core/seq/seq_device.c
77380 +++ b/sound/core/seq/seq_device.c
77381 @@ -64,7 +64,7 @@ struct ops_list {
77382 int argsize; /* argument size */
77383
77384 /* operators */
77385 - struct snd_seq_dev_ops ops;
77386 + struct snd_seq_dev_ops *ops;
77387
77388 /* registred devices */
77389 struct list_head dev_list; /* list of devices */
77390 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
77391
77392 mutex_lock(&ops->reg_mutex);
77393 /* copy driver operators */
77394 - ops->ops = *entry;
77395 + ops->ops = entry;
77396 ops->driver |= DRIVER_LOADED;
77397 ops->argsize = argsize;
77398
77399 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
77400 dev->name, ops->id, ops->argsize, dev->argsize);
77401 return -EINVAL;
77402 }
77403 - if (ops->ops.init_device(dev) >= 0) {
77404 + if (ops->ops->init_device(dev) >= 0) {
77405 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
77406 ops->num_init_devices++;
77407 } else {
77408 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
77409 dev->name, ops->id, ops->argsize, dev->argsize);
77410 return -EINVAL;
77411 }
77412 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
77413 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
77414 dev->status = SNDRV_SEQ_DEVICE_FREE;
77415 dev->driver_data = NULL;
77416 ops->num_init_devices--;
77417 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
77418 index f24bf9a..1f7b67c 100644
77419 --- a/sound/drivers/mts64.c
77420 +++ b/sound/drivers/mts64.c
77421 @@ -29,6 +29,7 @@
77422 #include <sound/initval.h>
77423 #include <sound/rawmidi.h>
77424 #include <sound/control.h>
77425 +#include <asm/local.h>
77426
77427 #define CARD_NAME "Miditerminal 4140"
77428 #define DRIVER_NAME "MTS64"
77429 @@ -67,7 +68,7 @@ struct mts64 {
77430 struct pardevice *pardev;
77431 int pardev_claimed;
77432
77433 - int open_count;
77434 + local_t open_count;
77435 int current_midi_output_port;
77436 int current_midi_input_port;
77437 u8 mode[MTS64_NUM_INPUT_PORTS];
77438 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
77439 {
77440 struct mts64 *mts = substream->rmidi->private_data;
77441
77442 - if (mts->open_count == 0) {
77443 + if (local_read(&mts->open_count) == 0) {
77444 /* We don't need a spinlock here, because this is just called
77445 if the device has not been opened before.
77446 So there aren't any IRQs from the device */
77447 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
77448
77449 msleep(50);
77450 }
77451 - ++(mts->open_count);
77452 + local_inc(&mts->open_count);
77453
77454 return 0;
77455 }
77456 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
77457 struct mts64 *mts = substream->rmidi->private_data;
77458 unsigned long flags;
77459
77460 - --(mts->open_count);
77461 - if (mts->open_count == 0) {
77462 + if (local_dec_return(&mts->open_count) == 0) {
77463 /* We need the spinlock_irqsave here because we can still
77464 have IRQs at this point */
77465 spin_lock_irqsave(&mts->lock, flags);
77466 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
77467
77468 msleep(500);
77469
77470 - } else if (mts->open_count < 0)
77471 - mts->open_count = 0;
77472 + } else if (local_read(&mts->open_count) < 0)
77473 + local_set(&mts->open_count, 0);
77474
77475 return 0;
77476 }
77477 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
77478 index b953fb4..1999c01 100644
77479 --- a/sound/drivers/opl4/opl4_lib.c
77480 +++ b/sound/drivers/opl4/opl4_lib.c
77481 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
77482 MODULE_DESCRIPTION("OPL4 driver");
77483 MODULE_LICENSE("GPL");
77484
77485 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
77486 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
77487 {
77488 int timeout = 10;
77489 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
77490 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
77491 index f664823..590c745 100644
77492 --- a/sound/drivers/portman2x4.c
77493 +++ b/sound/drivers/portman2x4.c
77494 @@ -48,6 +48,7 @@
77495 #include <sound/initval.h>
77496 #include <sound/rawmidi.h>
77497 #include <sound/control.h>
77498 +#include <asm/local.h>
77499
77500 #define CARD_NAME "Portman 2x4"
77501 #define DRIVER_NAME "portman"
77502 @@ -85,7 +86,7 @@ struct portman {
77503 struct pardevice *pardev;
77504 int pardev_claimed;
77505
77506 - int open_count;
77507 + local_t open_count;
77508 int mode[PORTMAN_NUM_INPUT_PORTS];
77509 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
77510 };
77511 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
77512 index 87657dd..a8268d4 100644
77513 --- a/sound/firewire/amdtp.c
77514 +++ b/sound/firewire/amdtp.c
77515 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
77516 ptr = s->pcm_buffer_pointer + data_blocks;
77517 if (ptr >= pcm->runtime->buffer_size)
77518 ptr -= pcm->runtime->buffer_size;
77519 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
77520 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
77521
77522 s->pcm_period_pointer += data_blocks;
77523 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
77524 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
77525 */
77526 void amdtp_out_stream_update(struct amdtp_out_stream *s)
77527 {
77528 - ACCESS_ONCE(s->source_node_id_field) =
77529 + ACCESS_ONCE_RW(s->source_node_id_field) =
77530 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
77531 }
77532 EXPORT_SYMBOL(amdtp_out_stream_update);
77533 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
77534 index 537a9cb..8e8c8e9 100644
77535 --- a/sound/firewire/amdtp.h
77536 +++ b/sound/firewire/amdtp.h
77537 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
77538 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
77539 struct snd_pcm_substream *pcm)
77540 {
77541 - ACCESS_ONCE(s->pcm) = pcm;
77542 + ACCESS_ONCE_RW(s->pcm) = pcm;
77543 }
77544
77545 /**
77546 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
77547 index cd094ec..eca1277 100644
77548 --- a/sound/firewire/isight.c
77549 +++ b/sound/firewire/isight.c
77550 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
77551 ptr += count;
77552 if (ptr >= runtime->buffer_size)
77553 ptr -= runtime->buffer_size;
77554 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
77555 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
77556
77557 isight->period_counter += count;
77558 if (isight->period_counter >= runtime->period_size) {
77559 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
77560 if (err < 0)
77561 return err;
77562
77563 - ACCESS_ONCE(isight->pcm_active) = true;
77564 + ACCESS_ONCE_RW(isight->pcm_active) = true;
77565
77566 return 0;
77567 }
77568 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
77569 {
77570 struct isight *isight = substream->private_data;
77571
77572 - ACCESS_ONCE(isight->pcm_active) = false;
77573 + ACCESS_ONCE_RW(isight->pcm_active) = false;
77574
77575 mutex_lock(&isight->mutex);
77576 isight_stop_streaming(isight);
77577 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
77578
77579 switch (cmd) {
77580 case SNDRV_PCM_TRIGGER_START:
77581 - ACCESS_ONCE(isight->pcm_running) = true;
77582 + ACCESS_ONCE_RW(isight->pcm_running) = true;
77583 break;
77584 case SNDRV_PCM_TRIGGER_STOP:
77585 - ACCESS_ONCE(isight->pcm_running) = false;
77586 + ACCESS_ONCE_RW(isight->pcm_running) = false;
77587 break;
77588 default:
77589 return -EINVAL;
77590 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
77591 index c94578d..0794ac1 100644
77592 --- a/sound/isa/cmi8330.c
77593 +++ b/sound/isa/cmi8330.c
77594 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
77595
77596 struct snd_pcm *pcm;
77597 struct snd_cmi8330_stream {
77598 - struct snd_pcm_ops ops;
77599 + snd_pcm_ops_no_const ops;
77600 snd_pcm_open_callback_t open;
77601 void *private_data; /* sb or wss */
77602 } streams[2];
77603 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
77604 index 733b014..56ce96f 100644
77605 --- a/sound/oss/sb_audio.c
77606 +++ b/sound/oss/sb_audio.c
77607 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
77608 buf16 = (signed short *)(localbuf + localoffs);
77609 while (c)
77610 {
77611 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77612 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77613 if (copy_from_user(lbuf8,
77614 userbuf+useroffs + p,
77615 locallen))
77616 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
77617 index 09d4648..cf234c7 100644
77618 --- a/sound/oss/swarm_cs4297a.c
77619 +++ b/sound/oss/swarm_cs4297a.c
77620 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
77621 {
77622 struct cs4297a_state *s;
77623 u32 pwr, id;
77624 - mm_segment_t fs;
77625 int rval;
77626 #ifndef CONFIG_BCM_CS4297A_CSWARM
77627 u64 cfg;
77628 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
77629 if (!rval) {
77630 char *sb1250_duart_present;
77631
77632 +#if 0
77633 + mm_segment_t fs;
77634 fs = get_fs();
77635 set_fs(KERNEL_DS);
77636 -#if 0
77637 val = SOUND_MASK_LINE;
77638 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
77639 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
77640 val = initvol[i].vol;
77641 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
77642 }
77643 + set_fs(fs);
77644 // cs4297a_write_ac97(s, 0x18, 0x0808);
77645 #else
77646 // cs4297a_write_ac97(s, 0x5e, 0x180);
77647 cs4297a_write_ac97(s, 0x02, 0x0808);
77648 cs4297a_write_ac97(s, 0x18, 0x0808);
77649 #endif
77650 - set_fs(fs);
77651
77652 list_add(&s->list, &cs4297a_devs);
77653
77654 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
77655 index 5644711..a2aebc1 100644
77656 --- a/sound/pci/hda/hda_codec.h
77657 +++ b/sound/pci/hda/hda_codec.h
77658 @@ -611,7 +611,7 @@ struct hda_bus_ops {
77659 /* notify power-up/down from codec to controller */
77660 void (*pm_notify)(struct hda_bus *bus);
77661 #endif
77662 -};
77663 +} __no_const;
77664
77665 /* template to pass to the bus constructor */
77666 struct hda_bus_template {
77667 @@ -713,6 +713,7 @@ struct hda_codec_ops {
77668 #endif
77669 void (*reboot_notify)(struct hda_codec *codec);
77670 };
77671 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
77672
77673 /* record for amp information cache */
77674 struct hda_cache_head {
77675 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
77676 struct snd_pcm_substream *substream);
77677 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
77678 struct snd_pcm_substream *substream);
77679 -};
77680 +} __no_const;
77681
77682 /* PCM information for each substream */
77683 struct hda_pcm_stream {
77684 @@ -801,7 +802,7 @@ struct hda_codec {
77685 const char *modelname; /* model name for preset */
77686
77687 /* set by patch */
77688 - struct hda_codec_ops patch_ops;
77689 + hda_codec_ops_no_const patch_ops;
77690
77691 /* PCM to create, set by patch_ops.build_pcms callback */
77692 unsigned int num_pcms;
77693 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
77694 index 0da778a..bc38b84 100644
77695 --- a/sound/pci/ice1712/ice1712.h
77696 +++ b/sound/pci/ice1712/ice1712.h
77697 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
77698 unsigned int mask_flags; /* total mask bits */
77699 struct snd_akm4xxx_ops {
77700 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
77701 - } ops;
77702 + } __no_const ops;
77703 };
77704
77705 struct snd_ice1712_spdif {
77706 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
77707 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77708 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77709 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77710 - } ops;
77711 + } __no_const ops;
77712 };
77713
77714
77715 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
77716 index 03ee4e3..be86b46 100644
77717 --- a/sound/pci/ymfpci/ymfpci_main.c
77718 +++ b/sound/pci/ymfpci/ymfpci_main.c
77719 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
77720 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
77721 break;
77722 }
77723 - if (atomic_read(&chip->interrupt_sleep_count)) {
77724 - atomic_set(&chip->interrupt_sleep_count, 0);
77725 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77726 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77727 wake_up(&chip->interrupt_sleep);
77728 }
77729 __end:
77730 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
77731 continue;
77732 init_waitqueue_entry(&wait, current);
77733 add_wait_queue(&chip->interrupt_sleep, &wait);
77734 - atomic_inc(&chip->interrupt_sleep_count);
77735 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
77736 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
77737 remove_wait_queue(&chip->interrupt_sleep, &wait);
77738 }
77739 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
77740 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
77741 spin_unlock(&chip->reg_lock);
77742
77743 - if (atomic_read(&chip->interrupt_sleep_count)) {
77744 - atomic_set(&chip->interrupt_sleep_count, 0);
77745 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77746 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77747 wake_up(&chip->interrupt_sleep);
77748 }
77749 }
77750 @@ -2382,7 +2382,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
77751 spin_lock_init(&chip->reg_lock);
77752 spin_lock_init(&chip->voice_lock);
77753 init_waitqueue_head(&chip->interrupt_sleep);
77754 - atomic_set(&chip->interrupt_sleep_count, 0);
77755 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77756 chip->card = card;
77757 chip->pci = pci;
77758 chip->irq = -1;
77759 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
77760 index ee15337..e2187a6 100644
77761 --- a/sound/soc/soc-pcm.c
77762 +++ b/sound/soc/soc-pcm.c
77763 @@ -583,7 +583,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
77764 }
77765
77766 /* ASoC PCM operations */
77767 -static struct snd_pcm_ops soc_pcm_ops = {
77768 +static snd_pcm_ops_no_const soc_pcm_ops = {
77769 .open = soc_pcm_open,
77770 .close = soc_pcm_close,
77771 .hw_params = soc_pcm_hw_params,
77772 diff --git a/sound/usb/card.h b/sound/usb/card.h
77773 index a39edcc..1014050 100644
77774 --- a/sound/usb/card.h
77775 +++ b/sound/usb/card.h
77776 @@ -44,6 +44,7 @@ struct snd_urb_ops {
77777 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77778 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77779 };
77780 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
77781
77782 struct snd_usb_substream {
77783 struct snd_usb_stream *stream;
77784 @@ -93,7 +94,7 @@ struct snd_usb_substream {
77785 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
77786 spinlock_t lock;
77787
77788 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
77789 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
77790 int last_frame_number; /* stored frame number */
77791 int last_delay; /* stored delay */
77792 };
77793 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
77794 new file mode 100644
77795 index 0000000..481a163
77796 --- /dev/null
77797 +++ b/tools/gcc/Makefile
77798 @@ -0,0 +1,21 @@
77799 +#CC := gcc
77800 +#PLUGIN_SOURCE_FILES := pax_plugin.c
77801 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
77802 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
77803 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
77804 +
77805 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99 -ggdb
77806 +
77807 +hostlibs-y := constify_plugin.so
77808 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
77809 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
77810 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
77811 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
77812 +
77813 +always := $(hostlibs-y)
77814 +
77815 +constify_plugin-objs := constify_plugin.o
77816 +stackleak_plugin-objs := stackleak_plugin.o
77817 +kallocstat_plugin-objs := kallocstat_plugin.o
77818 +kernexec_plugin-objs := kernexec_plugin.o
77819 +checker_plugin-objs := checker_plugin.o
77820 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
77821 new file mode 100644
77822 index 0000000..d41b5af
77823 --- /dev/null
77824 +++ b/tools/gcc/checker_plugin.c
77825 @@ -0,0 +1,171 @@
77826 +/*
77827 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77828 + * Licensed under the GPL v2
77829 + *
77830 + * Note: the choice of the license means that the compilation process is
77831 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77832 + * but for the kernel it doesn't matter since it doesn't link against
77833 + * any of the gcc libraries
77834 + *
77835 + * gcc plugin to implement various sparse (source code checker) features
77836 + *
77837 + * TODO:
77838 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
77839 + *
77840 + * BUGS:
77841 + * - none known
77842 + */
77843 +#include "gcc-plugin.h"
77844 +#include "config.h"
77845 +#include "system.h"
77846 +#include "coretypes.h"
77847 +#include "tree.h"
77848 +#include "tree-pass.h"
77849 +#include "flags.h"
77850 +#include "intl.h"
77851 +#include "toplev.h"
77852 +#include "plugin.h"
77853 +//#include "expr.h" where are you...
77854 +#include "diagnostic.h"
77855 +#include "plugin-version.h"
77856 +#include "tm.h"
77857 +#include "function.h"
77858 +#include "basic-block.h"
77859 +#include "gimple.h"
77860 +#include "rtl.h"
77861 +#include "emit-rtl.h"
77862 +#include "tree-flow.h"
77863 +#include "target.h"
77864 +
77865 +extern void c_register_addr_space (const char *str, addr_space_t as);
77866 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
77867 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
77868 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
77869 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
77870 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
77871 +
77872 +extern void print_gimple_stmt(FILE *, gimple, int, int);
77873 +extern rtx emit_move_insn(rtx x, rtx y);
77874 +
77875 +int plugin_is_GPL_compatible;
77876 +
77877 +static struct plugin_info checker_plugin_info = {
77878 + .version = "201111150100",
77879 +};
77880 +
77881 +#define ADDR_SPACE_KERNEL 0
77882 +#define ADDR_SPACE_FORCE_KERNEL 1
77883 +#define ADDR_SPACE_USER 2
77884 +#define ADDR_SPACE_FORCE_USER 3
77885 +#define ADDR_SPACE_IOMEM 0
77886 +#define ADDR_SPACE_FORCE_IOMEM 0
77887 +#define ADDR_SPACE_PERCPU 0
77888 +#define ADDR_SPACE_FORCE_PERCPU 0
77889 +#define ADDR_SPACE_RCU 0
77890 +#define ADDR_SPACE_FORCE_RCU 0
77891 +
77892 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
77893 +{
77894 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
77895 +}
77896 +
77897 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
77898 +{
77899 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
77900 +}
77901 +
77902 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
77903 +{
77904 + return default_addr_space_valid_pointer_mode(mode, as);
77905 +}
77906 +
77907 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
77908 +{
77909 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
77910 +}
77911 +
77912 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
77913 +{
77914 + return default_addr_space_legitimize_address(x, oldx, mode, as);
77915 +}
77916 +
77917 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
77918 +{
77919 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
77920 + return true;
77921 +
77922 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
77923 + return true;
77924 +
77925 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
77926 + return true;
77927 +
77928 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
77929 + return true;
77930 +
77931 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
77932 + return true;
77933 +
77934 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
77935 + return true;
77936 +
77937 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
77938 + return true;
77939 +
77940 + return subset == superset;
77941 +}
77942 +
77943 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
77944 +{
77945 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
77946 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
77947 +
77948 + return op;
77949 +}
77950 +
77951 +static void register_checker_address_spaces(void *event_data, void *data)
77952 +{
77953 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
77954 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
77955 + c_register_addr_space("__user", ADDR_SPACE_USER);
77956 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
77957 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
77958 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
77959 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
77960 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
77961 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
77962 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
77963 +
77964 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
77965 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
77966 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
77967 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
77968 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
77969 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
77970 + targetm.addr_space.convert = checker_addr_space_convert;
77971 +}
77972 +
77973 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77974 +{
77975 + const char * const plugin_name = plugin_info->base_name;
77976 + const int argc = plugin_info->argc;
77977 + const struct plugin_argument * const argv = plugin_info->argv;
77978 + int i;
77979 +
77980 + if (!plugin_default_version_check(version, &gcc_version)) {
77981 + error(G_("incompatible gcc/plugin versions"));
77982 + return 1;
77983 + }
77984 +
77985 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
77986 +
77987 + for (i = 0; i < argc; ++i)
77988 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77989 +
77990 + if (TARGET_64BIT == 0)
77991 + return 0;
77992 +
77993 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
77994 +
77995 + return 0;
77996 +}
77997 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
77998 new file mode 100644
77999 index 0000000..704a564
78000 --- /dev/null
78001 +++ b/tools/gcc/constify_plugin.c
78002 @@ -0,0 +1,303 @@
78003 +/*
78004 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
78005 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
78006 + * Licensed under the GPL v2, or (at your option) v3
78007 + *
78008 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
78009 + *
78010 + * Homepage:
78011 + * http://www.grsecurity.net/~ephox/const_plugin/
78012 + *
78013 + * Usage:
78014 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
78015 + * $ gcc -fplugin=constify_plugin.so test.c -O2
78016 + */
78017 +
78018 +#include "gcc-plugin.h"
78019 +#include "config.h"
78020 +#include "system.h"
78021 +#include "coretypes.h"
78022 +#include "tree.h"
78023 +#include "tree-pass.h"
78024 +#include "flags.h"
78025 +#include "intl.h"
78026 +#include "toplev.h"
78027 +#include "plugin.h"
78028 +#include "diagnostic.h"
78029 +#include "plugin-version.h"
78030 +#include "tm.h"
78031 +#include "function.h"
78032 +#include "basic-block.h"
78033 +#include "gimple.h"
78034 +#include "rtl.h"
78035 +#include "emit-rtl.h"
78036 +#include "tree-flow.h"
78037 +
78038 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
78039 +
78040 +int plugin_is_GPL_compatible;
78041 +
78042 +static struct plugin_info const_plugin_info = {
78043 + .version = "201111150100",
78044 + .help = "no-constify\tturn off constification\n",
78045 +};
78046 +
78047 +static void constify_type(tree type);
78048 +static bool walk_struct(tree node);
78049 +
78050 +static tree deconstify_type(tree old_type)
78051 +{
78052 + tree new_type, field;
78053 +
78054 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
78055 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
78056 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
78057 + DECL_FIELD_CONTEXT(field) = new_type;
78058 + TYPE_READONLY(new_type) = 0;
78059 + C_TYPE_FIELDS_READONLY(new_type) = 0;
78060 + return new_type;
78061 +}
78062 +
78063 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
78064 +{
78065 + tree type;
78066 +
78067 + *no_add_attrs = true;
78068 + if (TREE_CODE(*node) == FUNCTION_DECL) {
78069 + error("%qE attribute does not apply to functions", name);
78070 + return NULL_TREE;
78071 + }
78072 +
78073 + if (TREE_CODE(*node) == VAR_DECL) {
78074 + error("%qE attribute does not apply to variables", name);
78075 + return NULL_TREE;
78076 + }
78077 +
78078 + if (TYPE_P(*node)) {
78079 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
78080 + *no_add_attrs = false;
78081 + else
78082 + error("%qE attribute applies to struct and union types only", name);
78083 + return NULL_TREE;
78084 + }
78085 +
78086 + type = TREE_TYPE(*node);
78087 +
78088 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
78089 + error("%qE attribute applies to struct and union types only", name);
78090 + return NULL_TREE;
78091 + }
78092 +
78093 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
78094 + error("%qE attribute is already applied to the type", name);
78095 + return NULL_TREE;
78096 + }
78097 +
78098 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
78099 + error("%qE attribute used on type that is not constified", name);
78100 + return NULL_TREE;
78101 + }
78102 +
78103 + if (TREE_CODE(*node) == TYPE_DECL) {
78104 + TREE_TYPE(*node) = deconstify_type(type);
78105 + TREE_READONLY(*node) = 0;
78106 + return NULL_TREE;
78107 + }
78108 +
78109 + return NULL_TREE;
78110 +}
78111 +
78112 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
78113 +{
78114 + *no_add_attrs = true;
78115 + if (!TYPE_P(*node)) {
78116 + error("%qE attribute applies to types only", name);
78117 + return NULL_TREE;
78118 + }
78119 +
78120 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
78121 + error("%qE attribute applies to struct and union types only", name);
78122 + return NULL_TREE;
78123 + }
78124 +
78125 + *no_add_attrs = false;
78126 + constify_type(*node);
78127 + return NULL_TREE;
78128 +}
78129 +
78130 +static struct attribute_spec no_const_attr = {
78131 + .name = "no_const",
78132 + .min_length = 0,
78133 + .max_length = 0,
78134 + .decl_required = false,
78135 + .type_required = false,
78136 + .function_type_required = false,
78137 + .handler = handle_no_const_attribute,
78138 +#if BUILDING_GCC_VERSION >= 4007
78139 + .affects_type_identity = true
78140 +#endif
78141 +};
78142 +
78143 +static struct attribute_spec do_const_attr = {
78144 + .name = "do_const",
78145 + .min_length = 0,
78146 + .max_length = 0,
78147 + .decl_required = false,
78148 + .type_required = false,
78149 + .function_type_required = false,
78150 + .handler = handle_do_const_attribute,
78151 +#if BUILDING_GCC_VERSION >= 4007
78152 + .affects_type_identity = true
78153 +#endif
78154 +};
78155 +
78156 +static void register_attributes(void *event_data, void *data)
78157 +{
78158 + register_attribute(&no_const_attr);
78159 + register_attribute(&do_const_attr);
78160 +}
78161 +
78162 +static void constify_type(tree type)
78163 +{
78164 + TYPE_READONLY(type) = 1;
78165 + C_TYPE_FIELDS_READONLY(type) = 1;
78166 +}
78167 +
78168 +static bool is_fptr(tree field)
78169 +{
78170 + tree ptr = TREE_TYPE(field);
78171 +
78172 + if (TREE_CODE(ptr) != POINTER_TYPE)
78173 + return false;
78174 +
78175 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
78176 +}
78177 +
78178 +static bool walk_struct(tree node)
78179 +{
78180 + tree field;
78181 +
78182 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
78183 + return false;
78184 +
78185 + if (TYPE_FIELDS(node) == NULL_TREE)
78186 + return false;
78187 +
78188 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
78189 + tree type = TREE_TYPE(field);
78190 + enum tree_code code = TREE_CODE(type);
78191 + if (code == RECORD_TYPE || code == UNION_TYPE) {
78192 + if (!(walk_struct(type)))
78193 + return false;
78194 + } else if (!is_fptr(field) && !TREE_READONLY(field))
78195 + return false;
78196 + }
78197 + return true;
78198 +}
78199 +
78200 +static void finish_type(void *event_data, void *data)
78201 +{
78202 + tree type = (tree)event_data;
78203 +
78204 + if (type == NULL_TREE)
78205 + return;
78206 +
78207 + if (TYPE_READONLY(type))
78208 + return;
78209 +
78210 + if (walk_struct(type))
78211 + constify_type(type);
78212 +}
78213 +
78214 +static unsigned int check_local_variables(void);
78215 +
78216 +struct gimple_opt_pass pass_local_variable = {
78217 + {
78218 + .type = GIMPLE_PASS,
78219 + .name = "check_local_variables",
78220 + .gate = NULL,
78221 + .execute = check_local_variables,
78222 + .sub = NULL,
78223 + .next = NULL,
78224 + .static_pass_number = 0,
78225 + .tv_id = TV_NONE,
78226 + .properties_required = 0,
78227 + .properties_provided = 0,
78228 + .properties_destroyed = 0,
78229 + .todo_flags_start = 0,
78230 + .todo_flags_finish = 0
78231 + }
78232 +};
78233 +
78234 +static unsigned int check_local_variables(void)
78235 +{
78236 + tree var;
78237 + referenced_var_iterator rvi;
78238 +
78239 +#if BUILDING_GCC_VERSION == 4005
78240 + FOR_EACH_REFERENCED_VAR(var, rvi) {
78241 +#else
78242 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
78243 +#endif
78244 + tree type = TREE_TYPE(var);
78245 +
78246 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
78247 + continue;
78248 +
78249 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
78250 + continue;
78251 +
78252 + if (!TYPE_READONLY(type))
78253 + continue;
78254 +
78255 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
78256 +// continue;
78257 +
78258 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
78259 +// continue;
78260 +
78261 + if (walk_struct(type)) {
78262 + error("constified variable %qE cannot be local", var);
78263 + return 1;
78264 + }
78265 + }
78266 + return 0;
78267 +}
78268 +
78269 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78270 +{
78271 + const char * const plugin_name = plugin_info->base_name;
78272 + const int argc = plugin_info->argc;
78273 + const struct plugin_argument * const argv = plugin_info->argv;
78274 + int i;
78275 + bool constify = true;
78276 +
78277 + struct register_pass_info local_variable_pass_info = {
78278 + .pass = &pass_local_variable.pass,
78279 + .reference_pass_name = "*referenced_vars",
78280 + .ref_pass_instance_number = 0,
78281 + .pos_op = PASS_POS_INSERT_AFTER
78282 + };
78283 +
78284 + if (!plugin_default_version_check(version, &gcc_version)) {
78285 + error(G_("incompatible gcc/plugin versions"));
78286 + return 1;
78287 + }
78288 +
78289 + for (i = 0; i < argc; ++i) {
78290 + if (!(strcmp(argv[i].key, "no-constify"))) {
78291 + constify = false;
78292 + continue;
78293 + }
78294 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78295 + }
78296 +
78297 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
78298 + if (constify) {
78299 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
78300 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
78301 + }
78302 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
78303 +
78304 + return 0;
78305 +}
78306 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
78307 new file mode 100644
78308 index 0000000..a5eabce
78309 --- /dev/null
78310 +++ b/tools/gcc/kallocstat_plugin.c
78311 @@ -0,0 +1,167 @@
78312 +/*
78313 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78314 + * Licensed under the GPL v2
78315 + *
78316 + * Note: the choice of the license means that the compilation process is
78317 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78318 + * but for the kernel it doesn't matter since it doesn't link against
78319 + * any of the gcc libraries
78320 + *
78321 + * gcc plugin to find the distribution of k*alloc sizes
78322 + *
78323 + * TODO:
78324 + *
78325 + * BUGS:
78326 + * - none known
78327 + */
78328 +#include "gcc-plugin.h"
78329 +#include "config.h"
78330 +#include "system.h"
78331 +#include "coretypes.h"
78332 +#include "tree.h"
78333 +#include "tree-pass.h"
78334 +#include "flags.h"
78335 +#include "intl.h"
78336 +#include "toplev.h"
78337 +#include "plugin.h"
78338 +//#include "expr.h" where are you...
78339 +#include "diagnostic.h"
78340 +#include "plugin-version.h"
78341 +#include "tm.h"
78342 +#include "function.h"
78343 +#include "basic-block.h"
78344 +#include "gimple.h"
78345 +#include "rtl.h"
78346 +#include "emit-rtl.h"
78347 +
78348 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78349 +
78350 +int plugin_is_GPL_compatible;
78351 +
78352 +static const char * const kalloc_functions[] = {
78353 + "__kmalloc",
78354 + "kmalloc",
78355 + "kmalloc_large",
78356 + "kmalloc_node",
78357 + "kmalloc_order",
78358 + "kmalloc_order_trace",
78359 + "kmalloc_slab",
78360 + "kzalloc",
78361 + "kzalloc_node",
78362 +};
78363 +
78364 +static struct plugin_info kallocstat_plugin_info = {
78365 + .version = "201111150100",
78366 +};
78367 +
78368 +static unsigned int execute_kallocstat(void);
78369 +
78370 +static struct gimple_opt_pass kallocstat_pass = {
78371 + .pass = {
78372 + .type = GIMPLE_PASS,
78373 + .name = "kallocstat",
78374 + .gate = NULL,
78375 + .execute = execute_kallocstat,
78376 + .sub = NULL,
78377 + .next = NULL,
78378 + .static_pass_number = 0,
78379 + .tv_id = TV_NONE,
78380 + .properties_required = 0,
78381 + .properties_provided = 0,
78382 + .properties_destroyed = 0,
78383 + .todo_flags_start = 0,
78384 + .todo_flags_finish = 0
78385 + }
78386 +};
78387 +
78388 +static bool is_kalloc(const char *fnname)
78389 +{
78390 + size_t i;
78391 +
78392 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
78393 + if (!strcmp(fnname, kalloc_functions[i]))
78394 + return true;
78395 + return false;
78396 +}
78397 +
78398 +static unsigned int execute_kallocstat(void)
78399 +{
78400 + basic_block bb;
78401 +
78402 + // 1. loop through BBs and GIMPLE statements
78403 + FOR_EACH_BB(bb) {
78404 + gimple_stmt_iterator gsi;
78405 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78406 + // gimple match:
78407 + tree fndecl, size;
78408 + gimple call_stmt;
78409 + const char *fnname;
78410 +
78411 + // is it a call
78412 + call_stmt = gsi_stmt(gsi);
78413 + if (!is_gimple_call(call_stmt))
78414 + continue;
78415 + fndecl = gimple_call_fndecl(call_stmt);
78416 + if (fndecl == NULL_TREE)
78417 + continue;
78418 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
78419 + continue;
78420 +
78421 + // is it a call to k*alloc
78422 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
78423 + if (!is_kalloc(fnname))
78424 + continue;
78425 +
78426 + // is the size arg the result of a simple const assignment
78427 + size = gimple_call_arg(call_stmt, 0);
78428 + while (true) {
78429 + gimple def_stmt;
78430 + expanded_location xloc;
78431 + size_t size_val;
78432 +
78433 + if (TREE_CODE(size) != SSA_NAME)
78434 + break;
78435 + def_stmt = SSA_NAME_DEF_STMT(size);
78436 + if (!def_stmt || !is_gimple_assign(def_stmt))
78437 + break;
78438 + if (gimple_num_ops(def_stmt) != 2)
78439 + break;
78440 + size = gimple_assign_rhs1(def_stmt);
78441 + if (!TREE_CONSTANT(size))
78442 + continue;
78443 + xloc = expand_location(gimple_location(def_stmt));
78444 + if (!xloc.file)
78445 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
78446 + size_val = TREE_INT_CST_LOW(size);
78447 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
78448 + break;
78449 + }
78450 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78451 +//debug_tree(gimple_call_fn(call_stmt));
78452 +//print_node(stderr, "pax", fndecl, 4);
78453 + }
78454 + }
78455 +
78456 + return 0;
78457 +}
78458 +
78459 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78460 +{
78461 + const char * const plugin_name = plugin_info->base_name;
78462 + struct register_pass_info kallocstat_pass_info = {
78463 + .pass = &kallocstat_pass.pass,
78464 + .reference_pass_name = "ssa",
78465 + .ref_pass_instance_number = 0,
78466 + .pos_op = PASS_POS_INSERT_AFTER
78467 + };
78468 +
78469 + if (!plugin_default_version_check(version, &gcc_version)) {
78470 + error(G_("incompatible gcc/plugin versions"));
78471 + return 1;
78472 + }
78473 +
78474 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
78475 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
78476 +
78477 + return 0;
78478 +}
78479 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
78480 new file mode 100644
78481 index 0000000..008f159
78482 --- /dev/null
78483 +++ b/tools/gcc/kernexec_plugin.c
78484 @@ -0,0 +1,427 @@
78485 +/*
78486 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78487 + * Licensed under the GPL v2
78488 + *
78489 + * Note: the choice of the license means that the compilation process is
78490 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78491 + * but for the kernel it doesn't matter since it doesn't link against
78492 + * any of the gcc libraries
78493 + *
78494 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
78495 + *
78496 + * TODO:
78497 + *
78498 + * BUGS:
78499 + * - none known
78500 + */
78501 +#include "gcc-plugin.h"
78502 +#include "config.h"
78503 +#include "system.h"
78504 +#include "coretypes.h"
78505 +#include "tree.h"
78506 +#include "tree-pass.h"
78507 +#include "flags.h"
78508 +#include "intl.h"
78509 +#include "toplev.h"
78510 +#include "plugin.h"
78511 +//#include "expr.h" where are you...
78512 +#include "diagnostic.h"
78513 +#include "plugin-version.h"
78514 +#include "tm.h"
78515 +#include "function.h"
78516 +#include "basic-block.h"
78517 +#include "gimple.h"
78518 +#include "rtl.h"
78519 +#include "emit-rtl.h"
78520 +#include "tree-flow.h"
78521 +
78522 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78523 +extern rtx emit_move_insn(rtx x, rtx y);
78524 +
78525 +int plugin_is_GPL_compatible;
78526 +
78527 +static struct plugin_info kernexec_plugin_info = {
78528 + .version = "201111291120",
78529 + .help = "method=[bts|or]\tinstrumentation method\n"
78530 +};
78531 +
78532 +static unsigned int execute_kernexec_reload(void);
78533 +static unsigned int execute_kernexec_fptr(void);
78534 +static unsigned int execute_kernexec_retaddr(void);
78535 +static bool kernexec_cmodel_check(void);
78536 +
78537 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
78538 +static void (*kernexec_instrument_retaddr)(rtx);
78539 +
78540 +static struct gimple_opt_pass kernexec_reload_pass = {
78541 + .pass = {
78542 + .type = GIMPLE_PASS,
78543 + .name = "kernexec_reload",
78544 + .gate = kernexec_cmodel_check,
78545 + .execute = execute_kernexec_reload,
78546 + .sub = NULL,
78547 + .next = NULL,
78548 + .static_pass_number = 0,
78549 + .tv_id = TV_NONE,
78550 + .properties_required = 0,
78551 + .properties_provided = 0,
78552 + .properties_destroyed = 0,
78553 + .todo_flags_start = 0,
78554 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
78555 + }
78556 +};
78557 +
78558 +static struct gimple_opt_pass kernexec_fptr_pass = {
78559 + .pass = {
78560 + .type = GIMPLE_PASS,
78561 + .name = "kernexec_fptr",
78562 + .gate = kernexec_cmodel_check,
78563 + .execute = execute_kernexec_fptr,
78564 + .sub = NULL,
78565 + .next = NULL,
78566 + .static_pass_number = 0,
78567 + .tv_id = TV_NONE,
78568 + .properties_required = 0,
78569 + .properties_provided = 0,
78570 + .properties_destroyed = 0,
78571 + .todo_flags_start = 0,
78572 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
78573 + }
78574 +};
78575 +
78576 +static struct rtl_opt_pass kernexec_retaddr_pass = {
78577 + .pass = {
78578 + .type = RTL_PASS,
78579 + .name = "kernexec_retaddr",
78580 + .gate = kernexec_cmodel_check,
78581 + .execute = execute_kernexec_retaddr,
78582 + .sub = NULL,
78583 + .next = NULL,
78584 + .static_pass_number = 0,
78585 + .tv_id = TV_NONE,
78586 + .properties_required = 0,
78587 + .properties_provided = 0,
78588 + .properties_destroyed = 0,
78589 + .todo_flags_start = 0,
78590 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
78591 + }
78592 +};
78593 +
78594 +static bool kernexec_cmodel_check(void)
78595 +{
78596 + tree section;
78597 +
78598 + if (ix86_cmodel != CM_KERNEL)
78599 + return false;
78600 +
78601 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
78602 + if (!section || !TREE_VALUE(section))
78603 + return true;
78604 +
78605 + section = TREE_VALUE(TREE_VALUE(section));
78606 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
78607 + return true;
78608 +
78609 + return false;
78610 +}
78611 +
78612 +/*
78613 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
78614 + */
78615 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
78616 +{
78617 + gimple asm_movabs_stmt;
78618 +
78619 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
78620 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
78621 + gimple_asm_set_volatile(asm_movabs_stmt, true);
78622 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
78623 + update_stmt(asm_movabs_stmt);
78624 +}
78625 +
78626 +/*
78627 + * find all asm() stmts that clobber r10 and add a reload of r10
78628 + */
78629 +static unsigned int execute_kernexec_reload(void)
78630 +{
78631 + basic_block bb;
78632 +
78633 + // 1. loop through BBs and GIMPLE statements
78634 + FOR_EACH_BB(bb) {
78635 + gimple_stmt_iterator gsi;
78636 +
78637 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78638 + // gimple match: __asm__ ("" : : : "r10");
78639 + gimple asm_stmt;
78640 + size_t nclobbers;
78641 +
78642 + // is it an asm ...
78643 + asm_stmt = gsi_stmt(gsi);
78644 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
78645 + continue;
78646 +
78647 + // ... clobbering r10
78648 + nclobbers = gimple_asm_nclobbers(asm_stmt);
78649 + while (nclobbers--) {
78650 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
78651 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
78652 + continue;
78653 + kernexec_reload_fptr_mask(&gsi);
78654 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
78655 + break;
78656 + }
78657 + }
78658 + }
78659 +
78660 + return 0;
78661 +}
78662 +
78663 +/*
78664 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
78665 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
78666 + */
78667 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
78668 +{
78669 + gimple assign_intptr, assign_new_fptr, call_stmt;
78670 + tree intptr, old_fptr, new_fptr, kernexec_mask;
78671 +
78672 + call_stmt = gsi_stmt(*gsi);
78673 + old_fptr = gimple_call_fn(call_stmt);
78674 +
78675 + // create temporary unsigned long variable used for bitops and cast fptr to it
78676 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
78677 + add_referenced_var(intptr);
78678 + mark_sym_for_renaming(intptr);
78679 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
78680 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
78681 + update_stmt(assign_intptr);
78682 +
78683 + // apply logical or to temporary unsigned long and bitmask
78684 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
78685 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
78686 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
78687 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
78688 + update_stmt(assign_intptr);
78689 +
78690 + // cast temporary unsigned long back to a temporary fptr variable
78691 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
78692 + add_referenced_var(new_fptr);
78693 + mark_sym_for_renaming(new_fptr);
78694 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
78695 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
78696 + update_stmt(assign_new_fptr);
78697 +
78698 + // replace call stmt fn with the new fptr
78699 + gimple_call_set_fn(call_stmt, new_fptr);
78700 + update_stmt(call_stmt);
78701 +}
78702 +
78703 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
78704 +{
78705 + gimple asm_or_stmt, call_stmt;
78706 + tree old_fptr, new_fptr, input, output;
78707 + VEC(tree, gc) *inputs = NULL;
78708 + VEC(tree, gc) *outputs = NULL;
78709 +
78710 + call_stmt = gsi_stmt(*gsi);
78711 + old_fptr = gimple_call_fn(call_stmt);
78712 +
78713 + // create temporary fptr variable
78714 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
78715 + add_referenced_var(new_fptr);
78716 + mark_sym_for_renaming(new_fptr);
78717 +
78718 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
78719 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
78720 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
78721 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
78722 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
78723 + VEC_safe_push(tree, gc, inputs, input);
78724 + VEC_safe_push(tree, gc, outputs, output);
78725 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
78726 + gimple_asm_set_volatile(asm_or_stmt, true);
78727 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
78728 + update_stmt(asm_or_stmt);
78729 +
78730 + // replace call stmt fn with the new fptr
78731 + gimple_call_set_fn(call_stmt, new_fptr);
78732 + update_stmt(call_stmt);
78733 +}
78734 +
78735 +/*
78736 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
78737 + */
78738 +static unsigned int execute_kernexec_fptr(void)
78739 +{
78740 + basic_block bb;
78741 +
78742 + // 1. loop through BBs and GIMPLE statements
78743 + FOR_EACH_BB(bb) {
78744 + gimple_stmt_iterator gsi;
78745 +
78746 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78747 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
78748 + tree fn;
78749 + gimple call_stmt;
78750 +
78751 + // is it a call ...
78752 + call_stmt = gsi_stmt(gsi);
78753 + if (!is_gimple_call(call_stmt))
78754 + continue;
78755 + fn = gimple_call_fn(call_stmt);
78756 + if (TREE_CODE(fn) == ADDR_EXPR)
78757 + continue;
78758 + if (TREE_CODE(fn) != SSA_NAME)
78759 + gcc_unreachable();
78760 +
78761 + // ... through a function pointer
78762 + fn = SSA_NAME_VAR(fn);
78763 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
78764 + continue;
78765 + fn = TREE_TYPE(fn);
78766 + if (TREE_CODE(fn) != POINTER_TYPE)
78767 + continue;
78768 + fn = TREE_TYPE(fn);
78769 + if (TREE_CODE(fn) != FUNCTION_TYPE)
78770 + continue;
78771 +
78772 + kernexec_instrument_fptr(&gsi);
78773 +
78774 +//debug_tree(gimple_call_fn(call_stmt));
78775 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78776 + }
78777 + }
78778 +
78779 + return 0;
78780 +}
78781 +
78782 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
78783 +static void kernexec_instrument_retaddr_bts(rtx insn)
78784 +{
78785 + rtx btsq;
78786 + rtvec argvec, constraintvec, labelvec;
78787 + int line;
78788 +
78789 + // create asm volatile("btsq $63,(%%rsp)":::)
78790 + argvec = rtvec_alloc(0);
78791 + constraintvec = rtvec_alloc(0);
78792 + labelvec = rtvec_alloc(0);
78793 + line = expand_location(RTL_LOCATION(insn)).line;
78794 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78795 + MEM_VOLATILE_P(btsq) = 1;
78796 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
78797 + emit_insn_before(btsq, insn);
78798 +}
78799 +
78800 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
78801 +static void kernexec_instrument_retaddr_or(rtx insn)
78802 +{
78803 + rtx orq;
78804 + rtvec argvec, constraintvec, labelvec;
78805 + int line;
78806 +
78807 + // create asm volatile("orq %%r10,(%%rsp)":::)
78808 + argvec = rtvec_alloc(0);
78809 + constraintvec = rtvec_alloc(0);
78810 + labelvec = rtvec_alloc(0);
78811 + line = expand_location(RTL_LOCATION(insn)).line;
78812 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78813 + MEM_VOLATILE_P(orq) = 1;
78814 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
78815 + emit_insn_before(orq, insn);
78816 +}
78817 +
78818 +/*
78819 + * find all asm level function returns and forcibly set the highest bit of the return address
78820 + */
78821 +static unsigned int execute_kernexec_retaddr(void)
78822 +{
78823 + rtx insn;
78824 +
78825 + // 1. find function returns
78826 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78827 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
78828 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
78829 + rtx body;
78830 +
78831 + // is it a retn
78832 + if (!JUMP_P(insn))
78833 + continue;
78834 + body = PATTERN(insn);
78835 + if (GET_CODE(body) == PARALLEL)
78836 + body = XVECEXP(body, 0, 0);
78837 + if (GET_CODE(body) != RETURN)
78838 + continue;
78839 + kernexec_instrument_retaddr(insn);
78840 + }
78841 +
78842 +// print_simple_rtl(stderr, get_insns());
78843 +// print_rtl(stderr, get_insns());
78844 +
78845 + return 0;
78846 +}
78847 +
78848 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78849 +{
78850 + const char * const plugin_name = plugin_info->base_name;
78851 + const int argc = plugin_info->argc;
78852 + const struct plugin_argument * const argv = plugin_info->argv;
78853 + int i;
78854 + struct register_pass_info kernexec_reload_pass_info = {
78855 + .pass = &kernexec_reload_pass.pass,
78856 + .reference_pass_name = "ssa",
78857 + .ref_pass_instance_number = 0,
78858 + .pos_op = PASS_POS_INSERT_AFTER
78859 + };
78860 + struct register_pass_info kernexec_fptr_pass_info = {
78861 + .pass = &kernexec_fptr_pass.pass,
78862 + .reference_pass_name = "ssa",
78863 + .ref_pass_instance_number = 0,
78864 + .pos_op = PASS_POS_INSERT_AFTER
78865 + };
78866 + struct register_pass_info kernexec_retaddr_pass_info = {
78867 + .pass = &kernexec_retaddr_pass.pass,
78868 + .reference_pass_name = "pro_and_epilogue",
78869 + .ref_pass_instance_number = 0,
78870 + .pos_op = PASS_POS_INSERT_AFTER
78871 + };
78872 +
78873 + if (!plugin_default_version_check(version, &gcc_version)) {
78874 + error(G_("incompatible gcc/plugin versions"));
78875 + return 1;
78876 + }
78877 +
78878 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
78879 +
78880 + if (TARGET_64BIT == 0)
78881 + return 0;
78882 +
78883 + for (i = 0; i < argc; ++i) {
78884 + if (!strcmp(argv[i].key, "method")) {
78885 + if (!argv[i].value) {
78886 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78887 + continue;
78888 + }
78889 + if (!strcmp(argv[i].value, "bts")) {
78890 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
78891 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
78892 + } else if (!strcmp(argv[i].value, "or")) {
78893 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
78894 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
78895 + fix_register("r10", 1, 1);
78896 + } else
78897 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78898 + continue;
78899 + }
78900 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78901 + }
78902 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
78903 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
78904 +
78905 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
78906 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
78907 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
78908 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
78909 +
78910 + return 0;
78911 +}
78912 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
78913 new file mode 100644
78914 index 0000000..4a9b187
78915 --- /dev/null
78916 +++ b/tools/gcc/stackleak_plugin.c
78917 @@ -0,0 +1,326 @@
78918 +/*
78919 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78920 + * Licensed under the GPL v2
78921 + *
78922 + * Note: the choice of the license means that the compilation process is
78923 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78924 + * but for the kernel it doesn't matter since it doesn't link against
78925 + * any of the gcc libraries
78926 + *
78927 + * gcc plugin to help implement various PaX features
78928 + *
78929 + * - track lowest stack pointer
78930 + *
78931 + * TODO:
78932 + * - initialize all local variables
78933 + *
78934 + * BUGS:
78935 + * - none known
78936 + */
78937 +#include "gcc-plugin.h"
78938 +#include "config.h"
78939 +#include "system.h"
78940 +#include "coretypes.h"
78941 +#include "tree.h"
78942 +#include "tree-pass.h"
78943 +#include "flags.h"
78944 +#include "intl.h"
78945 +#include "toplev.h"
78946 +#include "plugin.h"
78947 +//#include "expr.h" where are you...
78948 +#include "diagnostic.h"
78949 +#include "plugin-version.h"
78950 +#include "tm.h"
78951 +#include "function.h"
78952 +#include "basic-block.h"
78953 +#include "gimple.h"
78954 +#include "rtl.h"
78955 +#include "emit-rtl.h"
78956 +
78957 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78958 +
78959 +int plugin_is_GPL_compatible;
78960 +
78961 +static int track_frame_size = -1;
78962 +static const char track_function[] = "pax_track_stack";
78963 +static const char check_function[] = "pax_check_alloca";
78964 +static tree pax_check_alloca_decl;
78965 +static tree pax_track_stack_decl;
78966 +static bool init_locals;
78967 +
78968 +static struct plugin_info stackleak_plugin_info = {
78969 + .version = "201203021600",
78970 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
78971 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
78972 +};
78973 +
78974 +static bool gate_stackleak_track_stack(void);
78975 +static unsigned int execute_stackleak_tree_instrument(void);
78976 +static unsigned int execute_stackleak_final(void);
78977 +
78978 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
78979 + .pass = {
78980 + .type = GIMPLE_PASS,
78981 + .name = "stackleak_tree_instrument",
78982 + .gate = gate_stackleak_track_stack,
78983 + .execute = execute_stackleak_tree_instrument,
78984 + .sub = NULL,
78985 + .next = NULL,
78986 + .static_pass_number = 0,
78987 + .tv_id = TV_NONE,
78988 + .properties_required = PROP_gimple_leh | PROP_cfg,
78989 + .properties_provided = 0,
78990 + .properties_destroyed = 0,
78991 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
78992 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
78993 + }
78994 +};
78995 +
78996 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
78997 + .pass = {
78998 + .type = RTL_PASS,
78999 + .name = "stackleak_final",
79000 + .gate = gate_stackleak_track_stack,
79001 + .execute = execute_stackleak_final,
79002 + .sub = NULL,
79003 + .next = NULL,
79004 + .static_pass_number = 0,
79005 + .tv_id = TV_NONE,
79006 + .properties_required = 0,
79007 + .properties_provided = 0,
79008 + .properties_destroyed = 0,
79009 + .todo_flags_start = 0,
79010 + .todo_flags_finish = TODO_dump_func
79011 + }
79012 +};
79013 +
79014 +static bool gate_stackleak_track_stack(void)
79015 +{
79016 + return track_frame_size >= 0;
79017 +}
79018 +
79019 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
79020 +{
79021 + gimple check_alloca;
79022 + tree alloca_size;
79023 +
79024 + // insert call to void pax_check_alloca(unsigned long size)
79025 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
79026 + check_alloca = gimple_build_call(pax_check_alloca_decl, 1, alloca_size);
79027 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
79028 +}
79029 +
79030 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
79031 +{
79032 + gimple track_stack;
79033 +
79034 + // insert call to void pax_track_stack(void)
79035 + track_stack = gimple_build_call(pax_track_stack_decl, 0);
79036 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
79037 +}
79038 +
79039 +#if BUILDING_GCC_VERSION == 4005
79040 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
79041 +{
79042 + tree fndecl;
79043 +
79044 + if (!is_gimple_call(stmt))
79045 + return false;
79046 + fndecl = gimple_call_fndecl(stmt);
79047 + if (!fndecl)
79048 + return false;
79049 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
79050 + return false;
79051 +// print_node(stderr, "pax", fndecl, 4);
79052 + return DECL_FUNCTION_CODE(fndecl) == code;
79053 +}
79054 +#endif
79055 +
79056 +static bool is_alloca(gimple stmt)
79057 +{
79058 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
79059 + return true;
79060 +
79061 +#if BUILDING_GCC_VERSION >= 4007
79062 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
79063 + return true;
79064 +#endif
79065 +
79066 + return false;
79067 +}
79068 +
79069 +static unsigned int execute_stackleak_tree_instrument(void)
79070 +{
79071 + basic_block bb, entry_bb;
79072 + bool prologue_instrumented = false, is_leaf = true;
79073 +
79074 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
79075 +
79076 + // 1. loop through BBs and GIMPLE statements
79077 + FOR_EACH_BB(bb) {
79078 + gimple_stmt_iterator gsi;
79079 +
79080 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
79081 + gimple stmt;
79082 +
79083 + stmt = gsi_stmt(gsi);
79084 +
79085 + if (is_gimple_call(stmt))
79086 + is_leaf = false;
79087 +
79088 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
79089 + if (!is_alloca(stmt))
79090 + continue;
79091 +
79092 + // 2. insert stack overflow check before each __builtin_alloca call
79093 + stackleak_check_alloca(&gsi);
79094 +
79095 + // 3. insert track call after each __builtin_alloca call
79096 + stackleak_add_instrumentation(&gsi);
79097 + if (bb == entry_bb)
79098 + prologue_instrumented = true;
79099 + }
79100 + }
79101 +
79102 + // special case for some bad linux code: taking the address of static inline functions will materialize them
79103 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
79104 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
79105 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
79106 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
79107 + return 0;
79108 +
79109 + // 4. insert track call at the beginning
79110 + if (!prologue_instrumented) {
79111 + gimple_stmt_iterator gsi;
79112 +
79113 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
79114 + if (dom_info_available_p(CDI_DOMINATORS))
79115 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
79116 + gsi = gsi_start_bb(bb);
79117 + stackleak_add_instrumentation(&gsi);
79118 + }
79119 +
79120 + return 0;
79121 +}
79122 +
79123 +static unsigned int execute_stackleak_final(void)
79124 +{
79125 + rtx insn;
79126 +
79127 + if (cfun->calls_alloca)
79128 + return 0;
79129 +
79130 + // keep calls only if function frame is big enough
79131 + if (get_frame_size() >= track_frame_size)
79132 + return 0;
79133 +
79134 + // 1. find pax_track_stack calls
79135 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
79136 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
79137 + rtx body;
79138 +
79139 + if (!CALL_P(insn))
79140 + continue;
79141 + body = PATTERN(insn);
79142 + if (GET_CODE(body) != CALL)
79143 + continue;
79144 + body = XEXP(body, 0);
79145 + if (GET_CODE(body) != MEM)
79146 + continue;
79147 + body = XEXP(body, 0);
79148 + if (GET_CODE(body) != SYMBOL_REF)
79149 + continue;
79150 + if (strcmp(XSTR(body, 0), track_function))
79151 + continue;
79152 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
79153 + // 2. delete call
79154 + insn = delete_insn_and_edges(insn);
79155 +#if BUILDING_GCC_VERSION >= 4007
79156 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
79157 + insn = delete_insn_and_edges(insn);
79158 +#endif
79159 + }
79160 +
79161 +// print_simple_rtl(stderr, get_insns());
79162 +// print_rtl(stderr, get_insns());
79163 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
79164 +
79165 + return 0;
79166 +}
79167 +
79168 +static void stackleak_start_unit(void *gcc_data, void *user_dat)
79169 +{
79170 + tree fntype;
79171 +
79172 + // declare void pax_check_alloca(unsigned long size)
79173 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
79174 + pax_check_alloca_decl = build_fn_decl(check_function, fntype);
79175 + DECL_ASSEMBLER_NAME(pax_check_alloca_decl); // for LTO
79176 + TREE_PUBLIC(pax_check_alloca_decl) = 1;
79177 + DECL_EXTERNAL(pax_check_alloca_decl) = 1;
79178 + DECL_ARTIFICIAL(pax_check_alloca_decl) = 1;
79179 +
79180 + // declare void pax_track_stack(void)
79181 + fntype = build_function_type_list(void_type_node, NULL_TREE);
79182 + pax_track_stack_decl = build_fn_decl(track_function, fntype);
79183 + DECL_ASSEMBLER_NAME(pax_track_stack_decl); // for LTO
79184 + TREE_PUBLIC(pax_track_stack_decl) = 1;
79185 + DECL_EXTERNAL(pax_track_stack_decl) = 1;
79186 + DECL_ARTIFICIAL(pax_track_stack_decl) = 1;
79187 +}
79188 +
79189 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79190 +{
79191 + const char * const plugin_name = plugin_info->base_name;
79192 + const int argc = plugin_info->argc;
79193 + const struct plugin_argument * const argv = plugin_info->argv;
79194 + int i;
79195 + struct register_pass_info stackleak_tree_instrument_pass_info = {
79196 + .pass = &stackleak_tree_instrument_pass.pass,
79197 +// .reference_pass_name = "tree_profile",
79198 + .reference_pass_name = "optimized",
79199 + .ref_pass_instance_number = 0,
79200 + .pos_op = PASS_POS_INSERT_BEFORE
79201 + };
79202 + struct register_pass_info stackleak_final_pass_info = {
79203 + .pass = &stackleak_final_rtl_opt_pass.pass,
79204 + .reference_pass_name = "final",
79205 + .ref_pass_instance_number = 0,
79206 + .pos_op = PASS_POS_INSERT_BEFORE
79207 + };
79208 +
79209 + if (!plugin_default_version_check(version, &gcc_version)) {
79210 + error(G_("incompatible gcc/plugin versions"));
79211 + return 1;
79212 + }
79213 +
79214 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
79215 +
79216 + for (i = 0; i < argc; ++i) {
79217 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
79218 + if (!argv[i].value) {
79219 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79220 + continue;
79221 + }
79222 + track_frame_size = atoi(argv[i].value);
79223 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
79224 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
79225 + continue;
79226 + }
79227 + if (!strcmp(argv[i].key, "initialize-locals")) {
79228 + if (argv[i].value) {
79229 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
79230 + continue;
79231 + }
79232 + init_locals = true;
79233 + continue;
79234 + }
79235 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79236 + }
79237 +
79238 + register_callback("start_unit", PLUGIN_START_UNIT, &stackleak_start_unit, NULL);
79239 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
79240 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
79241 +
79242 + return 0;
79243 +}
79244 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
79245 index 6789d78..4afd019 100644
79246 --- a/tools/perf/util/include/asm/alternative-asm.h
79247 +++ b/tools/perf/util/include/asm/alternative-asm.h
79248 @@ -5,4 +5,7 @@
79249
79250 #define altinstruction_entry #
79251
79252 + .macro pax_force_retaddr rip=0, reload=0
79253 + .endm
79254 +
79255 #endif
79256 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
79257 index af0f22f..9a7d479 100644
79258 --- a/usr/gen_init_cpio.c
79259 +++ b/usr/gen_init_cpio.c
79260 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
79261 int retval;
79262 int rc = -1;
79263 int namesize;
79264 - int i;
79265 + unsigned int i;
79266
79267 mode |= S_IFREG;
79268
79269 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
79270 *env_var = *expanded = '\0';
79271 strncat(env_var, start + 2, end - start - 2);
79272 strncat(expanded, new_location, start - new_location);
79273 - strncat(expanded, getenv(env_var), PATH_MAX);
79274 - strncat(expanded, end + 1, PATH_MAX);
79275 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
79276 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
79277 strncpy(new_location, expanded, PATH_MAX);
79278 + new_location[PATH_MAX] = 0;
79279 } else
79280 break;
79281 }
79282 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
79283 index d9cfb78..4f27c10 100644
79284 --- a/virt/kvm/kvm_main.c
79285 +++ b/virt/kvm/kvm_main.c
79286 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
79287
79288 static cpumask_var_t cpus_hardware_enabled;
79289 static int kvm_usage_count = 0;
79290 -static atomic_t hardware_enable_failed;
79291 +static atomic_unchecked_t hardware_enable_failed;
79292
79293 struct kmem_cache *kvm_vcpu_cache;
79294 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
79295 @@ -2268,7 +2268,7 @@ static void hardware_enable_nolock(void *junk)
79296
79297 if (r) {
79298 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
79299 - atomic_inc(&hardware_enable_failed);
79300 + atomic_inc_unchecked(&hardware_enable_failed);
79301 printk(KERN_INFO "kvm: enabling virtualization on "
79302 "CPU%d failed\n", cpu);
79303 }
79304 @@ -2322,10 +2322,10 @@ static int hardware_enable_all(void)
79305
79306 kvm_usage_count++;
79307 if (kvm_usage_count == 1) {
79308 - atomic_set(&hardware_enable_failed, 0);
79309 + atomic_set_unchecked(&hardware_enable_failed, 0);
79310 on_each_cpu(hardware_enable_nolock, NULL, 1);
79311
79312 - if (atomic_read(&hardware_enable_failed)) {
79313 + if (atomic_read_unchecked(&hardware_enable_failed)) {
79314 hardware_disable_all_nolock();
79315 r = -EBUSY;
79316 }
79317 @@ -2676,7 +2676,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
79318 kvm_arch_vcpu_put(vcpu);
79319 }
79320
79321 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79322 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79323 struct module *module)
79324 {
79325 int r;
79326 @@ -2739,7 +2739,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79327 if (!vcpu_align)
79328 vcpu_align = __alignof__(struct kvm_vcpu);
79329 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
79330 - 0, NULL);
79331 + SLAB_USERCOPY, NULL);
79332 if (!kvm_vcpu_cache) {
79333 r = -ENOMEM;
79334 goto out_free_3;
79335 @@ -2749,9 +2749,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79336 if (r)
79337 goto out_free;
79338
79339 - kvm_chardev_ops.owner = module;
79340 - kvm_vm_fops.owner = module;
79341 - kvm_vcpu_fops.owner = module;
79342 + pax_open_kernel();
79343 + *(void **)&kvm_chardev_ops.owner = module;
79344 + *(void **)&kvm_vm_fops.owner = module;
79345 + *(void **)&kvm_vcpu_fops.owner = module;
79346 + pax_close_kernel();
79347
79348 r = misc_register(&kvm_dev);
79349 if (r) {